8 #ifndef DEM_BONES_DEM_BONES
9 #define DEM_BONES_DEM_BONES
11 #include <Eigen/Dense>
12 #include <Eigen/Sparse>
13 #include <Eigen/StdVector>
18 #ifndef DEM_BONES_MAT_BLOCKS
20 #define DEM_BONES_DEM_BONES_MAT_BLOCKS_UNDEFINED
// Templated solver class header fragment.
// NOTE(review): the `class ...` declaration line between the template
// parameter list and the macro below is missing from this extraction.
// _Scalar is the solve precision; _AniMeshScalar is the precision of
// the animated mesh positions (cast to _Scalar where used).
64 template<
class _Scalar,
class _AniMeshScalar>
// Fixed-size vectorizable Eigen members require aligned operator new.
67 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
// Convenience aliases over the solve scalar type.
69 using MatrixX=Eigen::Matrix<_Scalar, Eigen::Dynamic, Eigen::Dynamic>;
70 using Matrix4=Eigen::Matrix<_Scalar, 4, 4>;
71 using Matrix3=Eigen::Matrix<_Scalar, 3, 3>;
72 using VectorX=Eigen::Matrix<_Scalar, Eigen::Dynamic, 1>;
73 using Vector4=Eigen::Matrix<_Scalar, 4, 1>;
74 using Vector3=Eigen::Matrix<_Scalar, 3, 1>;
75 using SparseMatrix=Eigen::SparseMatrix<_Scalar>;
76 using Triplet=Eigen::Triplet<_Scalar>;
// Animated mesh positions in _AniMeshScalar precision; accessed per
// frame/vertex as v.vec3(k, i) and cast to _Scalar in the solve.
138 Eigen::Matrix<_AniMeshScalar, Eigen::Dynamic, Eigen::Dynamic>
v;
// Polygon connectivity: fv[f] lists the vertex indices of face f.
141 std::vector<std::vector<int>>
fv;
// --- Fragment: initialization routine (orig. lines 164-206) ---
// NOTE(review): the enclosing signature and several interior lines are
// missing from this extraction; code lines below are kept byte-identical.
164 laplacian.resize(0, 0);
// Default modelSize = RMS distance of rest-pose vertices u from their
// centroid, averaged over nV vertices and nS subjects.
178 if (modelSize<0) modelSize=sqrt((
u-(
u.rowwise().sum()/
nV).replicate(1,
nV)).squaredNorm()/
nV/
nS);
// Rebuild the smoothing solver if the cached Laplacian is stale.
179 if (laplacian.cols()!=
nV) computeSmoothSolver();
// If the weight matrix w has the wrong shape, (re)initialize the
// labels and bone transforms from scratch.
181 if (((
int)
w.rows()!=
nB)||((
int)
w.cols()!=
nV)) {
182 if (((
int)
m.rows()!=
nF*4)||((
int)
m.cols()!=
nB*4)) {
186 label=Eigen::VectorXi::Zero(
nV);
187 computeTransFromLabel();
// Split bones toward the target count, then refit transforms.
192 split(targetNB,
nnz);
195 computeTransFromLabel();
201 m.conservativeResize(
nF*4,
nB*4);
203 }
else initWeights();
// If the bone-transform matrix m has the wrong shape, start from
// identity transforms replicated per frame/bone.
205 if (((
int)
m.rows()!=
nF*4)||((
int)
m.cols()!=
nB*4)) {
206 m=Matrix4::Identity().replicate(
nF,
nB);
// --- Fragment: bone-transform update loop (orig. lines 232-239) ---
// For every frame k and bone j, start from the weighted v*u^T block
// vuT.blk4(k, j) and subtract the contributions of all other bones
// (off-diagonal uuT blocks indexed CSC-style via outerIdx/innerIdx).
// NOTE(review): the enclosing signature and the statements that consume
// qpT are missing from this extraction.
232 for (_iterTransformations=0; _iterTransformations<
nTransIters; _iterTransformations++) {
234 #pragma omp parallel for
235 for (
int k=0; k<
nF; k++)
236 for (
int j=0; j<
nB; j++) {
237 Matrix4 qpT=vuT.blk4(k, j);
238 for (
int it=uuT.outerIdx(j); it<uuT.outerIdx(j + 1); it++)
239 if (uuT.innerIdx(it)!=j) qpT-=
m.blk4(k, uuT.innerIdx(it))*uuT.val.blk4(
subjectID(k), it);
// --- Fragment: skinning-weight update (orig. lines 270-309) ---
// Per vertex: build the nB x nB normal matrix (Tikhonov-regularized by
// `reg`, biased toward the smoothed prior ws), keep only the top
// min(nnz, nB) bones, solve the constrained least squares with
// wSolver, and rebuild the sparse weight matrix w from triplets.
// NOTE(review): several interior lines (aTai/x declarations, the mass
// s computation, brace closings) are missing from this extraction.
270 aTb=MatrixX::Zero(
nB,
nV);
272 std::vector<Triplet, Eigen::aligned_allocator<Triplet>> trip;
// Reserve for at most nnz nonzero weights per vertex.
273 trip.reserve(
nV*
nnz);
275 for (_iterWeights=0; _iterWeights<
nWeightsIters; _iterWeights++) {
284 #pragma omp parallel for
285 for (
int i=0; i<
nV; i++) {
287 compute_aTa(i, aTai);
// Regularize the normal matrix; bias RHS toward the smoothed prior.
288 aTai+=reg*MatrixX::Identity(
nB,
nB);
289 VectorX aTbi=aTb.col(i)+reg*ws.col(i);
// Rank bones by decreasing score x; keep the best min(nnz, nB).
292 Eigen::ArrayXi idx=Eigen::ArrayXi::LinSpaced(
nB, 0,
nB-1);
293 std::sort(idx.data(), idx.data()+
nB, [&x](
int i1,
int i2) {
return x(i1)>x(i2); });
294 int nnzi=std::min(
nnz,
nB);
// Warm-start from current weights, clamped non-negative; fall back to
// uniform weights when the remaining mass s is too small.
297 x=indexing_vector(
w.col(i).toDense().cwiseMax(0.0), idx.head(nnzi));
299 if (s>_Scalar(0.1)) x/=s;
else x=VectorX::Constant(nnzi, _Scalar(1)/nnzi);
301 wSolver.solve(indexing_row_col(aTai, idx.head(nnzi), idx.head(nnzi)), indexing_vector(aTbi, idx.head(nnzi)), x,
true,
true);
// Scatter the solved sub-vector back as (bone, vertex, weight) triplets.
304 for (
int j=0; j<nnzi; j++)
305 if (x(j)!=0) trip.push_back(Triplet(idx[j], i, x(j)));
309 w.setFromTriplets(trip.begin(), trip.end());
// --- Fragment: main solver loop + reconstruction RMSE (orig. 332-355) ---
// The RMSE part accumulates, over all vertices and frames, the squared
// distance between the weight-blended skinned rest pose and the
// animated position v, returning sqrt(e / nF / nV).
// NOTE(review): these lines span two routines (the outer iteration loop
// and the error evaluation); the signatures, mki/ei/e declarations and
// reductions are missing from this extraction.
332 for (_iter=0; _iter<
nIters; _iter++) {
343 #pragma omp parallel for
344 for (
int i=0; i<
nV; i++) {
347 for (
int k=0; k<
nF; k++) {
// Blend the 4x4 bone matrices of frame k by vertex i's weights.
349 for (
typename SparseMatrix::InnerIterator it(
w, i); it; ++it) mki+=it.value()*
m.blk4(k, it.row());
350 ei+=(mki.template topLeftCorner<3, 3>()*
u.vec3(
subjectID(k), i)+mki.template topRightCorner<3, 1>()-
v.vec3(k, i).template cast<_Scalar>()).squaredNorm();
355 return std::sqrt(e/
nF/
nV);
// Current overall / transform-update / weight-update iteration indices.
389 int _iter, _iterTransformations, _iterWeights;
// Extracts the rigid transform (rotation + translation) of bone j at
// frame k from the accumulated homogeneous outer-product matrix _qpT,
// via SVD orthogonalization with reflection correction.
// @param _qpT  4x4 sum of homogeneous(v)*homogeneous(u)^T terms;
//              (3,3) holds the total weight used for normalization.
// NOTE(review): the original guard against _qpT(3,3)==0 and the closing
// braces are on lines not visible in this extraction.
396 void qpT2m(
const Matrix4& _qpT,
int k,
int j) {
// Normalize by the total weight stored in the homogeneous corner.
398 Matrix4 qpT=_qpT/_qpT(3, 3);
// Cross-covariance = E[v u^T] - mean_v * mean_u^T; its SVD yields the
// optimal rotation.
399 Eigen::JacobiSVD<Matrix3> svd(qpT.template topLeftCorner<3, 3>()-qpT.template topRightCorner<3, 1>()*qpT.template bottomLeftCorner<1, 3>(), Eigen::ComputeFullU|Eigen::ComputeFullV);
400 Matrix3 d=Matrix3::Identity();
// Flip the last singular direction when U*V^T would be a reflection.
401 d(2, 2)=(svd.matrixU()*svd.matrixV().transpose()).determinant();
402 m.rotMat(k, j)=svd.matrixU()*d*svd.matrixV().transpose();
// Translation = mean_v - R * mean_u.
403 m.transVec(k, j)=qpT.template topRightCorner<3, 1>()-
m.rotMat(k, j)*qpT.template bottomLeftCorner<1, 3>().transpose();
// Sum over all frames of the squared error of binding vertex i rigidly
// to bone j: | R_j(k)*u_i + t_j(k) - v_i(k) |^2.
// @param par  run the frame loop in parallel (pass false when already
//             inside an OpenMP parallel region).
// NOTE(review): the accumulator declaration, the OpenMP reduction
// clause and the return statement are on lines not visible here.
411 _Scalar errorVtxBone(
int i,
int j,
bool par=
true) {
413 #pragma omp parallel for if(par)
414 for (
int k=0; k<
nF; k++)
416 e+=(
m.rotMat(k, j)*
u.vec3(
subjectID(k), i)+
m.transVec(k, j)-
v.vec3(k, i).template cast<_Scalar>()).squaredNorm();
// Per-vertex bone assignment; -1 marks an unassigned vertex.
421 Eigen::VectorXi label;
// Orders triplets by descending value so the std::priority_queue used
// for region growing pops the smallest error first (min-heap on value).
425 bool operator() (
const Triplet& t1,
const Triplet& t2) {
426 return t1.value()>t2.value();
// Re-labels vertices by region growing: seed each bone with its
// best-fitting vertex, grow labels outward through Laplacian adjacency
// via a min-error priority queue, then assign any still-unlabeled
// vertex to its overall minimum-error bone.
// NOTE(review): interior lines (ei/gMin declarations, critical sections,
// several brace closings) are missing from this extraction.
432 void computeLabel() {
434 Eigen::VectorXi seed=Eigen::VectorXi::Constant(
nB, -1);
436 #pragma omp parallel for
437 for (
int i=0; i<
nV; i++) {
// Error of vertex i against bone j; par=false since this loop is
// already parallelized.
440 ei(i)=errorVtxBone(i, j,
false);
441 if ((seed(j)==-1)||(ei(i)<gMin(j))) {
443 if ((seed(j)==-1)||(ei(i)<gMin(j))) {
// Seed the growing queue with each bone's best vertex.
451 std::priority_queue<Triplet, std::vector<Triplet, Eigen::aligned_allocator<Triplet>>, TripletLess> heap;
452 for (
int j=0; j<
nB; j++)
if (seed(j)!=-1) heap.push(Triplet(j, seed(j), ei(seed(j))));
// Make sure vertex adjacency (Laplacian) is up to date before growing.
454 if (laplacian.cols()!=
nV) computeSmoothSolver();
456 std::vector<bool> dirty(
nV,
true);
457 while (!heap.empty()) {
// Triplet encodes (row = bone j, col = vertex i, value = error).
458 Triplet top=heap.top();
460 int i=(int)top.col();
461 int j=(int)top.row();
// Push i's Laplacian neighbors as candidates for bone j.
466 for (
typename SparseMatrix::InnerIterator it(laplacian, i); it; ++it) {
467 int i2=(int)it.row();
// Reuse the cached error when the neighbor already carries label j.
469 double tmp=(label(i2)==j)?ei(i2):errorVtxBone(i2, j);
470 heap.push(Triplet(j, i2, tmp));
// Fallback pass: unlabeled vertices take their minimum-error bone.
476 #pragma omp parallel for
477 for (
int i=0; i<
nV; i++)
480 for (
int j=0; j<
nB; j++) {
481 _Scalar ej=errorVtxBone(i, j,
false);
482 if ((label(i)==-1)||(gMin>ej)) {
// Fits one rigid transform per (frame, bone) from the current hard
// labels: accumulate homogeneous(v)*homogeneous(u)^T over each bone's
// assigned vertices, then orthogonalize each block with qpT2m. Bones
// with no assigned vertices keep the identity set on the first line
// (assuming qpT2m leaves all-zero blocks untouched -- TODO confirm,
// its guard line is not visible in this extraction).
492 void computeTransFromLabel() {
493 m=Matrix4::Identity().replicate(
nF,
nB);
494 #pragma omp parallel for
495 for (
int k=0; k<
nF; k++) {
// One 4x4 accumulator block per bone for this frame.
496 MatrixX qpT=MatrixX::Zero(4, 4*
nB);
497 for (
int i=0; i<
nV; i++)
498 if (label(i)!=-1) qpT.blk4(0, label(i))+=Vector4(
v.vec3(k, i).template cast<_Scalar>().homogeneous())*
u.vec3(
subjectID(k), i).homogeneous().transpose();
499 for (
int j=0; j<
nB; j++) qpT2m(qpT.blk4(0, j), k, j);
// Converts hard per-vertex labels into the sparse weight matrix w:
// exactly one unit weight per vertex, at row label(i).
// NOTE(review): the resize of w preceding setFromTriplets (orig. line
// 508) is not visible in this extraction.
505 void labelToWeights() {
506 std::vector<Triplet, Eigen::aligned_allocator<Triplet>> trip(
nV);
507 for (
int i=0; i<
nV; i++) trip[i]=Triplet(label(i), i, _Scalar(1));
509 w.setFromTriplets(trip.begin(), trip.end());
// Splits high-error bones to grow the bone count toward maxB: compute
// per-bone rest-pose centroids (cu) and vertex counts (s); per bone,
// pick the seed vertex maximizing error x squared-distance-to-centroid;
// then, for bones that are large enough (s > 2*threshold) and errorful
// enough (ce > avgErr/100), mint a new label and spread it over the
// seed's Laplacian neighborhood.
// @param maxB       upper bound on total bone count.
// @param threshold  minimum surviving bone size (each half must exceed it).
// NOTE(review): interior lines (s increments, gMax/ce bookkeeping,
// countID initialization, brace closings) are missing from this
// extraction.
516 void split(
int maxB,
int threshold) {
// Per-bone centroid accumulator, 3 rows per subject.
518 MatrixX cu=MatrixX::Zero(3*
nS,
nB);
519 Eigen::VectorXi s=Eigen::VectorXi::Zero(
nB);
520 for (
int i=0; i<
nV; i++) {
521 cu.col(label(i))+=
u.col(i);
524 for (
int j=0; j<
nB; j++)
if (s(j)!=0) cu.col(j)/=_Scalar(s(j));
527 Eigen::VectorXi seed=Eigen::VectorXi::Constant(
nB, -1);
529 VectorX ce=VectorX::Zero(
nB);
531 #pragma omp parallel for
532 for (
int i=0; i<
nV; i++) {
535 double e=errorVtxBone(i, j,
false);
// Split score favors vertices that are both badly fit and far from
// their bone's centroid.
539 double tmp=e*(
u.col(i)-cu.col(j)).squaredNorm();
541 if ((seed(j)==-1)||(tmp>gMax(j))) {
543 if ((seed(j)==-1)||(tmp>gMax(j))) {
551 _Scalar avgErr=ce.sum()/
nB;
552 for (
int j=0; j<
nB; j++)
553 if ((countID<maxB)&&(s(j)>threshold*2)&&(ce(j)>avgErr/100)) {
554 int newLabel=countID++;
// Relabel the seed's one-ring (Laplacian neighbors) to the new bone.
556 for (
typename SparseMatrix::InnerIterator it(laplacian, i); it; ++it) label(it.row())=newLabel;
// Removes bones that influence fewer than `threshold` vertices: count
// vertices per bone, build a compacting id map (-1 = pruned), shift the
// surviving bones' 4-column transform blocks left, remap vertex labels,
// and shrink m to the new bone count.
// NOTE(review): the counting-loop body, countID initialization, and the
// line updating nB are missing from this extraction.
564 void pruneBones(
int threshold) {
565 Eigen::VectorXi s=Eigen::VectorXi::Zero(
nB);
566 #pragma omp parallel for
567 for (
int i=0; i<
nV; i++) {
572 Eigen::VectorXi newID(
nB);
574 for (
int j=0; j<
nB; j++)
575 if (s(j)<threshold) newID(j)=-1;
else newID(j)=countID++;
// Nothing pruned: every bone survives, keep m and labels as-is.
577 if (countID==
nB)
return;
// Compact the transform columns of surviving bones in place
// (newID(j) <= j, so left-to-right copying is safe).
579 for (
int j=0; j<
nB; j++)
580 if (newID(j)!=-1)
m.template middleCols<4>(newID(j)*4)=
m.template middleCols<4>(j*4);
582 #pragma omp parallel for
583 for (
int i=0; i<
nV; i++) label(i)=newID(label(i));
586 m.conservativeResize(
nF*4,
nB*4);
// --- Fragment: vertex relabeling (orig. lines 593-599) ---
// Resets all labels to -1, then assigns each vertex to its
// minimum-error bone. The enclosing function signature and the gMin /
// assignment lines are missing from this extraction.
593 label=Eigen::VectorXi::Constant(
nV, -1);
594 #pragma omp parallel for
595 for (
int i=0; i<
nV; i++) {
597 for (
int j=0; j<
nB; j++) {
598 _Scalar ej=errorVtxBone(i, j,
false);
599 if ((label(i)==-1)||(gMin>ej)) {
// --- Fragment: weighted v*u^T accumulation (orig. lines 615-627) ---
// Builds vuT: per frame k, sums w(j,i) * homogeneous(v_i) *
// homogeneous(u_i)^T into bone j's 4x4 block, then applies the
// translation-affinity correction scaled by `transAffine` using the
// per-frame accumulator vuTp.
// NOTE(review): the enclosing signature and the line that fills vuTp
// (orig. ~623) are missing from this extraction.
615 vuT=MatrixX::Zero(
nF*4,
nB*4);
616 #pragma omp parallel for
617 for (
int k=0; k<
nF; k++) {
618 MatrixX vuTp=MatrixX::Zero(4,
nB*4);
619 for (
int i=0; i<
nV; i++)
620 for (
typename SparseMatrix::InnerIterator it(
w, i); it; ++it) {
621 Matrix4 tmp=Vector4(
v.vec3(k, i).template cast<_Scalar>().homogeneous())*
u.vec3(
subjectID(k), i).homogeneous().transpose();
622 vuT.blk4(k, it.row())+=it.value()*tmp;
// Rescale correction by the ratio of homogeneous (weight) corners;
// skip bones with zero accumulated mass in vuTp.
625 for (
int j=0; j<
nB; j++)
626 if (vuTp(3, j*4+3)!=0)
627 vuT.blk4(k, j)+=(
transAffine*vuT(k*4+3, j*4+3)/vuTp(3, j*4+3))*vuTp.blk4(0, j);
// Block-sparse storage for 4x4 blocks: outerIdx(j)..outerIdx(j+1)
// ranges index into innerIdx, CSC-style (used that way by the
// transform update above). NOTE(review): the value-matrix member
// declaration (`val`) is on a line not visible in this extraction.
632 struct SparseMatrixBlock {
633 EIGEN_MAKE_ALIGNED_OPERATOR_NEW
635 Eigen::VectorXi innerIdx, outerIdx;
// --- Fragment: weighted u*u^T accumulation (orig. lines 641-685) ---
// Builds the block-sparse Gram matrix uuT: mark which bone pairs
// co-influence some vertex (pos), size outerIdx/innerIdx from that
// pattern, accumulate w_i*w_j * homogeneous(u)*homogeneous(u)^T per
// subject into 4x4 value blocks (lower triangle only), then mirror the
// lower triangle into the upper one.
// NOTE(review): the enclosing signature, the nnz counting / inner-index
// fill, and _u's declaration are on lines missing from this extraction.
641 Eigen::MatrixXi pos=Eigen::MatrixXi::Constant(
nB,
nB, -1);
642 #pragma omp parallel for
643 for (
int i=0; i<
nV; i++)
644 for (
typename SparseMatrix::InnerIterator it(
w, i); it; ++it)
645 for (
typename SparseMatrix::InnerIterator jt(
w, i); jt; ++jt)
646 pos(it.row(), jt.row())=1;
648 uuT.outerIdx.resize(
nB+1);
649 uuT.innerIdx.resize(
nB*
nB);
651 for (
int j=0; j<
nB; j++) {
653 for (
int i=0; i<
nB; i++)
659 uuT.outerIdx(
nB)=
nnz;
660 uuT.innerIdx.conservativeResize(
nnz);
661 uuT.val=MatrixX::Zero(
nS*4,
nnz*4);
663 #pragma omp parallel for
664 for (
int i=0; i<
nV; i++)
665 for (
typename SparseMatrix::InnerIterator it(
w, i); it; ++it)
666 for (
typename SparseMatrix::InnerIterator jt(
w, i); jt; ++jt)
// Accumulate only the lower triangle; the mirror pass below fills
// the symmetric blocks.
667 if (it.row()>=jt.row()) {
668 double _w=it.value()*jt.value();
669 MatrixX _uuT(4*
nS, 4);
671 for (
int s=0; s<
nS; s++) {
672 _u=
u.vec3(s, i).homogeneous();
673 _uuT.blk4(s, 0)=_w*_u*_u.transpose();
675 int p=pos(it.row(), jt.row())*4;
// Element-wise adds into shared uuT.val; presumably guarded by an
// atomic/critical pragma on a line not visible here -- TODO confirm.
676 for (
int c=0; c<4; c++)
677 for (
int r=0; r<4*
nS; r++)
679 uuT.val(r, p+c)+=_uuT(r, c);
682 for (
int i=0; i<
nB; i++)
683 for (
int j=i+1; j<
nB; j++)
685 uuT.val.middleCols(pos(i, j)*4, 4)=uuT.val.middleCols(pos(j, i)*4, 4);
// --- Fragment: transform Gram matrix mTm (orig. lines 696-712) ---
// For every bone pair (i, j) of the upper triangle (enumerated in
// idx), accumulates over frames m_i^T * m_j (top 3 rows only), stored
// per subject, and mirrors the block for i != j.
// NOTE(review): the enclosing signature, the idx fill, nPairs, and the
// per-pair i/j unpacking are on lines missing from this extraction.
696 Eigen::MatrixXi idx(2,
nB*(
nB+1)/2);
698 for (
int i=0; i<
nB; i++)
699 for (
int j=i; j<
nB; j++) {
705 mTm=MatrixX::Zero(
nS*
nB*4,
nB*4);
706 #pragma omp parallel for
707 for (
int p=0; p<nPairs; p++) {
710 for (
int k=0; k<
nF; k++)
711 mTm.blk4(
subjectID(k)*
nB+i, j)+=
m.blk4(k, i).template topRows<3>().transpose()*
m.blk4(k, j).template topRows<3>();
712 if (i!=j)
for (
int s=0; s<
nS; s++) mTm.blk4(s*
nB+j, i)=mTm.blk4(s*
nB+i, j);
// --- Fragment: right-hand side aTb (orig. lines 722-727) ---
// Lazily fills aTb(j, i) = sum over frames of v_i . (m_j applied to
// homogeneous(u_i)), computed only for entries still zero whose
// smoothed prior ws exceeds weightEps (i.e. promising bones only).
// The enclosing signature is on a line missing from this extraction.
722 #pragma omp parallel for
723 for (
int i=0; i<
nV; i++)
724 for (
int j=0; j<
nB; j++)
725 if ((aTb(j, i)==0)&&(ws(j, i)>
weightEps))
726 for (
int k=0; k<
nF; k++)
727 aTb(j, i)+=
v.vec3(k, i).template cast<_Scalar>().dot(
m.blk4(k, j).template topRows<3>()*
u.vec3(
subjectID(k), i).homogeneous());
// Vertex-adjacency Laplacian built by computeSmoothSolver (rows
// normalized by degree there), and its pre-factorized LU solver used
// for weight smoothing.
734 SparseMatrix laplacian;
737 Eigen::SparseLU<SparseMatrix> smoothSolver;
// Builds the edge-weighted mesh Laplacian and pre-factorizes it for
// weight smoothing. Each polygon edge (i, j) gets weight
// 1 / (rms over frames of |edge-length deviation from rest| + epsDis),
// so near-rigidly deforming edges smooth more strongly. Off-diagonals
// get -val, diagonals the accumulated degree d(i), and rows are then
// normalized by d(i).
// NOTE(review): interior lines (epsDis initialization/scaling, i's
// assignment, d accumulation, val's frame loop header, and the critical
// sections guarding triplet pushes) are missing from this extraction.
741 void computeSmoothSolver() {
742 int nFV=(int)
fv.size();
// First pass: accumulate total rest-pose edge length into epsDis.
745 for (
int f=0; f<nFV; f++) {
746 int nf=(int)
fv[f].size();
747 for (
int g=0; g<nf; g++) {
// Edge endpoints are consecutive polygon vertices (wrapping).
749 int j=
fv[f][(g+1)%nf];
750 epsDis+=(
u.col(i)-
u.col(j)).norm();
755 std::vector<Triplet, Eigen::aligned_allocator<Triplet>> triplet;
// Per-vertex degree (diagonal) accumulator.
756 VectorX d=VectorX::Zero(
nV);
758 #pragma omp parallel for
759 for (
int f=0; f<nFV; f++) {
760 int nf=(int)
fv[f].size();
761 for (
int g=0; g<nf; g++) {
763 int j=
fv[f][(g+1)%nf];
// Deviation of the animated edge length from the rest length,
// accumulated over frames of every subject.
767 for (
int s=0; s<
nS; s++) {
768 double du=(
u.vec3(s, i)-
u.vec3(s, j)).norm();
770 val+=pow((
v.vec3(k, i).template cast<_Scalar>()-
v.vec3(k, j).template cast<_Scalar>()).norm()-du, 2);
// High weight for edges that deform little (near-rigid).
772 val=1/(sqrt(val/
nF)+epsDis);
775 triplet.push_back(Triplet(i, j, -val));
780 triplet.push_back(Triplet(j, i, -val));
// Diagonal entries complete the (unnormalized) Laplacian.
787 for (
int i=0; i<
nV; i++)
788 triplet.push_back(Triplet(i, i, d(i)));
790 laplacian.resize(
nV,
nV);
791 laplacian.setFromTriplets(triplet.begin(), triplet.end());
// Row-normalize by degree so each row sums consistently.
793 for (
int i=0; i<
nV; i++)
794 if (d(i)!=0) laplacian.row(i)/=d(i);
// Pre-factorize once; reused by every smoothing solve.
797 smoothSolver.compute(laplacian);
// --- Fragment: weight smoothing pass (orig. lines 807-815) ---
// Smooths each bone's weight column with the pre-factorized Laplacian
// solver, transposes to per-vertex columns, clamps negatives, and
// renormalizes each vertex to a partition of unity (uniform fallback
// when the remaining mass is below 0.1). The enclosing signature and
// the pre-solve setup of ws are missing from this extraction.
807 #pragma omp parallel for
808 for (
int j=0; j<
nB; j++) ws.col(j)=smoothSolver.solve(ws.col(j));
809 ws.transposeInPlace();
811 #pragma omp parallel for
812 for (
int i=0; i<
nV; i++) {
813 ws.col(i)=ws.col(i).cwiseMax(0.0);
814 _Scalar si=ws.col(i).sum();
// Degenerate column: fall back to uniform weights over all bones.
815 if (si<_Scalar(0.1)) ws.col(i)=VectorX::Constant(
nB, _Scalar(1)/
nB);
else ws.col(i)/=si;
// Constrained least-squares solver used for the per-vertex weight
// sub-problems in the weight-update loop above.
820 ConvexLS<_Scalar> wSolver;
// Builds the symmetric nB x nB normal matrix for vertex i:
// aTa(j1, j2) = sum over subjects of
//   homogeneous(u_i) . (mTm block (j1, j2) * homogeneous(u_i)),
// filling the upper triangle then mirroring it.
// NOTE(review): the closing braces of the loops are on lines not
// visible in this extraction.
826 void compute_aTa(
int i, MatrixX& aTa) {
827 aTa=MatrixX::Zero(
nB,
nB);
828 for (
int j1=0; j1<
nB; j1++)
829 for (
int j2=j1; j2<
nB; j2++) {
830 for (
int s=0; s<
nS; s++) aTa(j1, j2)+=
u.vec3(s, i).homogeneous().dot(mTm.blk4(s*
nB+j1, j2)*
u.vec3(s, i).homogeneous());
831 if (j1!=j2) aTa(j2, j1)=aTa(j1, j2);
838 #ifdef DEM_BONES_DEM_BONES_MAT_BLOCKS_UNDEFINED
843 #undef DEM_BONES_MAT_BLOCKS