// arap_dof.cpp

// This file is part of libigl, a simple c++ geometry processing library.
//
// Copyright (C) 2013 Alec Jacobson <alecjacobson@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
#include "arap_dof.h"
#include "cotmatrix.h"
#include "massmatrix.h"
#include "speye.h"
#include "repdiag.h"
#include "repmat.h"
#include "slice.h"
#include "colon.h"
#include "is_sparse.h"
#include "mode.h"
#include "is_symmetric.h"
#include "group_sum_matrix.h"
#include "arap_rhs.h"
#include "covariance_scatter_matrix.h"
#include "fit_rotations.h"
#include "verbose.h"
#include "print_ijv.h"
#include "get_seconds_hires.h"
//#include "MKLEigenInterface.h"
#include "min_quad_dense.h"
#include "get_seconds.h"
#include "columnize.h"

// If defined, early exit is not supported, i.e., the solver always takes a
// fixed number of iterations
#define IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT

// A careful derivation of this implementation is given in the corresponding
// matlab function arap_dof.m
template <typename LbsMatrixType, typename SSCALAR>
IGL_INLINE bool igl::arap_dof_precomputation(
  const Eigen::MatrixXd & V,
  const Eigen::MatrixXi & F,
  const LbsMatrixType & M,
  const Eigen::Matrix<int,Eigen::Dynamic,1> & G,
  ArapDOFData<LbsMatrixType, SSCALAR> & data)
{
  using namespace Eigen;
  typedef Matrix<SSCALAR, Dynamic, Dynamic> MatrixXS;
  // number of mesh (domain) vertices
  int n = V.rows();
  // cache problem size
  data.n = n;
  // dimension of mesh
  data.dim = V.cols();
  assert(data.dim == M.rows()/n);
  assert(data.dim*n == M.rows());
  if(data.dim == 3)
  {
    // Check whether the z-coordinate is identically zero
    if(V.col(2).minCoeff() == 0 && V.col(2).maxCoeff() == 0)
    {
      data.effective_dim = 2;
    }
  }else
  {
    data.effective_dim = data.dim;
  }
  // Number of handles
  data.m = M.cols()/data.dim/(data.dim+1);
  assert(data.m*data.dim*(data.dim+1) == M.cols());
  //assert(m == C.rows());
  //printf("n=%d; dim=%d; m=%d;\n",n,data.dim,data.m);
  // Build cotangent laplacian
  SparseMatrix<double> Lcot;
  //printf("cotmatrix()\n");
  cotmatrix(V,F,Lcot);
  // Discrete laplacian (should be minus matlab version)
  SparseMatrix<double> Lapl = -2.0*Lcot;
#ifdef EXTREME_VERBOSE
  cout<<"LaplIJV=["<<endl;print_ijv(Lapl,1);cout<<endl<<"];"<<
    endl<<"Lapl=sparse(LaplIJV(:,1),LaplIJV(:,2),LaplIJV(:,3),"<<
    Lapl.rows()<<","<<Lapl.cols()<<");"<<endl;
#endif
  // Get group sum scatter matrix; when applied it sums all entries of the
  // same group according to G
  SparseMatrix<double> G_sum;
  if(G.size() == 0)
  {
    speye(n,G_sum);
  }else
  {
    // groups are defined per vertex, convert to per face using mode
    Eigen::Matrix<int,Eigen::Dynamic,1> GG;
    if(data.energy == ARAP_ENERGY_TYPE_ELEMENTS)
    {
      MatrixXi GF(F.rows(),F.cols());
      for(int j = 0;j<F.cols();j++)
      {
        Matrix<int,Eigen::Dynamic,1> GFj;
        slice(G,F.col(j),GFj);
        GF.col(j) = GFj;
      }
      mode<int>(GF,2,GG);
    }else
    {
      GG=G;
    }
    //printf("group_sum_matrix()\n");
    group_sum_matrix(GG,G_sum);
  }
#ifdef EXTREME_VERBOSE
  cout<<"G_sumIJV=["<<endl;print_ijv(G_sum,1);cout<<endl<<"];"<<
    endl<<"G_sum=sparse(G_sumIJV(:,1),G_sumIJV(:,2),G_sumIJV(:,3),"<<
    G_sum.rows()<<","<<G_sum.cols()<<");"<<endl;
#endif
  // Get covariance scatter matrix; when applied it collects the covariance
  // matrices used to fit rotations during optimization
  SparseMatrix<double> CSM;
  //printf("covariance_scatter_matrix()\n");
  covariance_scatter_matrix(V,F,data.energy,CSM);
#ifdef EXTREME_VERBOSE
  cout<<"CSMIJV=["<<endl;print_ijv(CSM,1);cout<<endl<<"];"<<
    endl<<"CSM=sparse(CSMIJV(:,1),CSMIJV(:,2),CSMIJV(:,3),"<<
    CSM.rows()<<","<<CSM.cols()<<");"<<endl;
#endif
  // Build the covariance matrix "constructor". This is a set of *scatter*
  // matrices that, when multiplied on the right by the column of
  // transformation matrix entries (the degrees of freedom) L, give a stack of
  // dim-by-1 covariance matrix columns, one column in the stack for each
  // rotation *group*. The output is a list of matrices because we construct
  // each column in the stack of covariance matrices with an independent
  // matrix-vector multiplication.
  //
  // We want to build S which is a stack of dim by dim covariance matrices.
  // Thus S is dim*g by dim, where dim is the number of dimensions and g is the
  // number of groups. We can precompute dim matrices CSM_M such that column i
  // in S is computed as S(:,i) = CSM_M{i} * L, where L is a column of the
  // skinning transformation matrix values. To be clear, the covariance matrix
  // for group k is then given as the dim by dim matrix pulled from the stack:
  // S((k-1)*dim + 1:dim,:)
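  //
  // For example (illustration only, not code from this file): with dim=3 and
  // g=2 groups, S is 6x3; rows 1:3 hold the covariance matrix of group 1,
  // rows 4:6 hold that of group 2, and S(:,2) = CSM_M{2} * L fills the second
  // column of both blocks with a single matrix-vector product.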
  // Apply group sum to each dimension's block of covariance scatter matrix
  SparseMatrix<double> G_sum_dim;
  repdiag(G_sum,data.dim,G_sum_dim);
  CSM = G_sum_dim * CSM;
#ifdef EXTREME_VERBOSE
  cout<<"CSMIJV=["<<endl;print_ijv(CSM,1);cout<<endl<<"];"<<
    endl<<"CSM=sparse(CSMIJV(:,1),CSMIJV(:,2),CSMIJV(:,3),"<<
    CSM.rows()<<","<<CSM.cols()<<");"<<endl;
#endif
  //printf("CSM_M()\n");
  // Precompute CSM times M for each dimension
  data.CSM_M.resize(data.dim);
#ifdef EXTREME_VERBOSE
  cout<<"data.CSM_M = cell("<<data.dim<<",1);"<<endl;
#endif
  // span of integers from 0 to n-1
  Eigen::Matrix<int,Eigen::Dynamic,1> span_n(n);
  for(int i = 0;i<n;i++)
  {
    span_n(i) = i;
  }
  // span of integers from 0 to M.cols()-1
  Eigen::Matrix<int,Eigen::Dynamic,1> span_mlbs_cols(M.cols());
  for(int i = 0;i<M.cols();i++)
  {
    span_mlbs_cols(i) = i;
  }
  // number of groups
  int k = CSM.rows()/data.dim;
  for(int i = 0;i<data.dim;i++)
  {
    //printf("CSM_M(): Mi\n");
    LbsMatrixType M_i;
    //printf("CSM_M(): slice\n");
    slice(M,(span_n.array()+i*n).matrix().eval(),span_mlbs_cols,M_i);
    LbsMatrixType M_i_dim;
    data.CSM_M[i].resize(k*data.dim,data.m*data.dim*(data.dim+1));
    assert(data.CSM_M[i].cols() == M.cols());
    for(int j = 0;j<data.dim;j++)
    {
      SparseMatrix<double> CSMj;
      //printf("CSM_M(): slice\n");
      slice(
        CSM,
        colon<int>(j*k,(j+1)*k-1),
        colon<int>(j*n,(j+1)*n-1),
        CSMj);
      assert(CSMj.rows() == k);
      assert(CSMj.cols() == n);
      LbsMatrixType CSMjM_i = CSMj * M_i;
      if(is_sparse(CSMjM_i))
      {
        // Convert to full
        //printf("CSM_M(): full\n");
        MatrixXd CSMjM_ifull(CSMjM_i);
        // printf("CSM_M[%d]: %d %d\n",i,data.CSM_M[i].rows(),data.CSM_M[i].cols());
        // printf("CSM_M[%d].block(%d*%d=%d,0,%d,%d): %d %d\n",i,j,k,CSMjM_i.rows(),CSMjM_i.cols(),
        //   data.CSM_M[i].block(j*k,0,CSMjM_i.rows(),CSMjM_i.cols()).rows(),
        //   data.CSM_M[i].block(j*k,0,CSMjM_i.rows(),CSMjM_i.cols()).cols());
        // printf("CSM_MjMi: %d %d\n",i,CSMjM_i.rows(),CSMjM_i.cols());
        // printf("CSM_MjM_ifull: %d %d\n",i,CSMjM_ifull.rows(),CSMjM_ifull.cols());
        data.CSM_M[i].block(j*k,0,CSMjM_i.rows(),CSMjM_i.cols()) = CSMjM_ifull;
      }else
      {
        data.CSM_M[i].block(j*k,0,CSMjM_i.rows(),CSMjM_i.cols()) = CSMjM_i;
      }
    }
#ifdef EXTREME_VERBOSE
    cout<<"CSM_Mi=["<<endl<<data.CSM_M[i]<<endl<<"];"<<endl;
#endif
  }
  // precompute arap_rhs matrix
  //printf("arap_rhs()\n");
  SparseMatrix<double> K;
  arap_rhs(V,F,V.cols(),data.energy,K);
//#ifdef EXTREME_VERBOSE
//  cout<<"KIJV=["<<endl;print_ijv(K,1);cout<<endl<<"];"<<
//    endl<<"K=sparse(KIJV(:,1),KIJV(:,2),KIJV(:,3),"<<
//    K.rows()<<","<<K.cols()<<");"<<endl;
//#endif
  // Precompute left multiplication by M and right multiplication by G_sum
  SparseMatrix<double> G_sumT = G_sum.transpose();
  SparseMatrix<double> G_sumT_dim_dim;
  repdiag(G_sumT,data.dim*data.dim,G_sumT_dim_dim);
  LbsMatrixType MT = M.transpose();
  // If this is a bottleneck then consider reordering matrix multiplication
  data.M_KG = -4.0 * (MT * (K * G_sumT_dim_dim));
//#ifdef EXTREME_VERBOSE
//  cout<<"data.M_KGIJV=["<<endl;print_ijv(data.M_KG,1);cout<<endl<<"];"<<
//    endl<<"data.M_KG=sparse(data.M_KGIJV(:,1),data.M_KGIJV(:,2),data.M_KGIJV(:,3),"<<
//    data.M_KG.rows()<<","<<data.M_KG.cols()<<");"<<endl;
//#endif
  // Precompute system matrix
  //printf("A()\n");
  SparseMatrix<double> A;
  repdiag(Lapl,data.dim,A);
  data.Q = MT * (A * M);
//#ifdef EXTREME_VERBOSE
//  cout<<"QIJV=["<<endl;print_ijv(data.Q,1);cout<<endl<<"];"<<
//    endl<<"Q=sparse(QIJV(:,1),QIJV(:,2),QIJV(:,3),"<<
//    data.Q.rows()<<","<<data.Q.cols()<<");"<<endl;
//#endif
  // Always do dynamics precomputation so we can hot-switch
  //if(data.with_dynamics)
  //{
  // Build mass matrix
  SparseMatrix<double> Mass;
  //printf("massmatrix()\n");
  massmatrix(V,F,(F.cols()>3?MASSMATRIX_TYPE_BARYCENTRIC:MASSMATRIX_TYPE_VORONOI),Mass);
  //cout<<"MIJV=["<<endl;print_ijv(Mass,1);cout<<endl<<"];"<<
  //  endl<<"M=sparse(MIJV(:,1),MIJV(:,2),MIJV(:,3),"<<
  //  Mass.rows()<<","<<Mass.cols()<<");"<<endl;
  //speye(data.n,Mass);
  SparseMatrix<double> Mass_rep;
  repdiag(Mass,data.dim,Mass_rep);
  // Multiply on either side by the weights matrix (should be dense)
  data.Mass_tilde = MT * Mass_rep * M;
  MatrixXd ones(data.dim*data.n,data.dim);
  for(int i = 0;i<data.n;i++)
  {
    for(int d = 0;d<data.dim;d++)
    {
      ones(i+d*data.n,d) = 1;
    }
  }
  data.fgrav = MT * (Mass_rep * ones);
  data.fext = MatrixXS::Zero(MT.rows(),1);
  //data.fgrav = MT * (ones);
  //}
  // This may/should be superfluous
  //printf("is_symmetric()\n");
  if(!is_symmetric(data.Q))
  {
    //printf("Fixing symmetry...\n");
    // "Fix" symmetry
    LbsMatrixType QT = data.Q.transpose();
    LbsMatrixType Q_copy = data.Q;
    data.Q = 0.5*(Q_copy+QT);
    // Check that ^^^ this really worked. It doesn't always
    //assert(is_symmetric(*Q));
  }
  //printf("arap_dof_precomputation() succeeded... so far...\n");
  verbose("Number of handles: %i\n", data.m);
  return true;
}

/////////////////////////////////////////////////////////////////////////
//
// STATIC FUNCTIONS (These should be removed or properly defined)
//
/////////////////////////////////////////////////////////////////////////
namespace igl
{
  // returns maximal difference of 'blok' from a scalar multiple of the 3x3
  // identity:
  template <typename SSCALAR>
  inline static SSCALAR maxBlokErr(const Eigen::Matrix3f &blok)
  {
    SSCALAR mD;
    SSCALAR value = blok(0,0);
    SSCALAR diff1 = fabs(blok(1,1) - value);
    SSCALAR diff2 = fabs(blok(2,2) - value);
    if (diff1 > diff2) mD = diff1;
    else mD = diff2;
    for (int v=0; v<3; v++)
    {
      for (int w=0; w<3; w++)
      {
        if (v == w)
        {
          continue;
        }
        if (mD < fabs(blok(v, w)))
        {
          mD = fabs(blok(v, w));
        }
      }
    }
    return mD;
  }
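  // Example (illustration only, not original to this file): for blok = s*I
  // the result is 0; if the diagonal is constant and a single off-diagonal
  // entry is nonzero, the result is the absolute value of that entry.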
  // converts CSM_M_SSCALAR[0], CSM_M_SSCALAR[1], CSM_M_SSCALAR[2] into one
  // "condensed" matrix CSM while checking that we're not losing any
  // information in the process; specifically, returns the maximal difference
  // from scaled 3x3 identity blocks, which should be a pretty small number
  template <typename MatrixXS>
  static typename MatrixXS::Scalar condense_CSM(
    const std::vector<MatrixXS> &CSM_M_SSCALAR,
    int numBones,
    int dim,
    MatrixXS &CSM)
  {
    const int numRows = CSM_M_SSCALAR[0].rows();
    assert(CSM_M_SSCALAR[0].cols() == dim*(dim+1)*numBones);
    assert(CSM_M_SSCALAR[1].cols() == dim*(dim+1)*numBones);
    assert(CSM_M_SSCALAR[2].cols() == dim*(dim+1)*numBones);
    assert(CSM_M_SSCALAR[1].rows() == numRows);
    assert(CSM_M_SSCALAR[2].rows() == numRows);
    const int numCols = (dim + 1)*numBones;
    CSM.resize(numRows, numCols);
    typedef typename MatrixXS::Scalar SSCALAR;
    SSCALAR maxDiff = 0.0f;
    for (int r=0; r<numRows; r++)
    {
      for (int coord=0; coord<dim+1; coord++)
      {
        for (int b=0; b<numBones; b++)
        {
          // this is just a test whether we really have a multiple of the 3x3
          // identity
          Eigen::Matrix3f blok;
          for (int v=0; v<3; v++)
          {
            for (int w=0; w<3; w++)
            {
              blok(v,w) = CSM_M_SSCALAR[v](r, coord*(numBones*dim) + b + w*numBones);
            }
          }
          //SSCALAR value[3];
          //for (int v=0; v<3; v++)
          //  CSM_M_SSCALAR[v](r, coord*(numBones*dim) + b + v*numBones);
          SSCALAR mD = maxBlokErr<SSCALAR>(blok);
          if (mD > maxDiff) maxDiff = mD;
          // use the first value:
          CSM(r, coord*numBones + b) = blok(0,0);
        }
      }
    }
    return maxDiff;
  }
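  // Note (explanatory, not original to this file): because each 3x3 block is
  // (approximately) a scalar multiple of the identity, the condensed CSM lets
  // the update step compute all dim columns of S at once via
  //   S = CSM * splitColumns(L)
  // which is equivalent to the per-coordinate products S(:,i) = CSM_M{i} * L.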
  // splits x_0, ... , x_dim coordinates in column vector 'L' into a
  // numBones*(dimp1) x dim matrix 'Lsep';
  // assumes 'Lsep' has already been preallocated
  //
  // is this the same as uncolumnize? no.
  template <typename MatL, typename MatLsep>
  static void splitColumns(
    const MatL &L,
    int numBones,
    int dim,
    int dimp1,
    MatLsep &Lsep)
  {
    assert(L.cols() == 1);
    assert(L.rows() == dim*(dimp1)*numBones);
    assert(Lsep.rows() == (dimp1)*numBones && Lsep.cols() == dim);
    for (int b=0; b<numBones; b++)
    {
      for (int coord=0; coord<dimp1; coord++)
      {
        for (int c=0; c<dim; c++)
        {
          Lsep(coord*numBones + b, c) = L(coord*numBones*dim + c*numBones + b, 0);
        }
      }
    }
  }
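  // Example (illustration only): with numBones=2 and dim=3, the entry for
  // bone b=1, coordinate coord=0, dimension c=2 lives at
  // L(0*2*3 + 2*2 + 1) = L(5) and ends up at Lsep(0*2 + 1, 2) = Lsep(1,2).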
  // the inverse of splitColumns, i.e., takes the numBones*(dimp1) x dim matrix
  // 'Lsep' and merges the dimensions into the column vector 'L' (which is
  // assumed to be already allocated):
  //
  // is this the same as columnize? no.
  template <typename MatrixXS>
  static void mergeColumns(const MatrixXS &Lsep, int numBones, int dim, int dimp1, MatrixXS &L)
  {
    assert(L.cols() == 1);
    assert(L.rows() == dim*(dimp1)*numBones);
    assert(Lsep.rows() == (dimp1)*numBones && Lsep.cols() == dim);
    for (int b=0; b<numBones; b++)
    {
      for (int coord=0; coord<dimp1; coord++)
      {
        for (int c=0; c<dim; c++)
        {
          L(coord*numBones*dim + c*numBones + b, 0) = Lsep(coord*numBones + b, c);
        }
      }
    }
  }
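  // Note (explanatory, not original to this file): mergeColumns exactly
  // inverts splitColumns, so applying splitColumns followed by mergeColumns
  // reproduces the original column vector L.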
  // converts "Solve1", the "rotations" part of the FullSolve matrix (the
  // first part), into one "condensed" matrix CSolve1 while checking that
  // we're not losing any information in the process; specifically, returns
  // the maximal difference from scaled 3x3 identity blocks, which should be a
  // pretty small number
  template <typename MatrixXS>
  static typename MatrixXS::Scalar condense_Solve1(MatrixXS &Solve1, int numBones, int numGroups, int dim, MatrixXS &CSolve1)
  {
    assert(Solve1.rows() == dim*(dim + 1)*numBones);
    assert(Solve1.cols() == dim*dim*numGroups);
    typedef typename MatrixXS::Scalar SSCALAR;
    SSCALAR maxDiff = 0.0f;
    CSolve1.resize((dim + 1)*numBones, dim*numGroups);
    for (int rowCoord=0; rowCoord<dim+1; rowCoord++)
    {
      for (int b=0; b<numBones; b++)
      {
        for (int colCoord=0; colCoord<dim; colCoord++)
        {
          for (int g=0; g<numGroups; g++)
          {
            Eigen::Matrix3f blok;
            for (int r=0; r<3; r++)
            {
              for (int c=0; c<3; c++)
              {
                blok(r, c) = Solve1(rowCoord*numBones*dim + r*numBones + b, colCoord*numGroups*dim + c*numGroups + g);
              }
            }
            SSCALAR mD = maxBlokErr<SSCALAR>(blok);
            if (mD > maxDiff) maxDiff = mD;
            CSolve1(rowCoord*numBones + b, colCoord*numGroups + g) = blok(0,0);
          }
        }
      }
    }
    return maxDiff;
  }
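  // Note (explanatory, not original to this file): condense_Solve1 plays the
  // same role for the solve matrix as condense_CSM does for the covariance
  // scatter matrices: each 3x3 block of Solve1 is (approximately) a scalar
  // multiple of the identity, so a single (dim+1)*numBones x dim*numGroups
  // matrix suffices, storing dim*dim times fewer entries.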
}

template <typename LbsMatrixType, typename SSCALAR>
IGL_INLINE bool igl::arap_dof_recomputation(
  const Eigen::Matrix<int,Eigen::Dynamic,1> & fixed_dim,
  const Eigen::SparseMatrix<double> & A_eq,
  ArapDOFData<LbsMatrixType, SSCALAR> & data)
{
  using namespace Eigen;
  typedef Matrix<SSCALAR, Dynamic, Dynamic> MatrixXS;
  LbsMatrixType * Q;
  LbsMatrixType Qdyn;
  if(data.with_dynamics)
  {
    // add 1/(timestep^2) times the mass matrix to the quadratic coefficients
    // matrix
    // Might be missing a 0.5 here
    LbsMatrixType Q_copy = data.Q;
    Qdyn = Q_copy + (1.0/(data.h*data.h))*data.Mass_tilde;
    Q = &Qdyn;
    // This may/should be superfluous
    //printf("is_symmetric()\n");
    if(!is_symmetric(*Q))
    {
      //printf("Fixing symmetry...\n");
      // "Fix" symmetry
      LbsMatrixType QT = (*Q).transpose();
      LbsMatrixType Q_copy = *Q;
      *Q = 0.5*(Q_copy+QT);
      // Check that ^^^ this really worked. It doesn't always
      //assert(is_symmetric(*Q));
    }
  }else
  {
    Q = &data.Q;
  }
  assert((int)data.CSM_M.size() == data.dim);
  assert(A_eq.cols() == data.m*data.dim*(data.dim+1));
  data.fixed_dim = fixed_dim;
  if(fixed_dim.size() > 0)
  {
    assert(fixed_dim.maxCoeff() < data.m*data.dim*(data.dim+1));
    assert(fixed_dim.minCoeff() >= 0);
  }
#ifdef EXTREME_VERBOSE
  cout<<"data.fixed_dim=["<<endl<<data.fixed_dim<<endl<<"]+1;"<<endl;
#endif
  // Compute dense solve matrix (alternative to matrix factorization)
  //printf("min_quad_dense_precompute()\n");
  MatrixXd Qfull(*Q);
  MatrixXd A_eqfull(A_eq);
  MatrixXd M_Solve;
  double timer0_start = get_seconds_hires();
  bool use_lu = data.effective_dim != 2;
  //use_lu = false;
  //printf("use_lu: %s\n",(use_lu?"TRUE":"FALSE"));
  min_quad_dense_precompute(Qfull, A_eqfull, use_lu,M_Solve);
  double timer0_end = get_seconds_hires();
  verbose("Bob timing: %.20f\n", (timer0_end - timer0_start)*1000.0);
  // Precompute full solve matrix:
  const int fsRows = data.m * data.dim * (data.dim + 1); // 12 * number_of_bones
  const int fsCols1 = data.M_KG.cols(); // 9 * number_of_rotation_groups
  const int fsCols2 = A_eq.rows(); // number_of_posConstraints
  data.M_FullSolve.resize(fsRows, fsCols1 + fsCols2);
  // note the magical multiplicative constant "-0.5", I've no idea why it has
  // to be there :)
  data.M_FullSolve <<
    (-0.5 * M_Solve.block(0, 0, fsRows, fsRows) * data.M_KG).template cast<SSCALAR>(),
    M_Solve.block(0, fsRows, fsRows, fsCols2).template cast<SSCALAR>();
  if(data.with_dynamics)
  {
    printf(
      "---------------------------------------------------------------------\n"
      "\n\n\nWITH DYNAMICS recomputation\n\n\n"
      "---------------------------------------------------------------------\n"
      );
    // Also need to save Π1 before it gets multiplied by Ktilde (aka M_KG)
    data.Pi_1 = M_Solve.block(0, 0, fsRows, fsRows).template cast<SSCALAR>();
  }
  // Precompute condensed matrices,
  // first CSM:
  std::vector<MatrixXS> CSM_M_SSCALAR;
  CSM_M_SSCALAR.resize(data.dim);
  for (int i=0; i<data.dim; i++) CSM_M_SSCALAR[i] = data.CSM_M[i].template cast<SSCALAR>();
  SSCALAR maxErr1 = condense_CSM(CSM_M_SSCALAR, data.m, data.dim, data.CSM);
  verbose("condense_CSM maxErr = %.15f (this should be close to zero)\n", maxErr1);
  assert(fabs(maxErr1) < 1e-5);
  // and then solveBlock1:
  // number of groups
  const int k = data.CSM_M[0].rows()/data.dim;
  MatrixXS SolveBlock1 = data.M_FullSolve.block(0, 0, data.M_FullSolve.rows(), data.dim * data.dim * k);
  SSCALAR maxErr2 = condense_Solve1(SolveBlock1, data.m, k, data.dim, data.CSolveBlock1);
  verbose("condense_Solve1 maxErr = %.15f (this should be close to zero)\n", maxErr2);
  assert(fabs(maxErr2) < 1e-5);
  return true;
}
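// (Explanatory note, not original to this file: entries of fixed_dim index
// into the column of transformation values L. Following the ordering used by
// splitColumns/mergeColumns above, the entry for bone b, affine coordinate
// coord (0..dim) and output dimension c sits at index coord*m*dim + c*m + b,
// with m = data.m handles.)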
template <typename LbsMatrixType, typename SSCALAR>
IGL_INLINE bool igl::arap_dof_update(
  const ArapDOFData<LbsMatrixType, SSCALAR> & data,
  const Eigen::Matrix<double,Eigen::Dynamic,1> & B_eq,
  const Eigen::MatrixXd & L0,
  const int max_iters,
  const double
#ifdef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
  tol,
#else
  /*tol*/,
#endif
  Eigen::MatrixXd & L
  )
{
  using namespace Eigen;
  typedef Matrix<SSCALAR, Dynamic, Dynamic> MatrixXS;
#ifdef ARAP_GLOBAL_TIMING
  double timer_start = get_seconds_hires();
#endif
  // number of dimensions
  assert((int)data.CSM_M.size() == data.dim);
  assert((int)L0.size() == (data.m)*data.dim*(data.dim+1));
  assert(max_iters >= 0);
  assert(tol >= 0);
  // timing variables
  double
    sec_start,
    sec_covGather,
    sec_fitRotations,
    //sec_rhs,
    sec_prepMult,
    sec_solve, sec_end;
  assert(L0.cols() == 1);
#ifdef EXTREME_VERBOSE
  cout<<"dim="<<data.dim<<";"<<endl;
  cout<<"m="<<data.m<<";"<<endl;
#endif
  // number of groups
  const int k = data.CSM_M[0].rows()/data.dim;
  for(int i = 0;i<data.dim;i++)
  {
    assert(data.CSM_M[i].rows()/data.dim == k);
  }
#ifdef EXTREME_VERBOSE
  cout<<"k="<<k<<";"<<endl;
#endif
  // resize output and initialize with initial guess
  L = L0;
#ifndef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
  // Keep track of last solution
  MatrixXS L_prev;
#endif
  // We will be iterating on L_SSCALAR; only at the end do we convert back to
  // double
  MatrixXS L_SSCALAR = L.cast<SSCALAR>();
  int iters = 0;
#ifndef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
  double max_diff = tol+1;
#endif
  MatrixXS S(k*data.dim,data.dim);
  MatrixXS R(data.dim,data.dim*k);
  Eigen::Matrix<SSCALAR,Eigen::Dynamic,1> Rcol(data.dim * data.dim * k);
  Matrix<SSCALAR,Dynamic,1> B_eq_SSCALAR = B_eq.cast<SSCALAR>();
  Matrix<SSCALAR,Dynamic,1> B_eq_fix_SSCALAR;
  Matrix<SSCALAR,Dynamic,1> L0SSCALAR = L0.cast<SSCALAR>();
  slice(L0SSCALAR, data.fixed_dim, B_eq_fix_SSCALAR);
  //MatrixXS rhsFull(Rcol.rows() + B_eq.rows() + B_eq_fix_SSCALAR.rows(), 1);
  MatrixXS Lsep(data.m*(data.dim + 1), 3);
  const MatrixXS L_part2 =
    data.M_FullSolve.block(0, Rcol.rows(), data.M_FullSolve.rows(), B_eq_SSCALAR.rows()) * B_eq_SSCALAR;
  const MatrixXS L_part3 =
    data.M_FullSolve.block(0, Rcol.rows() + B_eq_SSCALAR.rows(), data.M_FullSolve.rows(), B_eq_fix_SSCALAR.rows()) * B_eq_fix_SSCALAR;
  MatrixXS L_part2and3 = L_part2 + L_part3;
  // preallocate workspace variables:
  MatrixXS Rxyz(k*data.dim, data.dim);
  MatrixXS L_part1xyz((data.dim + 1) * data.m, data.dim);
  MatrixXS L_part1(data.dim * (data.dim + 1) * data.m, 1);
#ifdef ARAP_GLOBAL_TIMING
  double timer_prepFinished = get_seconds_hires();
#endif
#ifdef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
  while(iters < max_iters)
#else
  while(iters < max_iters && max_diff > tol)
#endif
  {
    if(data.print_timings)
    {
      sec_start = get_seconds_hires();
    }
#ifndef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
    L_prev = L_SSCALAR;
#endif
    ///////////////////////////////////////////////////////////////////////////
    // Local step: Fix positions, fit rotations
    ///////////////////////////////////////////////////////////////////////////
    // Gather covariance matrices
    splitColumns(L_SSCALAR, data.m, data.dim, data.dim + 1, Lsep);
    S = data.CSM * Lsep;
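    // (Explanatory note, not original to this file: S now stacks k dim-by-dim
    // covariance matrices, one per rotation group; thanks to condense_CSM this
    // single product replaces the per-coordinate products S(:,i) = CSM_M{i}*L.)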
    // interestingly, this doesn't seem to be so slow, but
    // MKL is still 2x faster (probably due to AVX)
    //#ifdef IGL_ARAP_DOF_DOUBLE_PRECISION_SOLVE
    //  MKL_matMatMult_double(S, data.CSM, Lsep);
    //#else
    //  MKL_matMatMult_single(S, data.CSM, Lsep);
    //#endif
    if(data.print_timings)
    {
      sec_covGather = get_seconds_hires();
    }
#ifdef EXTREME_VERBOSE
    cout<<"S=["<<endl<<S<<endl<<"];"<<endl;
#endif
    // Fit rotations to covariance matrices
    if(data.effective_dim == 2)
    {
      fit_rotations_planar(S,R);
    }else
    {
#ifdef __SSE__ // fit_rotations_SSE will convert to float if necessary
      fit_rotations_SSE(S,R);
#else
      fit_rotations(S,false,R);
#endif
    }
#ifdef EXTREME_VERBOSE
    cout<<"R=["<<endl<<R<<endl<<"];"<<endl;
#endif
    if(data.print_timings)
    {
      sec_fitRotations = get_seconds_hires();
    }
    ///////////////////////////////////////////////////////////////////////////
    // "Global" step: fix rotations per mesh vertex, solve for
    // linear transformations at handles
    ///////////////////////////////////////////////////////////////////////////
    // all this shuffling is wasteful and not completely negligible time-wise;
    // TODO: change fit_rotations_XXX so it returns R in the format ready for
    // CSolveBlock1 multiplication
    columnize(R, k, 2, Rcol);
#ifdef EXTREME_VERBOSE
    cout<<"Rcol=["<<endl<<Rcol<<endl<<"];"<<endl;
#endif
    splitColumns(Rcol, k, data.dim, data.dim, Rxyz);
    if(data.print_timings)
    {
      sec_prepMult = get_seconds_hires();
    }
    L_part1xyz = data.CSolveBlock1 * Rxyz;
    //#ifdef IGL_ARAP_DOF_DOUBLE_PRECISION_SOLVE
    //  MKL_matMatMult_double(L_part1xyz, data.CSolveBlock1, Rxyz);
    //#else
    //  MKL_matMatMult_single(L_part1xyz, data.CSolveBlock1, Rxyz);
    //#endif
    mergeColumns(L_part1xyz, data.m, data.dim, data.dim + 1, L_part1);
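    // (Explanatory note, not original to this file: via the condensed
    // CSolveBlock1 and the split/merge helpers, L_part1 equals the product of
    // the "rotations" block of M_FullSolve with Rcol, computed with a much
    // smaller matrix.)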
    if(data.with_dynamics)
    {
      // Consider reordering or precomputing matrix multiplications
      MatrixXS L_part1_dyn(data.dim * (data.dim + 1) * data.m, 1);
      // Eigen can't parse this:
      //L_part1_dyn =
      //  -(2.0/(data.h*data.h)) * data.Pi_1 * data.Mass_tilde * data.L0 +
      //  (1.0/(data.h*data.h)) * data.Pi_1 * data.Mass_tilde * data.Lm1;
      // -1.0 because we've moved these linear terms to the right hand side
      //MatrixXS temp = -1.0 *
      //  ((-2.0/(data.h*data.h)) * data.L0.array() +
      //  (1.0/(data.h*data.h)) * data.Lm1.array()).matrix();
      //MatrixXS temp = -1.0 *
      //  ( (-1.0/(data.h*data.h)) * data.L0.array() +
      //  (1.0/(data.h*data.h)) * data.Lm1.array()
      //  (-1.0/(data.h*data.h)) * data.L0.array() +
      //  ).matrix();
      //Lvel0 = (1.0/(data.h)) * data.Lm1.array() - data.L0.array();
      MatrixXS temp = -1.0 *
        ( (-1.0/(data.h*data.h)) * data.L0.array() +
          (1.0/(data.h)) * data.Lvel0.array()
        ).matrix();
      MatrixXd temp_d = temp.template cast<double>();
      MatrixXd temp_g = data.fgrav*(data.grav_mag*data.grav_dir);
      assert(data.fext.rows() == temp_g.rows());
      assert(data.fext.cols() == temp_g.cols());
      MatrixXd temp2 = data.Mass_tilde * temp_d + temp_g + data.fext.template cast<double>();
      MatrixXS temp2_f = temp2.template cast<SSCALAR>();
      L_part1_dyn = data.Pi_1 * temp2_f;
      L_part1.array() = L_part1.array() + L_part1_dyn.array();
    }
    //L_SSCALAR = L_part1 + L_part2and3;
    assert(L_SSCALAR.rows() == L_part1.rows() && L_SSCALAR.rows() == L_part2and3.rows());
    for (int i=0; i<L_SSCALAR.rows(); i++)
    {
      L_SSCALAR(i, 0) = L_part1(i, 0) + L_part2and3(i, 0);
    }
#ifdef EXTREME_VERBOSE
    cout<<"L=["<<endl<<L<<endl<<"];"<<endl;
#endif
    if(data.print_timings)
    {
      sec_solve = get_seconds_hires();
    }
#ifndef IGL_ARAP_DOF_FIXED_ITERATIONS_COUNT
    // Compute maximum absolute difference with last iteration's solution
    max_diff = (L_SSCALAR-L_prev).eval().array().abs().matrix().maxCoeff();
#endif
    iters++;
    if(data.print_timings)
    {
      sec_end = get_seconds_hires();
#ifndef WIN32
      // trick to get sec_* variables to compile without warning on mac
      if(false)
#endif
      printf(
        "\ntotal iteration time = %f "
        "[local: covGather = %f, "
        "fitRotations = %f, "
        "global: prep = %f, "
        "solve = %f, "
        "error = %f [ms]]\n",
        (sec_end - sec_start)*1000.0,
        (sec_covGather - sec_start)*1000.0,
        (sec_fitRotations - sec_covGather)*1000.0,
        (sec_prepMult - sec_fitRotations)*1000.0,
        (sec_solve - sec_prepMult)*1000.0,
        (sec_end - sec_solve)*1000.0 );
    }
  }
  L = L_SSCALAR.template cast<double>();
  assert(L.cols() == 1);
#ifdef ARAP_GLOBAL_TIMING
  double timer_finito = get_seconds_hires();
  printf(
    "ARAP preparation = %f, "
    "all %i iterations = %f [ms]\n",
    (timer_prepFinished - timer_start)*1000.0,
    max_iters,
    (timer_finito - timer_prepFinished)*1000.0);
#endif
  return true;
}
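// Typical call sequence (illustrative sketch only; V, F, M, G, fixed_dim,
// A_eq, B_eq, L0, max_iters and tol are placeholder names, and fields such as
// data.energy / data.with_dynamics are assumed to be set up by the caller):
//
//   igl::ArapDOFData<Eigen::MatrixXd,double> data;
//   igl::arap_dof_precomputation(V,F,M,G,data);          // once per mesh/skinning weights
//   igl::arap_dof_recomputation(fixed_dim,A_eq,data);    // whenever constraints change
//   Eigen::MatrixXd L = L0;                              // column of transformation entries
//   igl::arap_dof_update(data,B_eq,L0,max_iters,tol,L);  // every frame / solve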
#ifdef IGL_STATIC_LIBRARY
// Explicit template instantiation
template bool igl::arap_dof_update<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double>(ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double> const&, Eigen::Matrix<double, -1, 1, 0, -1, 1> const&, Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, int, double, Eigen::Matrix<double, -1, -1, 0, -1, -1>&);
template bool igl::arap_dof_recomputation<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double>(Eigen::Matrix<int, -1, 1, 0, -1, 1> const&, Eigen::SparseMatrix<double, 0, int> const&, ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double>&);
template bool igl::arap_dof_precomputation<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double>(Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, Eigen::Matrix<int, -1, -1, 0, -1, -1> const&, Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, Eigen::Matrix<int, -1, 1, 0, -1, 1> const&, ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, double>&);
template bool igl::arap_dof_update<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float>(igl::ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float> const&, Eigen::Matrix<double, -1, 1, 0, -1, 1> const&, Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, int, double, Eigen::Matrix<double, -1, -1, 0, -1, -1>&);
template bool igl::arap_dof_recomputation<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float>(Eigen::Matrix<int, -1, 1, 0, -1, 1> const&, Eigen::SparseMatrix<double, 0, int> const&, igl::ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float>&);
template bool igl::arap_dof_precomputation<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float>(Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, Eigen::Matrix<int, -1, -1, 0, -1, -1> const&, Eigen::Matrix<double, -1, -1, 0, -1, -1> const&, Eigen::Matrix<int, -1, 1, 0, -1, 1> const&, igl::ArapDOFData<Eigen::Matrix<double, -1, -1, 0, -1, -1>, float>&);
#endif