
Added a slew of matlab/igl_toolbox translations for Eigen matrices. Many are works in progress in the sense that they are only implemented for sparse or dense inputs but should eventually support both. Each has been tested (at least once).
Also added an example that lists known Eigen bugs/gotchas.
Added a few more rows to matlab-to-eigen.html, including a way to enter gotchas.


Former-commit-id: 4bf2083b205f19b00b5f7a098fbc3477ea559404

jalec · 13 years ago
commit 86d30ae471
13 changed files with 1319 additions and 5 deletions
  1. +6 -3     examples/ReAntTweakBar/example.cpp
  2. +23 -0    examples/eigen-gotchas/Makefile
  3. +224 -0   examples/eigen-gotchas/example.cpp
  4. +30 -0    is_symmetric.h
  5. +151 -0   lu_lagrange.h
  6. +66 -0    mat_max.h
  7. +70 -0    mat_min.h
  8. +86 -2    matlab-to-eigen.html
  9. +290 -0   min_quad_with_fixed.h
  10. +86 -0   repdiag.h
  11. +55 -0   repmat.h
  12. +142 -0  slice.h
  13. +90 -0   sparse.h

+ 6 - 3
examples/ReAntTweakBar/example.cpp

@@ -57,8 +57,8 @@ using namespace igl;
 
 
 // This example displays one of the following shapes
-typedef enum { SHAPE_TEAPOT=1, SHAPE_TORUS, SHAPE_CONE } Shape;
-#define NUM_SHAPES 3
+typedef enum { SHAPE_TEAPOT=1, SHAPE_TORUS, SHAPE_CONE, SHAPE_SPHERE } Shape;
+#define NUM_SHAPES 4
 Shape g_CurrentShape = SHAPE_TORUS;
 // Shapes scale
 float g_Zoom = 1.0f;
@@ -304,6 +304,9 @@ int main(int argc, char *argv[])
   glNewList(SHAPE_CONE, GL_COMPILE);
   glutSolidCone(1.0, 1.5, 64, 4);
   glEndList();
+  glNewList(SHAPE_SPHERE, GL_COMPILE);
+  glutSolidSphere(1.0, 50, 40);
+  glEndList();
   
   // Create a tweak bar
   rebar.TwNewBar("TweakBar");
@@ -342,7 +345,7 @@ int main(int argc, char *argv[])
   // (before adding an enum variable, its enum type must be declared to AntTweakBar as follow)
   {
     // ShapeEV associates Shape enum values with labels that will be displayed instead of enum values
-    TwEnumVal shapeEV[NUM_SHAPES] = { {SHAPE_TEAPOT, "Teapot"}, {SHAPE_TORUS, "Torus"}, {SHAPE_CONE, "Cone"} };
+    TwEnumVal shapeEV[NUM_SHAPES] = { {SHAPE_TEAPOT, "Teapot"}, {SHAPE_TORUS, "Torus"}, {SHAPE_CONE, "Cone"} , {SHAPE_SPHERE, "Sphere"}};
     // Create a type for the enum shapeEV
     TwType shapeType = ReTwDefineEnum("ShapeType", shapeEV, NUM_SHAPES);
     // add 'g_CurrentShape' to 'bar': this is a variable of type ShapeType. Its key shortcuts are [<] and [>].

+ 23 - 0
examples/eigen-gotchas/Makefile

@@ -0,0 +1,23 @@
+
+.PHONY: all
+
+all: example
+
+.PHONY: example
+
+igl_lib=-I../../
+eigen=-I/usr/local/include/eigen3 -I/usr/local/include/eigen3/unsupported
+
+CFLAGS=-g
+inc=$(igl_lib) $(eigen)
+lib=
+
+example: example.o
+	g++ $(CFLAGS) -o example example.o $(lib)
+	rm example.o
+
+example.o: example.cpp
+	g++ $(CFLAGS) -c example.cpp -o example.o $(inc)
+clean:
+	rm -f example.o
+	rm -f example

+ 224 - 0
examples/eigen-gotchas/example.cpp

@@ -0,0 +1,224 @@
+// Make an effort to verify bugs/gotchas for column and row major
+//#define EIGEN_DEFAULT_TO_ROW_MAJOR
+
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#define EIGEN_IM_MAD_AS_HELL_AND_IM_NOT_GOING_TO_TAKE_IT_ANYMORE
+#include <Eigen/Sparse>
+#include <Eigen/SparseExtra>
+using namespace Eigen;
+
+#include <cstdio>
+#include <iostream>
+using namespace std;
+
+#include "print_ijv.h"
+using namespace igl;
+
+// Eigen fails to notice at compile time that the InnerIterator used to loop
+// over the contents of a SparseMatrix<T> is for a different scalar type
+//
+// http://www.alecjacobson.com/weblog/?p=2216
+void wrong_sparsematrix_inner_iterator_type()
+{
+  // Fill 10 by 10 matrix with 0.5*(0:9) along the diagonal
+  SparseMatrix<double> A(10,10);
+  A.reserve(10);
+  for(int i = 0;i<10;i++)
+  {
+    A.insert(i,i) = (double)i/2.0;
+  }
+  A.finalize();
+  cout<<"AIJV=["<<endl;print_ijv(A,1);cout<<endl<<"];"<<endl<<
+    "A=sparse(AIJV(:,1),AIJV(:,2),AIJV(:,3),"<<
+    A.rows()<<","<<A.cols()<<");"<<endl;
+
+  // Traverse A as *int*
+  for(int k = 0;k<A.outerSize();k++)
+  {
+    // Each entry seems to be cast to int and if it's cast to zero then it's as
+    // if it wasn't even there
+    for (SparseMatrix<int>::InnerIterator it(A,k); it; ++it)
+    {
+      printf("A(%d,%d) = %d\n",it.row(),it.col(),it.value());
+    }
+  }
+}
+
+// Eigen can't handle .transpose() within an expression on the right hand side
+// of the = operator
+//
+// Temporary solution: Never use Something.transpose() in an expression. Always
+// first compute Something into a matrix:
+//   SparseMatrix S = Something;
+// then compute the transpose
+//   SparseMatrix ST = S.transpose();
+// then continue
+void sparsematrix_transpose_in_rhs_aliasing()
+{
+  SparseMatrix<double> A(7,2);
+  A.reserve(4);
+  A.insert(0,0) = -0.5;
+  A.insert(2,0) = -0.5;
+  A.insert(4,1) = -0.5;
+  A.insert(6,1) = -0.5;
+  A.finalize();
+  cout<<"AIJV=["<<endl;print_ijv(A,1);cout<<endl<<"];"<<endl<<
+    "A=sparse(AIJV(:,1),AIJV(:,2),AIJV(:,3),"<<
+    A.rows()<<","<<A.cols()<<");"<<endl;
+
+  SparseMatrix<double> B(2,7);
+  B.reserve(4);
+  B.insert(0,0) = -0.5;
+  B.insert(0,2) = -0.5;
+  B.insert(1,4) = -0.5;
+  B.insert(1,6) = -0.5;
+  B.finalize();
+  cout<<"BIJV=["<<endl;print_ijv(B,1);cout<<endl<<"];"<<endl<<
+    "B=sparse(BIJV(:,1),BIJV(:,2),BIJV(:,3),"<<
+    B.rows()<<","<<B.cols()<<");"<<endl;
+
+  SparseMatrix<double> C;
+
+  // Should be empty but isn't
+  C = A-B.transpose();
+  cout<<"C = A - B.transpose();"<<endl;
+  cout<<"CIJV=["<<endl;print_ijv(C,1);cout<<endl<<"];"<<endl<<
+    "C=sparse(CIJV(:,1),CIJV(:,2),CIJV(:,3),"<<
+    C.rows()<<","<<C.cols()<<");"<<endl;
+
+  // Should be empty but isn't
+  C = A-B.transpose().eval();
+  cout<<"C = A - B.transpose().eval();"<<endl;
+  cout<<"CIJV=["<<endl;print_ijv(C,1);cout<<endl<<"];"<<endl<<
+    "C=sparse(CIJV(:,1),CIJV(:,2),CIJV(:,3),"<<
+    C.rows()<<","<<C.cols()<<");"<<endl;
+
+  // This works
+  SparseMatrix<double> BT = B.transpose();
+  C = A-BT;
+  cout<<"C = A - BT;"<<endl;
+  cout<<"CIJV=["<<endl;print_ijv(C,1);cout<<endl<<"];"<<endl<<
+    "C=sparse(CIJV(:,1),CIJV(:,2),CIJV(:,3),"<<
+    C.rows()<<","<<C.cols()<<");"<<endl;
+}
+
+// Eigen claims that SparseLLT can be constructed from a SparseMatrix, but at
+// runtime it crashes unless it is given exclusively the lower triangle
+// 
+// Temporary solution, replace:
+// SparseLLT<SparseMatrix<T> > A_LLT(A);
+// with:
+// SparseLLT<SparseMatrix<T> > A_LLT(A.template triangularView<Eigen::Lower>());
+//
+void sparsellt_needs_triangular_view()
+{
+  // Succeeds
+  SparseMatrix<double> A(2,2);
+  A.reserve(4);
+  A.insert(0,0) = 1;
+  A.insert(0,1) = -0.5;
+  A.insert(1,0) = -0.5;
+  A.insert(1,1) = 1;
+  A.finalize();
+  cout<<"AIJV=["<<endl;print_ijv(A,1);cout<<endl<<"];"<<endl<<
+    "A=sparse(AIJV(:,1),AIJV(:,2),AIJV(:,3),"<<
+    A.rows()<<","<<A.cols()<<");"<<endl;
+  SparseLLT<SparseMatrix<double> > A_LLT(A.triangularView<Eigen::Lower>());
+  SparseMatrix<double> A_L = A_LLT.matrixL();
+  cout<<"A_LIJV=["<<endl;print_ijv(A_L,1);cout<<endl<<"];"<<endl<<
+    "A_L=sparse(A_LIJV(:,1),A_LIJV(:,2),A_LIJV(:,3),"<<
+    A_L.rows()<<","<<A_L.cols()<<");"<<endl;
+
+  //Crashes
+  SparseMatrix<double> B(2,2);
+  B.reserve(4);
+  B.insert(0,0) = 1;
+  B.insert(0,1) = -0.5;
+  B.insert(1,0) = -0.5;
+  B.insert(1,1) = 1;
+  B.finalize();
+  cout<<"BIJV=["<<endl;print_ijv(B,1);cout<<endl<<"];"<<endl<<
+    "B=sparse(BIJV(:,1),BIJV(:,2),BIJV(:,3),"<<
+    B.rows()<<","<<B.cols()<<");"<<endl;
+  SparseLLT<SparseMatrix<double> > B_LLT(B);
+}
+
+void sparsematrix_nonzeros_after_expression()
+{
+  SparseMatrix<double> A(2,2);
+  A.reserve(4);
+  A.insert(0,0) = 1;
+  A.insert(0,1) = -0.5;
+  A.insert(1,0) = -0.5;
+  A.insert(1,1) = 1;
+  A.finalize();
+  cout<<"AIJV=["<<endl;print_ijv(A,1);cout<<endl<<"];"<<endl<<
+    "A=sparse(AIJV(:,1),AIJV(:,2),AIJV(:,3),"<<
+    A.rows()<<","<<A.cols()<<");"<<endl;
+  // Succeeds
+  SparseMatrix<double> AmA = A-A;
+  cout<<"(AmA).nonZeros(): "<<AmA.nonZeros()<<endl;
+  // Succeeds
+  cout<<"(A-A).eval().nonZeros(): "<<(A-A).eval().nonZeros()<<endl;
+  // Crashes
+  cout<<"(A-A).nonZeros(): "<<(A-A).nonZeros()<<endl;
+}
+
+// Eigen's SparseLLT succeeded() method claims to return whether the LLT
+// computation was successful. Instead its value seems to be meaningless
+//
+// Temporary solution: check for the presence of NaNs in matrixL()
+// bool succeeded = (A_LLT.matrixL()*0).eval().nonZeros() == 0;
+void sparsellt_succeeded_is_meaningless()
+{
+  // Should succeed
+  SparseMatrix<double> A(2,2);
+  A.reserve(4);
+  A.insert(0,0) = 1;
+  A.insert(0,1) = -0.5;
+  A.insert(1,0) = -0.5;
+  A.insert(1,1) = 1;
+  A.finalize();
+  cout<<"AIJV=["<<endl;print_ijv(A,1);cout<<endl<<"];"<<endl<<
+    "A=sparse(AIJV(:,1),AIJV(:,2),AIJV(:,3),"<<
+    A.rows()<<","<<A.cols()<<");"<<endl;
+  SparseLLT<SparseMatrix<double> > A_LLT(A.triangularView<Eigen::Lower>());
+  cout<<"A_LLT.succeeded(): "<<(A_LLT.succeeded()?"TRUE":"FALSE")<<endl;
+  SparseMatrix<double> A_L = A_LLT.matrixL();
+  // See sparsematrix_nonzeros_after_expression
+  cout<<"(A_L*0).eval().nonZeros(): "<<(A_L*0).eval().nonZeros()<<endl;
+
+  cout<<"A_LIJV=["<<endl;print_ijv(A_L,1);cout<<endl<<"];"<<endl<<
+    "A_L=sparse(A_LIJV(:,1),A_LIJV(:,2),A_LIJV(:,3),"<<
+    A_L.rows()<<","<<A_L.cols()<<");"<<endl;
+
+  // Should not succeed
+  SparseMatrix<double> B(2,2);
+  B.reserve(4);
+  B.insert(0,0) = -1;
+  B.insert(0,1) = 0.5;
+  B.insert(1,0) = 0.5;
+  B.insert(1,1) = -1;
+  B.finalize();
+  cout<<"BIJV=["<<endl;print_ijv(B,1);cout<<endl<<"];"<<endl<<
+    "B=sparse(BIJV(:,1),BIJV(:,2),BIJV(:,3),"<<
+    B.rows()<<","<<B.cols()<<");"<<endl;
+  SparseLLT<SparseMatrix<double> > B_LLT(B.triangularView<Eigen::Lower>());
+  cout<<"B_LLT.succeeded(): "<<(B_LLT.succeeded()?"TRUE":"FALSE")<<endl;
+  SparseMatrix<double> B_L = B_LLT.matrixL();
+  // See sparsematrix_nonzeros_after_expression
+  cout<<"(B_L*0).eval().nonZeros(): "<<(B_L*0).eval().nonZeros()<<endl;
+  cout<<"B_LIJV=["<<endl;print_ijv(B_L,1);cout<<endl<<"];"<<endl<<
+    "B_L=sparse(B_LIJV(:,1),B_LIJV(:,2),B_LIJV(:,3),"<<
+    B_L.rows()<<","<<B_L.cols()<<");"<<endl;
+}
+
+int main(int argc, char * argv[])
+{
+  //wrong_sparsematrix_inner_iterator_type();
+  //sparsematrix_transpose_in_rhs_aliasing();
+  //sparsellt_needs_triangular_view();
+  //sparsematrix_nonzeros_after_expression();
+  //sparsellt_succeeded_is_meaningless();
+  return 0;
+}

+ 30 - 0
is_symmetric.h

@@ -0,0 +1,30 @@
+#ifndef IGL_IS_SYMMETRIC_H
+#define IGL_IS_SYMMETRIC_H
+namespace igl
+{
+  // Returns true if the given matrix is symmetric
+  // Inputs:
+  //   A  m by m matrix
+  // Returns true if the matrix is square and symmetric
+  template <typename T>
+  inline bool is_symmetric(const Eigen::SparseMatrix<T>& A);
+}
+
+// Implementation
+
+template <typename T>
+inline bool igl::is_symmetric(const Eigen::SparseMatrix<T>& A)
+{
+  if(A.rows() != A.cols())
+  {
+    return false;
+  }
+  Eigen::SparseMatrix<T> AT = A.transpose();
+  Eigen::SparseMatrix<T> AmAT = A-AT;
+  //// Eigen screws up something with LLT if you try to do
+  //SparseMatrix<T> AmAT = A-A.transpose();
+  //// Eigen crashes at runtime if you try to do
+  // return (A-A.transpose()).nonZeros() == 0;
+  return AmAT.nonZeros() == 0;
+}
+#endif
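
For context, a minimal usage sketch of the new igl::is_symmetric (not part of the diff above; the 2 by 2 test matrix is hypothetical and the insert/finalize pattern follows the eigen-gotchas example):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Sparse>
    #include <cstdio>
    #include "is_symmetric.h"

    int main()
    {
      // Hypothetical symmetric 2 by 2 sparse matrix
      Eigen::SparseMatrix<double> A(2,2);
      A.reserve(4);
      A.insert(0,0) = 1;
      A.insert(0,1) = -0.5;
      A.insert(1,0) = -0.5;
      A.insert(1,1) = 1;
      A.finalize();
      printf("is_symmetric(A): %s\n",igl::is_symmetric(A)?"true":"false");
      return 0;
    }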

+ 151 - 0
lu_lagrange.h

@@ -0,0 +1,151 @@
+#ifndef IGL_LU_LAGRANGE_H
+#define IGL_LU_LAGRANGE_H
+
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+namespace igl
+{
+  // LU_LAGRANGE Compute an LU decomposition for a special type of
+  // matrix Q that is symmetric but not positive-definite:
+  // Q = [A'*A C
+  //      C'   0];
+  // where A'*A, or ATA, is given as a symmetric positive definite matrix and C
+  // has full column-rank(?)
+  //
+  // [J] = lu_lagrange(ATA,C)
+  //
+  // Templates:
+  //   T  should be an Eigen matrix primitive type like int or double
+  // Inputs:
+  //   ATA   n by n square, symmetric, positive-definite system matrix, usually
+  //     the quadratic coefficients corresponding to the original unknowns in a
+  //     system
+  //   C  n by m rectangular matrix corresponding the quadratic coefficients of
+  //     the original unknowns times the lagrange multipliers enforcing linear
+  //     equality constraints
+  // Outputs:
+  //   L  lower triangular matrix such that Q = L*U
+  //   U  upper triangular matrix such that Q = L*U
+  // Returns true on success, false on error
+  //
+  template <typename T>
+  bool lu_lagrange(
+    const SparseMatrix<T> & ATA,
+    const SparseMatrix<T> & C,
+    SparseMatrix<T> & L,
+    SparseMatrix<T> & U);
+
+}
+
+// Implementation
+// Cholesky LLT decomposition for symmetric positive definite
+#include <Eigen/SparseExtra>
+#include <cassert>
+#include "find.h"
+#include "sparse.h"
+
+template <typename T>
+bool igl::lu_lagrange(
+  const SparseMatrix<T> & ATA,
+  const SparseMatrix<T> & C,
+  SparseMatrix<T> & L,
+  SparseMatrix<T> & U)
+{
+  // number of unknowns
+  int n = ATA.rows();
+  // number of lagrange multipliers
+  int m = C.cols();
+
+  assert(ATA.cols() == n);
+  if(m != 0)
+  {
+    assert(C.rows() == n);
+  }
+
+
+  // Cholesky factorization of ATA
+  //// Eigen fails if you give a full view of the matrix like this:
+  //Eigen::SparseLLT<SparseMatrix<T> > ATA_LLT(ATA);
+  SparseMatrix<T> ATA_LT = ATA.template triangularView<Eigen::Lower>();
+  Eigen::SparseLLT<SparseMatrix<T> > ATA_LLT(ATA_LT);
+
+  Eigen::SparseMatrix<T> J = ATA_LLT.matrixL();
+
+  //if(!ATA_LLT.succeeded())
+  if(!((J*0).eval().nonZeros() == 0))
+  {
+    fprintf(stderr,"Error: lu_lagrange() failed to factor ATA\n");
+    return false;
+  }
+
+  if(m == 0)
+  {
+    L = J;
+    U = J.transpose();
+  }else
+  {
+    // Construct helper matrix M
+    Eigen::SparseMatrix<T> M = C;
+    J.template triangularView<Eigen::Lower>().solveInPlace(M);
+
+
+    // Compute Cholesky factorization of M'*M
+    Eigen::SparseMatrix<T> MTM = M.transpose() * M;
+
+
+    Eigen::SparseLLT<SparseMatrix<T> > MTM_LLT(MTM.template triangularView<Eigen::Lower>());
+
+    Eigen::SparseMatrix<T> K = MTM_LLT.matrixL();
+
+
+    //if(!MTM_LLT.succeeded())
+    if(!((K*0).eval().nonZeros() == 0))
+    {
+      fprintf(stderr,"Error: lu_lagrange() failed to factor MTM\n");
+      return false;
+    }
+
+    // assemble LU decomposition of Q
+    Eigen::Matrix<int,Eigen::Dynamic,1> MI;
+    Eigen::Matrix<int,Eigen::Dynamic,1> MJ;
+    Eigen::Matrix<T,Eigen::Dynamic,1> MV;
+    igl::find(M,MI,MJ,MV);
+
+    Eigen::Matrix<int,Eigen::Dynamic,1> KI;
+    Eigen::Matrix<int,Eigen::Dynamic,1> KJ;
+    Eigen::Matrix<T,Eigen::Dynamic,1> KV;
+    igl::find(K,KI,KJ,KV);
+
+    Eigen::Matrix<int,Eigen::Dynamic,1> JI;
+    Eigen::Matrix<int,Eigen::Dynamic,1> JJ;
+    Eigen::Matrix<T,Eigen::Dynamic,1> JV;
+    igl::find(J,JI,JJ,JV);
+
+    int nnz = JV.size()  + MV.size() + KV.size();
+
+    Eigen::Matrix<int,Eigen::Dynamic,1> UI(nnz);
+    Eigen::Matrix<int,Eigen::Dynamic,1> UJ(nnz);
+    Eigen::Matrix<T,Eigen::Dynamic,1> UV(nnz);
+    UI << JJ,                        MI, (KJ.array() + n).matrix();
+    UJ << JI, (MJ.array() + n).matrix(), (KI.array() + n).matrix(); 
+    UV << JV,                        MV,                     KV*-1;
+    igl::sparse(UI,UJ,UV,U);
+
+    Eigen::Matrix<int,Eigen::Dynamic,1> LI(nnz);
+    Eigen::Matrix<int,Eigen::Dynamic,1> LJ(nnz);
+    Eigen::Matrix<T,Eigen::Dynamic,1> LV(nnz);
+    LI << JI, (MJ.array() + n).matrix(), (KI.array() + n).matrix();
+    LJ << JJ,                        MI, (KJ.array() + n).matrix(); 
+    LV << JV,                        MV,                        KV;
+    igl::sparse(LI,LJ,LV,L);
+
+    //// Only keep lower and upper parts
+    //L = L.template triangularView<Lower>();
+    //U = U.template triangularView<Upper>();
+  }
+
+  return true;
+}
+
+#endif
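
A hedged sketch of calling lu_lagrange (not part of the diff above; the 2 by 2 ATA and the single constraint column are hypothetical, and Eigen is brought into scope first because the header uses unqualified Eigen types):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Sparse>
    #include <cstdio>
    // lu_lagrange.h uses unqualified Eigen types, so bring Eigen into scope first
    using namespace Eigen;
    #include "lu_lagrange.h"

    int main()
    {
      // Hypothetical symmetric positive definite ATA
      SparseMatrix<double> ATA(2,2);
      ATA.reserve(4);
      ATA.insert(0,0) = 1;
      ATA.insert(0,1) = -0.5;
      ATA.insert(1,0) = -0.5;
      ATA.insert(1,1) = 1;
      ATA.finalize();
      // Hypothetical single equality constraint column (full column rank)
      SparseMatrix<double> C(2,1);
      C.reserve(2);
      C.insert(0,0) = 1;
      C.insert(1,0) = 1;
      C.finalize();
      SparseMatrix<double> L,U;
      bool ok = igl::lu_lagrange(ATA,C,L,U);
      printf("lu_lagrange %s: L is %d by %d\n",
        ok?"succeeded":"failed",(int)L.rows(),(int)L.cols());
      return 0;
    }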

+ 66 - 0
mat_max.h

@@ -0,0 +1,66 @@
+#ifndef IGL_MAT_MAX_H
+#define IGL_MAT_MAX_H
+#include <Eigen/Dense>
+
+namespace igl
+{
+  // Ideally this becomes a super overloaded function supporting everything
+  // that matlab's max supports
+
+  // Max function for matrices to act like matlab's max function. Specifically
+  // like [Y,I] = max(X,[],dim);
+  //
+  // Templates:
+  //   T  should be an Eigen matrix primitive type like int or double
+  // Inputs:
+  //   X  m by n matrix
+  //   dim  dimension along which to take max
+  // Outputs:
+  //   Y  n-long vector (if dim == 1)
+  //   or
+  //   Y  m-long vector (if dim == 2)
+  //   I  vector the same size as Y containing the indices along dim of maximum
+  //     entries
+  template <typename T>
+  void mat_max(
+    const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & X,
+    const int dim,
+    Eigen::Matrix<T,Eigen::Dynamic,1> & Y,
+    Eigen::Matrix<int,Eigen::Dynamic,1> & I);
+}
+
+template <typename T>
+void igl::mat_max(
+  const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & X,
+  const int dim,
+  Eigen::Matrix<T,Eigen::Dynamic,1> & Y,
+  Eigen::Matrix<int,Eigen::Dynamic,1> & I)
+{
+  assert(dim==1||dim==2);
+
+  // output size
+  int n = (dim==1?X.cols():X.rows());
+  // resize output
+  Y.resize(n);
+  I.resize(n);
+
+  // loop over dimension opposite of dim
+  for(int j = 0;j<n;j++)
+  {
+    typename Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Index PHONY;
+    typename Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Index i;
+    T m;
+    if(dim==1)
+    {
+      m = X.col(j).maxCoeff(&i,&PHONY);
+    }else
+    {
+      m = X.row(j).maxCoeff(&PHONY,&i);
+    }
+    Y(j) = m;
+    I(j) = i;
+  }
+}
+
+#endif
+
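
A minimal sketch of mat_max on a hypothetical 2 by 3 matrix (not part of the diff above); mat_min below follows the same calling convention:

    #include <Eigen/Dense>
    #include <cstdio>
    #include "mat_max.h"

    int main()
    {
      // Hypothetical 2 by 3 input
      Eigen::MatrixXd X(2,3);
      X << 1, 5, 3,
           4, 2, 6;
      Eigen::VectorXd Y;
      Eigen::VectorXi I;
      // Column-wise max, like [Y,I] = max(X,[],1) in matlab
      igl::mat_max(X,1,Y,I);
      for(int j = 0;j<Y.size();j++)
      {
        printf("Y(%d) = %g, I(%d) = %d\n",j,Y(j),j,I(j));
      }
      return 0;
    }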

+ 70 - 0
mat_min.h

@@ -0,0 +1,70 @@
+#ifndef IGL_MAT_MIN_H
+#define IGL_MAT_MIN_H
+#include <Eigen/Dense>
+
+namespace igl
+{
+  // Ideally this becomes a super overloaded function supporting everything
+  // that matlab's min supports
+
+  // Min function for matrices to act like matlab's min function. Specifically
+  // like [Y,I] = min(X,[],dim);
+  //
+  // Templates:
+  //   T  should be an Eigen matrix primitive type like int or double
+  // Inputs:
+  //   X  m by n matrix
+  //   dim  dimension along which to take min
+  // Outputs:
+  //   Y  n-long vector (if dim == 1)
+  //   or
+  //   Y  m-long vector (if dim == 2)
+  //   I  vector the same size as Y containing the indices along dim of minimum
+  //     entries
+  //
+  // See also: mat_max
+  template <typename T>
+  void mat_min(
+    const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & X,
+    const int dim,
+    Eigen::Matrix<T,Eigen::Dynamic,1> & Y,
+    Eigen::Matrix<int,Eigen::Dynamic,1> & I);
+}
+
+#include "verbose.h"
+
+template <typename T>
+void igl::mat_min(
+  const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & X,
+  const int dim,
+  Eigen::Matrix<T,Eigen::Dynamic,1> & Y,
+  Eigen::Matrix<int,Eigen::Dynamic,1> & I)
+{
+  assert(dim==1||dim==2);
+
+  // output size
+  int n = (dim==1?X.cols():X.rows());
+  // resize output
+  Y.resize(n);
+  I.resize(n);
+
+  // loop over dimension opposite of dim
+  for(int j = 0;j<n;j++)
+  {
+    typename Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Index PHONY;
+    typename Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic>::Index i;
+    T m;
+    if(dim==1)
+    {
+      m = X.col(j).minCoeff(&i,&PHONY);
+    }else
+    {
+      m = X.row(j).minCoeff(&PHONY,&i);
+    }
+    Y(j) = m;
+    I(j) = i;
+  }
+}
+
+#endif
+

+ 86 - 2
matlab-to-eigen.html

@@ -29,6 +29,25 @@ tr.d1 td
   padding-right: 20px;
   min-width: 200px;
 }
+
+tr.gotcha1 td
+{
+  background-color: #fcb;
+  color: black;
+  padding-left: 10px;
+  padding-right: 20px;
+  min-width: 200px;
+}
+
+tr.gotcha2 td
+{
+  background-color: #fed;
+  color: black;
+  padding-left: 10px;
+  padding-right: 20px;
+  min-width: 200px;
+}
+
     </style>
   </head>
   <body>
@@ -93,8 +112,8 @@ tr.d1 td
       </tr>
 
       <tr class=d0>
-        <td><pre><code>Adim_sum = sum(A,dim)</code></pre></td>
-        <td><pre><code>igl::sum(A,dim,Adim_sum)</code></pre></td>
+        <td><pre><code>Acol_sum = sum(A,1)<br>Arow_sum = sum(A,2)<br>Adim_sum = sum(Asparse,dim)</code></pre></td>
+        <td><pre><code>Acol_sum = A.colwise().sum()<br>Arow_sum = A.rowwise().sum()<br>igl::sum(Asparse,dim,Adim_sum)</code></pre></td>
         <td>Currently the igl version only supports sparse matrix input (and
             dim must be 1 or 2)</td>
       </tr>
@@ -113,6 +132,69 @@ tr.d1 td
         diagonal. Currently igl version supports sparse only.</td>
       </tr>
 
+      <tr class=d1>
+        <td><pre><code>[Y,I] = max(X,[],dim)</code></pre></td>
+        <td id=mat_max><pre><code>igl::mat_max(X,dim,Y,I)</code></pre></td>
+        <td>Matlab has a bizarre convention of passing [] as the second
+          argument to mean take the max/min along dimension dim.</td>
+      </tr>
+
+      <tr class=d0>
+        <td><pre><code>Y = max(X,[],1)<br>Y = max(X,[],2)<br>Y = min(X,[],1)<br>Y = min(X,[],2)</code></pre></td>
+        <td><pre><code>Y = X.colwise().maxCoeff()<br>Y = X.rowwise().maxCoeff()<br>Y = X.colwise().minCoeff()<br>Y = X.rowwise().minCoeff()</code></pre></td>
+        <td>Matlab allows you to obtain the indices of extrema see <a href=#mat_max>mat_max</a></td>
+      </tr>
+
+      <tr class=d1>
+        <td><pre><code>C = A.*B</code></pre></td>
+        <td><pre><code>C = (A.array() * B.array()).matrix()</code></pre></td>
+        <td></td>
+      </tr>
+
+      <tr class=d0>
+        <td><pre><code>C = A.^b</code></pre></td>
+        <td><pre><code>C = A.array().pow(b).matrix()</code></pre></td>
+        <td></td>
+      </tr>
+
+      <tr class=d1>
+        <td><pre><code>A(B == 0) = C</code></pre></td>
+        <td><pre><code>A = (B.array() == 0).select(C,A)</code></pre></td>
+        <td></td>
+      </tr>
+
+      <tr class=gotcha1>
+        <td><pre><code>C = A + B'</code></pre></td>
+        <td><pre><code>SparseMatrixType BT = B.transpose()<br>SparseMatrixType C = A+BT;</code></pre></td>
+        <td>Do <b>not</b> attempt to combine .transpose() in an expression like
+        this: <pre><code>C = A + B.transpose()</code></pre></td>
+      </tr>
+
+      <tr class=gotcha2>
+        <td><pre><code>[L,p] = chol(A)</code></pre></td>
+        <td><pre><code>SparseLLT&lt;SparseMatrixType&gt; A_LLT(A.template triangularView&lt;Lower&gt;())<br>SparseMatrixType L = A_LLT.matrixL();<br>bool p = (L*0).eval().nonZeros()==0;</code></pre></td>
+        <td>Do <b>not</b> attempt to use A in constructor of A_LLT like
+        this: <pre><code>SparseLLT&lt;SparseMatrixType&gt; A_LLT(A)</code></pre><br>
+        Do <b>not</b> attempt to use A_LLT.succeeded() to determine if Cholesky
+        factorization succeeded, like this:
+        <pre><code>bool p = A_LLT.succeeded()</code></pre>
+        </td>
+      </tr>
+
+      <tr class=d0>
+        <td><pre><code>X = U\(L\b)</code></pre></td>
+        <td><pre><code>X = b;<br>L.template triangularView&lt;Lower&gt;().solveInPlace(X);<br>U.template triangularView&lt;Upper&gt;().solveInPlace(X);</code></pre></td>
+        <td>Expects that L and U are lower and upper triangular matrices
+        respectively</td>
+      </tr>
+
+      <tr class=d1>
+        <td><pre><code>B = repmat(A,i,j)</code></pre></td>
+        <td><pre><code>igl::repmat(A,i,j,B)</code></pre></td>
+        <td>So far igl::repmat is only implemented for dense matrices.
+        </td>
+      </tr>
+
       <!-- Insert rows for each command pair -->
 
       <!-- Leave this here for copy and pasting
@@ -126,5 +208,7 @@ tr.d1 td
       -->
 
     </table>
+    <a href="http://eigen.tuxfamily.org/dox/AsciiQuickReference.txt">Eigen's
+    "ASCII Quick Reference" with MATLAB translations</a>
   </body>
 </html>
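
To make the new X = U\(L\b) row concrete, a small sketch of the two triangular solves (not part of the diff above; the 2 by 2 factors are hypothetical):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      // Hypothetical lower and upper triangular sparse factors of some matrix Q
      Eigen::SparseMatrix<double> L(2,2);
      L.reserve(3);
      L.insert(0,0) = 2;
      L.insert(1,0) = 1;
      L.insert(1,1) = 2;
      L.finalize();
      Eigen::SparseMatrix<double> U(2,2);
      U.reserve(3);
      U.insert(0,0) = 2;
      U.insert(0,1) = 1;
      U.insert(1,1) = 2;
      U.finalize();
      // X = U\(L\b) in matlab
      Eigen::VectorXd X(2);
      X << 1, 2;
      L.triangularView<Eigen::Lower>().solveInPlace(X);
      U.triangularView<Eigen::Upper>().solveInPlace(X);
      std::cout<<"X = "<<X.transpose()<<std::endl;
      return 0;
    }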

+ 290 - 0
min_quad_with_fixed.h

@@ -0,0 +1,290 @@
+#ifndef IGL_MIN_QUAD_WITH_FIXED_H
+#define IGL_MIN_QUAD_WITH_FIXED_H
+
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#include <Eigen/Core>
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+
+namespace igl
+{
+  template <typename T>
+  struct min_quad_with_fixed_data;
+  // MIN_QUAD_WITH_FIXED Minimize quadratic energy Z'*A*Z + Z'*B + C with
+  // constraints that Z(known) = Y, optionally also subject to the constraints
+  // Aeq*Z = Beq
+  //
+  // Templates:
+  //   T  should be an Eigen matrix primitive type like int or double
+  // Inputs:
+  //   A  n by n matrix of quadratic coefficients
+  //   B  n by 1 column of linear coefficients
+  //   known list of indices to known rows in Z
+  //   Y  list of fixed values corresponding to known rows in Z
+  //   Optional:
+  //     Aeq  m by n list of linear equality constraint coefficients
+  //     Beq  m by 1 list of linear equality constraint constant values
+  //     pd flag specifying whether A(unknown,unknown) is positive definite
+  // Outputs:
+  //   data  factorization struct with all necessary information to solve
+  //     using min_quad_with_fixed_solve
+  // Returns true on success, false on error
+  template <typename T>
+  bool min_quad_with_fixed_precompute(
+    const Eigen::SparseMatrix<T>& A,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & known,
+    const Eigen::SparseMatrix<T>& Aeq,
+    const bool pd,
+    min_quad_with_fixed_data<T> & data
+    );
+
+  // Solves a system previously factored using min_quad_with_fixed_precompute
+  // Inputs:
+  //   data  factorization struct with all necessary precomputation to solve
+  // Outputs:
+  //   Z  n by cols solution
+  // Returns true on success, false on error
+  template <typename T>
+  bool min_quad_with_fixed_solve(
+    const min_quad_with_fixed_data<T> & data,
+    const Eigen::Matrix<T,Eigen::Dynamic,1> & B,
+    const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & Y,
+    const Eigen::Matrix<T,Eigen::Dynamic,1> & Beq,
+    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & Z);
+}
+
+// Implementation
+#include <Eigen/SparseExtra>
+#include <cassert>
+#include <cstdio>
+#include "slice.h"
+#include "is_symmetric.h"
+
+#include "find.h"
+#include "sparse.h"
+#include "lu_lagrange.h"
+
+template <typename T>
+struct igl::min_quad_with_fixed_data
+{
+  int n;
+  bool Auu_pd;
+  bool Auu_sym;
+  Eigen::Matrix<int,Eigen::Dynamic,1> known;
+  Eigen::Matrix<int,Eigen::Dynamic,1> unknown;
+  Eigen::Matrix<int,Eigen::Dynamic,1> lagrange;
+  Eigen::Matrix<int,Eigen::Dynamic,1> unknown_lagrange;
+  SparseMatrix<T> preY;
+  SparseMatrix<T> L;
+  SparseMatrix<T> U;
+};
+
+template <typename T>
+bool igl::min_quad_with_fixed_precompute(
+  const Eigen::SparseMatrix<T>& A,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & known,
+  const Eigen::SparseMatrix<T>& Aeq,
+  const bool pd,
+  igl::min_quad_with_fixed_data<T> & data
+  )
+{
+  // number of rows
+  int n = A.rows();
+  // cache problem size
+  data.n = n;
+
+  int neq = Aeq.rows();
+  // default is to have 0 linear equality constraints
+  if(Aeq.size() != 0)
+  {
+    //Aeq = Eigen::SparseMatrix<T>(0,n);
+    assert(n == Aeq.cols());
+  }
+
+  assert(A.rows() == n);
+  assert(A.cols() == n);
+
+  // number of known rows
+  int kr = known.size();
+
+  assert(kr == 0 || known.minCoeff() >= 0);
+  assert(kr == 0 || known.maxCoeff() < n);
+  assert(neq <= n);
+
+  // cache known
+  data.known = known;
+  // get list of unknown indices
+  data.unknown.resize(n-kr);
+  std::vector<bool> unknown_mask;
+  unknown_mask.resize(n,true);
+  for(int i = 0;i<kr;i++)
+  {
+    unknown_mask[known(i)] = false;
+  }
+  int u = 0;
+  for(int i = 0;i<n;i++)
+  {
+    if(unknown_mask[i])
+    {
+      data.unknown(u) = i;
+      u++;
+    }
+  }
+  // get list of lagrange multiplier indices
+  data.lagrange.resize(neq);
+  for(int i = 0;i<neq;i++)
+  {
+    data.lagrange(i) = n + i;
+  }
+  // cache unknown followed by lagrange indices
+  data.unknown_lagrange.resize(data.unknown.size()+data.lagrange.size());
+  data.unknown_lagrange << data.unknown, data.lagrange;
+
+  Eigen::SparseMatrix<T> Auu;
+  igl::slice(A,data.unknown,data.unknown,Auu);
+
+  // determine if A(unknown,unknown) is symmetric and/or positive definite
+  data.Auu_sym = igl::is_symmetric(Auu);
+  // Positive definiteness is *not* determined, rather it is given as a
+  // parameter
+  data.Auu_pd = pd;
+
+  // Append lagrange multiplier quadratic terms
+  SparseMatrix<T> new_A;
+  Eigen::Matrix<int,Eigen::Dynamic,1> AI;
+  Eigen::Matrix<int,Eigen::Dynamic,1> AJ;
+  Eigen::Matrix<T,Eigen::Dynamic,1> AV;
+  igl::find(A,AI,AJ,AV);
+  Eigen::Matrix<int,Eigen::Dynamic,1> AeqI(0,1);
+  Eigen::Matrix<int,Eigen::Dynamic,1> AeqJ(0,1);
+  Eigen::Matrix<T,Eigen::Dynamic,1> AeqV(0,1);
+  if(neq > 0)
+  {
+    igl::find(Aeq,AeqI,AeqJ,AeqV);
+  }
+  Eigen::Matrix<int,Eigen::Dynamic,1> new_AI(AV.size()+AeqV.size()*2);
+  Eigen::Matrix<int,Eigen::Dynamic,1> new_AJ(AV.size()+AeqV.size()*2);
+  Eigen::Matrix<T,Eigen::Dynamic,1>   new_AV(AV.size()+AeqV.size()*2);
+  new_AI << AI, (AeqI.array()+n).matrix(), AeqJ;
+  new_AJ << AJ, AeqJ, (AeqI.array()+n).matrix();
+  new_AV << AV, AeqV, AeqV;
+  //new_AI.block(0,0,n,1) = AI;
+  //new_AJ.block(0,0,n,1) = AJ;
+  //new_AV.block(0,0,n,1) = AV;
+  //new_AI.block(n,0,neq,1) = AeqI+n;
+  //new_AJ.block(n,0,neq,1) = AeqJ;
+  //new_AV.block(n,0,neq,1) = AeqV;
+  //new_AI.block(n+neq,0,neq,1) = AeqJ;
+  //new_AJ.block(n+neq,0,neq,1) = AeqI+n;
+  //new_AV.block(n+neq,0,neq,1) = AeqV;
+  igl::sparse(new_AI,new_AJ,new_AV,n+neq,n+neq,new_A);
+
+  // precompute RHS builders
+  Eigen::SparseMatrix<T> Aulk;
+  igl::slice(new_A,data.unknown_lagrange,data.known,Aulk);
+  Eigen::SparseMatrix<T> Akul;
+  igl::slice(new_A,data.known,data.unknown_lagrange,Akul);
+
+  //// This doesn't work!!!
+  //data.preY = Aulk + Akul.transpose();
+  Eigen::SparseMatrix<T> AkulT = Akul.transpose();
+  //// Resize preY before assigning
+  //data.preY.resize(data.unknown_lagrange.size(),data.known.size());
+  data.preY = Aulk + AkulT;
+
+  // Create factorization
+  if(data.Auu_sym && data.Auu_pd)
+  {
+    Eigen::SparseMatrix<T> Aequ(0,0);
+    if(neq>0)
+    {
+      Eigen::Matrix<int,Eigen::Dynamic,1> Aeq_rows;
+      Aeq_rows.resize(neq);
+      for(int i = 0;i<neq;i++)
+      {
+        Aeq_rows(i) = i;
+      }
+      igl::slice(Aeq,Aeq_rows,data.unknown,Aequ);
+    }
+    // Get transpose of Aequ
+    Eigen::SparseMatrix<T> AequT = Aequ.transpose();
+    // Compute LU decomposition
+    bool lu_success = igl::lu_lagrange(Auu,AequT,data.L,data.U);
+    if(!lu_success)
+    {
+      return false;
+    }
+  }else
+  {
+    Eigen::SparseMatrix<T> NA;
+    igl::slice(new_A,data.unknown_lagrange,data.unknown_lagrange,NA);
+    assert(false);
+    return false;
+  }
+  return true;
+}
+
+
+template <typename T>
+bool igl::min_quad_with_fixed_solve(
+  const igl::min_quad_with_fixed_data<T> & data,
+  const Eigen::Matrix<T,Eigen::Dynamic,1> & B,
+  const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & Y,
+  const Eigen::Matrix<T,Eigen::Dynamic,1> & Beq,
+  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & Z)
+{
+  // number of known rows
+  int kr = data.known.size();
+  if(kr!=0)
+  {
+    assert(kr == Y.rows());
+  }
+  // number of columns to solve
+  int cols = Y.cols();
+
+  // number of lagrange multipliers aka linear equality constraints
+  int neq = data.lagrange.size();
+
+  if(neq != 0)
+  {
+  }
+
+  // append lagrange multiplier rhs's
+  Eigen::Matrix<T,Eigen::Dynamic,1> BBeq(B.size() + Beq.size());
+  BBeq << B, (Beq*-2.0);
+
+  // Build right hand side
+  Eigen::Matrix<T,Eigen::Dynamic,1> BBequl;
+  igl::slice(BBeq,data.unknown_lagrange,BBequl);
+  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> BBequlcols;
+  igl::repmat(BBequl,1,cols,BBequlcols);
+  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> NB;
+  NB = data.preY * Y + BBequlcols;
+
+  // resize output
+  Z.resize(data.n,cols);
+
+  // Set known values
+  for(int i = 0;i < kr;i++)
+  {
+    for(int j = 0;j < cols;j++)
+    {
+      Z(data.known(i),j) = Y(i,j);
+    }
+  }
+  data.L.template triangularView<Lower>().solveInPlace(NB);
+  data.U.template triangularView<Upper>().solveInPlace(NB);
+  // Now NB contains sol/-0.5
+  NB *= -0.5;
+  // Now NB contains solution
+  // Place solution in Z
+  for(int i = 0;i<(NB.rows()-neq);i++)
+  {
+    for(int j = 0;j<NB.cols();j++)
+    {
+      Z(data.unknown_lagrange(i),j) = NB(i,j);
+    }
+  }
+  return true;
+}
+#endif
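
A hedged end-to-end sketch of the precompute/solve pair (not part of the diff above; the tiny quadratic term, the single fixed value, and the single equality constraint are all hypothetical):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <iostream>
    // These headers use unqualified Eigen types, so bring Eigen into scope first
    using namespace Eigen;
    // min_quad_with_fixed_solve calls igl::repmat but does not include repmat.h
    #include "repmat.h"
    #include "min_quad_with_fixed.h"

    int main()
    {
      // Hypothetical 2 by 2 symmetric positive definite quadratic term
      SparseMatrix<double> A(2,2);
      A.reserve(4);
      A.insert(0,0) = 1;
      A.insert(0,1) = -0.5;
      A.insert(1,0) = -0.5;
      A.insert(1,1) = 1;
      A.finalize();
      // Fix Z(0) = 1
      VectorXi known(1);
      known(0) = 0;
      MatrixXd Y(1,1);
      Y(0,0) = 1;
      // Hypothetical single equality constraint: Z(0) + Z(1) = 0
      SparseMatrix<double> Aeq(1,2);
      Aeq.reserve(2);
      Aeq.insert(0,0) = 1;
      Aeq.insert(0,1) = 1;
      Aeq.finalize();
      igl::min_quad_with_fixed_data<double> data;
      if(!igl::min_quad_with_fixed_precompute(A,known,Aeq,true,data))
      {
        return 1;
      }
      VectorXd B = VectorXd::Zero(2);
      VectorXd Beq = VectorXd::Zero(1);
      MatrixXd Z;
      igl::min_quad_with_fixed_solve(data,B,Y,Beq,Z);
      std::cout<<"Z = "<<Z.transpose()<<std::endl;
      return 0;
    }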

+ 86 - 0
repdiag.h

@@ -0,0 +1,86 @@
+#ifndef IGL_REPDIAG_H
+#define IGL_REPDIAG_H
+
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+
+namespace igl
+{
+  // REPDIAG repeat a matrix along the diagonal a certain number of times, so
+  // that if A is an m by n matrix and we want to repeat along the diagonal d
+  // times, we get a m*d by n*d matrix B such that:
+  // B( (k*m+1):(k*m+1+m-1), (k*n+1):(k*n+1+n-1)) = A 
+  // for k from 0 to d-1
+  //
+  // Inputs:
+  //   A  m by n matrix we are repeating along the diagonal. May be dense or
+  //     sparse
+  //   d  number of times to repeat A along the diagonal
+  // Outputs:
+  //   B  m*d by n*d matrix with A repeated d times along the diagonal,
+  //     will be dense or sparse to match A
+  //
+
+  // Sparse version
+  template <typename T>
+  void repdiag(
+    const Eigen::SparseMatrix<T>& A,
+    const int d,
+    Eigen::SparseMatrix<T>& B);
+  // Dense version
+  template <typename T>
+  void repdiag(
+    const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & A,
+    const int d,
+    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & B);
+}
+
+// Implementation
+
+template <typename T>
+void igl::repdiag(
+  const Eigen::SparseMatrix<T>& A,
+  const int d,
+  Eigen::SparseMatrix<T>& B)
+{
+  int m = A.rows();
+  int n = A.cols();
+
+  Eigen::DynamicSparseMatrix<T, Eigen::RowMajor> 
+    dyn_B(m*d,n*d);
+
+  // loop over reps
+  for(int i=0;i<d;i++)
+  {
+    // Loop outer level
+    for (int k=0; k<A.outerSize(); ++k)
+    {
+      // loop inner level
+      for (typename SparseMatrix<T>::InnerIterator it(A,k); it; ++it)
+      {
+        dyn_B.coeffRef(i*m+it.row(),i*n+it.col()) += it.value();
+      }
+    }
+  }
+
+  B = Eigen::SparseMatrix<T>(dyn_B);
+}
+
+template <typename T>
+void igl::repdiag(
+  const Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & A,
+  const int d,
+  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & B)
+{
+  int m = A.rows();
+  int n = A.cols();
+  B.resize(m*d,n*d);
+  B.setZero();
+  for(int i = 0;i<d;i++)
+  {
+    B.block(i*m,i*n,m,n) = A;
+  }
+}
+
+#endif
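
A minimal sketch of the sparse repdiag (not part of the diff above; the 1 by 1 block is hypothetical, and Eigen is brought into scope because the implementation uses an unqualified InnerIterator type):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Sparse>
    #include <cstdio>
    using namespace Eigen;
    #include "repdiag.h"

    int main()
    {
      // Hypothetical 1 by 1 sparse block repeated 3 times along the diagonal
      SparseMatrix<double> A(1,1);
      A.reserve(1);
      A.insert(0,0) = 2;
      A.finalize();
      SparseMatrix<double> B;
      igl::repdiag(A,3,B);
      // B is 3 by 3 with 2 repeated along the diagonal
      printf("B is %d by %d with %d nonzeros\n",
        (int)B.rows(),(int)B.cols(),(int)B.nonZeros());
      return 0;
    }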

+ 55 - 0
repmat.h

@@ -0,0 +1,55 @@
+#ifndef IGL_REPMAT_H
+#define IGL_REPMAT_H
+
+#define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+
+namespace igl
+{
+  // Ideally this is a super overloaded function that behaves just like
+  // matlab's repmat
+
+  // Replicate and tile a matrix
+  //
+  // Templates:
+  //   T  should be an Eigen matrix primitive type like int or double
+  // Inputs:
+  //   A  m by n input matrix
+  //   r  number of row-direction copies
+  //   c  number of col-direction copies
+  // Outputs:
+  //   B  r*m by c*n output matrix
+  //
+  template <typename T,const int W, const int H>
+  inline void repmat(
+    const Eigen::Matrix<T,W,H> & A,
+    const int r,
+    const int c,
+    Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & B);
+}
+
+// Implementation
+
+template <typename T,const int W, const int H>
+inline void igl::repmat(
+  const Eigen::Matrix<T,W,H> & A,
+  const int r,
+  const int c,
+  Eigen::Matrix<T,Eigen::Dynamic,Eigen::Dynamic> & B)
+{
+  assert(r>0);
+  assert(c>0);
+  // Make room for output
+  B.resize(r*A.rows(),c*A.cols());
+
+  // copy tiled blocks
+  for(int i = 0;i<r;i++)
+  {
+    for(int j = 0;j<c;j++)
+    {
+      B.block(i*A.rows(),j*A.cols(),A.rows(),A.cols()) = A;
+    }
+  }
+}
+#endif
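
A minimal sketch of repmat on a dense block (not part of the diff above; the 2 by 2 block is hypothetical):

    #include <Eigen/Dense>
    #include <iostream>
    #include "repmat.h"

    int main()
    {
      // Hypothetical 2 by 2 block tiled 2 by 3 times, like matlab's repmat(A,2,3)
      Eigen::MatrixXd A(2,2);
      A << 1, 2,
           3, 4;
      Eigen::MatrixXd B;
      igl::repmat(A,2,3,B);
      // B is 4 by 6
      std::cout<<B<<std::endl;
      return 0;
    }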

+ 142 - 0
slice.h

@@ -0,0 +1,142 @@
+#ifndef IGL_SLICE_H
+#define IGL_SLICE_H
+namespace igl
+{
+  // Act like the matlab X(row_indices,col_indices) operator
+  // 
+  // Inputs:
+  //   X  m by n matrix
+  //   R  list of row indices
+  //   C  list of column indices
+  // Output:
+  //   Y  #R by #C matrix
+  template <typename T>
+  inline void slice(
+    const Eigen::SparseMatrix<T>& X,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
+    Eigen::SparseMatrix<T>& Y);
+
+  template <typename T, const int W, const int H>
+  inline void slice(
+    const Eigen::Matrix<T,W,H> & X,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
+    Eigen::Matrix<T,W,H> & Y);
+
+  template <typename T>
+  inline void slice(
+    const Eigen::Matrix<T,Eigen::Dynamic,1> & X,
+    const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+    Eigen::Matrix<T,Eigen::Dynamic,1> & Y);
+}
+
+// Implementation
+
+template <typename T>
+inline void igl::slice(
+  const Eigen::SparseMatrix<T>& X,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
+  Eigen::SparseMatrix<T>& Y)
+{
+  int xm = X.rows();
+  int xn = X.cols();
+  int ym = R.size();
+  int yn = C.size();
+  assert(R.minCoeff() >= 0);
+  assert(R.maxCoeff() < xm);
+  assert(C.minCoeff() >= 0);
+  assert(C.maxCoeff() < xn);
+  // Build reindexing maps for columns and rows, -1 means not in map
+  Eigen::Matrix<int,Eigen::Dynamic,1> RI;
+  RI.resize(xm);
+  RI.array() = RI.array()*0-1;
+  for(int i = 0;i<ym;i++)
+  {
+    RI(R(i)) = i;
+  }
+  Eigen::Matrix<int,Eigen::Dynamic,1> CI;
+  CI.resize(xn);
+  CI.array() = CI.array()*0-1;
+  for(int i = 0;i<yn;i++)
+  {
+    CI(C(i)) = i;
+  }
+  // Resize output
+  Eigen::DynamicSparseMatrix<T, Eigen::RowMajor> 
+    dyn_Y(ym,yn);
+  // Iterate over outside
+  for(int k=0; k<X.outerSize(); ++k)
+  {
+    // Iterate over inside
+    for(typename Eigen::SparseMatrix<T>::InnerIterator it (X,k); it; ++it)
+    {
+      if(RI(it.row()) >= 0 && CI(it.col()) >= 0)
+      {
+        dyn_Y.coeffRef(RI(it.row()),CI(it.col())) = it.value();
+      }
+    }
+  }
+  Y = Eigen::SparseMatrix<T>(dyn_Y);
+}
+
+template <typename T, const int W, const int H>
+inline void igl::slice(
+  const Eigen::Matrix<T,W,H> & X,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & C,
+  Eigen::Matrix<T,W,H> & Y)
+{
+  int xm = X.rows();
+  int xn = X.cols();
+  int ym = R.size();
+  int yn = C.size();
+  assert(R.minCoeff() >= 0);
+  assert(R.maxCoeff() < xm);
+  assert(C.minCoeff() >= 0);
+  assert(C.maxCoeff() < xn);
+  // Build reindexing maps for columns and rows, -1 means not in map
+  Eigen::Matrix<int,Eigen::Dynamic,1> RI;
+  RI.resize(xm);
+  RI.array() = RI.array()*0-1;
+  for(int i = 0;i<ym;i++)
+  {
+    RI(R(i)) = i;
+  }
+  Eigen::Matrix<int,Eigen::Dynamic,1> CI;
+  CI.resize(xn);
+  CI.array() = CI.array()*0-1;
+  for(int i = 0;i<yn;i++)
+  {
+    CI(C(i)) = i;
+  }
+  // Resize output
+  Y.resize(ym,yn);
+  for(int i = 0;i<xm;i++)
+  {
+    for(int j = 0;j<xn;j++)
+    {
+      if(RI(i) >= 0 && CI(j) >= 0)
+      {
+        Y(RI(i),CI(j)) = X(i,j);
+      }
+    }
+  }
+}
+
+template <typename T>
+inline void igl::slice(
+  const Eigen::Matrix<T,Eigen::Dynamic,1> & X,
+  const Eigen::Matrix<int,Eigen::Dynamic,1> & R,
+  Eigen::Matrix<T,Eigen::Dynamic,1> & Y)
+{
+  // phony column indices
+  Eigen::Matrix<int,Eigen::Dynamic,1> C;
+  C.resize(1);
+  C(0) = 0;
+  return igl::slice(X,R,C,Y);
+}
+
+#endif
+
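
A minimal sketch of the dense slice overload (not part of the diff above; the matrix and the 0-based index lists are hypothetical):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <iostream>
    #include "slice.h"

    int main()
    {
      // Hypothetical 3 by 3 matrix
      Eigen::MatrixXd X(3,3);
      X << 1, 2, 3,
           4, 5, 6,
           7, 8, 9;
      // 0-based rows [0 2] and column [1], like matlab's X([1 3],2)
      Eigen::VectorXi R(2);
      R << 0, 2;
      Eigen::VectorXi C(1);
      C << 1;
      Eigen::MatrixXd Y;
      igl::slice(X,R,C,Y);
      // Y is 2 by 1: [2; 8]
      std::cout<<Y<<std::endl;
      return 0;
    }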

+ 90 - 0
sparse.h

@@ -0,0 +1,90 @@
+#ifndef IGL_SPARSE_H
+#define IGL_SPARSE_H
+#include <Eigen/Dense>
+#include <Eigen/Sparse>
+namespace igl
+{
+  // Build a sparse matrix from list of indices and values (I,J,V), functions
+  // like the sparse function in matlab
+  //
+  // Templates:
+  //   IndexVector  list of indices, values should be non-negative and should
+  //     expect to be cast to an index. Must implement operator(i) to retrieve
+  //     ith element
+  //   ValueVector  list of values, values should expect to be cast to type
+  //     T. Must implement operator(i) to retrieve ith element
+  //   T  should be an Eigen sparse matrix primitive type like int or double
+  // Input:
+  //   I  nnz vector of row indices of non-zero entries in X
+  //   J  nnz vector of column indices of non-zero entries in X
+  //   V  nnz vector of non-zero entries in X
+  //   Optional:
+  //     m  number of rows
+  //     n  number of cols
+  // Outputs:
+  //   X  m by n matrix of type T whose entries are to be found 
+  //
+  template <class IndexVector, class ValueVector, typename T>
+  inline void sparse(
+    const IndexVector & I,
+    const IndexVector & J,
+    const ValueVector & V,
+    Eigen::SparseMatrix<T>& X);
+  template <class IndexVector, class ValueVector, typename T>
+  inline void sparse(
+    const IndexVector & I,
+    const IndexVector & J,
+    const ValueVector & V,
+    const size_t m,
+    const size_t n,
+    Eigen::SparseMatrix<T>& X);
+}
+
+// Implementation
+
+template <class IndexVector, class ValueVector, typename T>
+inline void igl::sparse(
+  const IndexVector & I,
+  const IndexVector & J,
+  const ValueVector & V,
+  Eigen::SparseMatrix<T>& X)
+{
+  size_t m = (size_t)I.maxCoeff()+1;
+  size_t n = (size_t)J.maxCoeff()+1;
+  return igl::sparse(I,J,V,m,n,X);
+}
+
+#include "verbose.h"
+template <class IndexVector, class ValueVector, typename T>
+inline void igl::sparse(
+  const IndexVector & I,
+  const IndexVector & J,
+  const ValueVector & V,
+  const size_t m,
+  const size_t n,
+  Eigen::SparseMatrix<T>& X)
+{
+  assert((int)I.maxCoeff() < (int)m);
+  assert((int)I.minCoeff() >= 0);
+  assert((int)J.maxCoeff() < (int)n);
+  assert((int)J.minCoeff() >= 0);
+  assert(I.size() == J.size());
+  assert(J.size() == V.size());
+  // Really we just need .size() to be the same, but this is safer
+  assert(I.rows() == J.rows());
+  assert(J.rows() == V.rows());
+  assert(I.cols() == J.cols());
+  assert(J.cols() == V.cols());
+  // number of values
+  int nv = V.size();
+
+  Eigen::DynamicSparseMatrix<T, Eigen::RowMajor> 
+    dyn_X(m,n);
+  for(int i = 0;i < nv;i++)
+  {
+    dyn_X.coeffRef((int)I(i),(int)J(i)) += (T)V(i);
+  }
+  X = Eigen::SparseMatrix<T>(dyn_X);
+}
+
+#endif
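
A minimal sketch of igl::sparse, mirroring matlab's sparse(I,J,V) (not part of the diff above; the triplets are hypothetical and the pre-existing verbose.h header is assumed to be on the include path):

    #define EIGEN_YES_I_KNOW_SPARSE_MODULE_IS_NOT_STABLE_YET
    #include <Eigen/Dense>
    #include <Eigen/Sparse>
    #include <cstdio>
    #include "sparse.h"

    int main()
    {
      // Hypothetical triplets; the duplicate (1,2) entries are summed
      Eigen::VectorXi I(3);
      Eigen::VectorXi J(3);
      Eigen::VectorXd V(3);
      I << 0, 1, 1;
      J << 0, 2, 2;
      V << 1, 2, 3;
      Eigen::SparseMatrix<double> X;
      igl::sparse(I,J,V,X);
      // X is 2 by 3 (sizes inferred from the max indices)
      printf("X is %d by %d with %d nonzeros\n",
        (int)X.rows(),(int)X.cols(),(int)X.nonZeros());
      return 0;
    }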