namespace HeuristicLab.NativeInterpreter {
  public class CeresTypes {
    // Ceres Solver - A fast nonlinear least squares minimizer
    // Copyright 2019 Google Inc. All rights reserved.
    // http://ceres-solver.org/
    //
    // Redistribution and use in source and binary forms, with or without
    // modification, are permitted provided that the following conditions are met:
    //
    // * Redistributions of source code must retain the above copyright notice,
    //   this list of conditions and the following disclaimer.
    // * Redistributions in binary form must reproduce the above copyright notice,
    //   this list of conditions and the following disclaimer in the documentation
    //   and/or other materials provided with the distribution.
    // * Neither the name of Google Inc. nor the names of its contributors may be
    //   used to endorse or promote products derived from this software without
    //   specific prior written permission.
    //
    // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
    // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
    // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
    // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
    // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
    // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
    // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    // POSSIBILITY OF SUCH DAMAGE.
    //
    // Author: sameeragarwal@google.com (Sameer Agarwal)
    //
    // Wrapped for access from C# by Gabriel Kronberger

    public enum LinearSolverType : int {
      // These solvers are for general rectangular systems formed from the
      // normal equations A'A x = A'b. They are direct solvers and do not
      // assume any special problem structure.

      // Solve the normal equations using a dense Cholesky solver; based
      // on Eigen.
      DENSE_NORMAL_CHOLESKY,

      // Solve the normal equations using a dense QR solver; based on
      // Eigen.
      DENSE_QR,

      // Solve the normal equations using a sparse Cholesky solver; requires
      // SuiteSparse or CXSparse.
      SPARSE_NORMAL_CHOLESKY,

      // Specialized solvers, specific to problems with a generalized
      // bipartite structure.

      // Solves the reduced linear system using a dense Cholesky solver;
      // based on Eigen.
      DENSE_SCHUR,

      // Solves the reduced linear system using a sparse Cholesky solver;
      // based on CHOLMOD.
      SPARSE_SCHUR,

      // Solves the reduced linear system using Conjugate Gradients, based
      // on a new Ceres implementation. Suitable for large scale
      // problems.
      ITERATIVE_SCHUR,

      // Conjugate gradients on the normal equations.
      CGNR
    };
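    // For reference (standard linear algebra background, not specific to
    // Ceres): the direct solvers above target the linear least squares problem
    //
    //   min_x ||A x - b||^2,   with normal equations   A'A x = A'b.
    //
    // DENSE_NORMAL_CHOLESKY factors A'A = L L' and back-substitutes, while
    // DENSE_QR factors A = Q R and solves R x = Q'b, which avoids forming
    // A'A and therefore avoids squaring the condition number of A.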


    public enum PreconditionerType {
      // Trivial preconditioner - the identity matrix.
      IDENTITY,

      // Block diagonal of the Gauss-Newton Hessian.
      JACOBI,

      // Note: The following three preconditioners can only be used with
      // the ITERATIVE_SCHUR solver. They are well suited for Structure
      // from Motion problems.

      // Block diagonal of the Schur complement. This preconditioner may
      // only be used with the ITERATIVE_SCHUR solver.
      SCHUR_JACOBI,

      // Visibility clustering based preconditioners.
      //
      // The following two preconditioners use the visibility structure of
      // the scene to determine the sparsity structure of the
      // preconditioner. This is done using a clustering algorithm. The
      // available visibility clustering algorithms are described below.
      CLUSTER_JACOBI,
      CLUSTER_TRIDIAGONAL,

      // Subset preconditioner is a general purpose preconditioner for
      // linear least squares problems. Given a set of residual blocks,
      // it uses the corresponding subset of the rows of the Jacobian to
      // construct a preconditioner.
      //
      // Suppose the Jacobian J has been horizontally partitioned as
      //
      //   J = [P]
      //       [Q]
      //
      // where Q is the set of rows corresponding to the residual
      // blocks in residual_blocks_for_subset_preconditioner.
      //
      // The preconditioner is the inverse of the matrix Q'Q.
      //
      // Obviously, the efficacy of the preconditioner depends on how
      // well the matrix Q approximates J'J, or how well the chosen
      // residual blocks approximate the nonlinear least squares
      // problem.
      SUBSET,
    };
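    // For reference (standard preconditioned-CG background, not specific to
    // Ceres): a preconditioner M approximates the Gauss-Newton Hessian
    // H = J'J so that the iterative solver effectively works on the
    // better-conditioned system
    //
    //   M^{-1} H x = M^{-1} b.
    //
    // The closer M is to H, and the cheaper it is to apply M^{-1} to a
    // vector, the fewer Conjugate Gradient iterations are needed.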


    public enum MinimizerType : int {
      LINE_SEARCH,
      TRUST_REGION,
    };


    public enum LineSearchDirectionType : int {
      // Negative of the gradient.
      STEEPEST_DESCENT,

      // A generalization of the Conjugate Gradient method to nonlinear
      // functions. The generalization can be performed in a number of
      // different ways, resulting in a variety of search directions. The
      // precise choice of the nonlinear conjugate gradient algorithm
      // used is determined by NonlinearConjugateGradientType.
      NONLINEAR_CONJUGATE_GRADIENT,

      // BFGS, and its limited memory approximation L-BFGS, are quasi-Newton
      // algorithms that approximate the Hessian matrix by iteratively refining
      // an initial estimate with rank-one updates using the gradient at each
      // iteration. They are a generalization of the Secant method and satisfy
      // the Secant equation. The Secant equation has an infinity of solutions
      // in multiple dimensions, as there are N*(N+1)/2 degrees of freedom in a
      // symmetric matrix but only N conditions are specified by the Secant
      // equation. The requirement that the Hessian approximation be positive
      // definite imposes another N additional constraints, but that still leaves
      // remaining degrees of freedom. (L)BFGS methods uniquely determine the
      // approximate Hessian by imposing the additional constraint that the
      // approximation at the next iteration must be the 'closest' to the current
      // approximation (the nature of how this proximity is measured is actually
      // the defining difference between a family of quasi-Newton methods including
      // (L)BFGS & DFP). (L)BFGS is currently regarded as being the best known
      // general quasi-Newton method.
      //
      // The principal difference between BFGS and L-BFGS is that whilst BFGS
      // maintains a full, dense approximation to the (inverse) Hessian, L-BFGS
      // maintains only a window of the last M observations of the parameters and
      // gradients. Using this observation history, the calculation of the next
      // search direction can be computed without requiring the construction of the
      // full dense inverse Hessian approximation. This is particularly important
      // for problems with a large number of parameters, where storage of an N-by-N
      // matrix in memory would be prohibitive.
      //
      // For more details on BFGS see:
      //
      // Broyden, C.G., "The Convergence of a Class of Double-rank Minimization
      // Algorithms," J. Inst. Maths. Applics., Vol. 6, pp. 76-90, 1970.
      //
      // Fletcher, R., "A New Approach to Variable Metric Algorithms,"
      // Computer Journal, Vol. 13, pp. 317-322, 1970.
      //
      // Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational
      // Means," Mathematics of Computation, Vol. 24, pp. 23-26, 1970.
      //
      // Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function
      // Minimization," Mathematics of Computation, Vol. 24, pp. 647-656, 1970.
      //
      // For more details on L-BFGS see:
      //
      // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
      // Storage". Mathematics of Computation 35 (151): 773-782.
      //
      // Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
      // "Representations of Quasi-Newton Matrices and their use in
      // Limited Memory Methods". Mathematical Programming 63 (4):
      // 129-156.
      //
      // A general reference for both methods:
      //
      // Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999.
      LBFGS,
      BFGS,
    };
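    // For reference, the Secant equation mentioned above, stated explicitly
    // (standard quasi-Newton background): with step s_k = x_{k+1} - x_k and
    // gradient change y_k = g_{k+1} - g_k, the next Hessian approximation
    // B_{k+1} must satisfy
    //
    //   B_{k+1} s_k = y_k,
    //
    // i.e. it must reproduce the observed change of the gradient along the
    // step just taken.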


    // Nonlinear conjugate gradient methods are a generalization of the
    // method of Conjugate Gradients for linear systems. The
    // generalization can be carried out in a number of different ways
    // leading to a number of different rules for computing the search
    // direction. Ceres provides a number of different variants. For more
    // details see Numerical Optimization by Nocedal & Wright.
    public enum NonlinearConjugateGradientType {
      FLETCHER_REEVES,
      POLAK_RIBIERE,
      HESTENES_STIEFEL,
    };
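    // For reference (formulas as given in Nocedal & Wright): all variants
    // compute the search direction d_{k+1} = -g_{k+1} + beta_k * d_k and
    // differ only in the choice of beta_k:
    //
    //   FLETCHER_REEVES:  beta_k = g_{k+1}'g_{k+1} / g_k'g_k
    //   POLAK_RIBIERE:    beta_k = g_{k+1}'(g_{k+1} - g_k) / g_k'g_k
    //   HESTENES_STIEFEL: beta_k = g_{k+1}'(g_{k+1} - g_k) / d_k'(g_{k+1} - g_k)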


    public enum LineSearchType {
      // Backtracking line search with polynomial interpolation or
      // bisection.
      ARMIJO,
      WOLFE,
    };
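    // For reference (standard line search conditions, not specific to Ceres):
    // for a step length a along a descent direction d at the point x,
    //
    //   ARMIJO (sufficient decrease):  f(x + a*d) <= f(x) + c1 * a * g'd
    //   WOLFE (adds a curvature test): |g(x + a*d)'d| <= c2 * |g'd|
    //
    // with constants 0 < c1 < c2 < 1. The curvature condition is what allows
    // the (L)BFGS updates above to remain positive definite.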


    // Ceres supports different strategies for computing the trust region
    // step.
    public enum TrustRegionStrategyType : int {
      // The default trust region strategy is to use the step computation
      // used in the Levenberg-Marquardt algorithm. For more details see
      // levenberg_marquardt_strategy.h
      LEVENBERG_MARQUARDT,

      // Powell's dogleg algorithm interpolates between the Cauchy point
      // and the Gauss-Newton step. It is particularly useful if the
      // LEVENBERG_MARQUARDT algorithm is making a large number of
      // unsuccessful steps. For more details see dogleg_strategy.h.
      //
      // NOTES:
      //
      // 1. This strategy has not been experimented with or tested as
      //    extensively as LEVENBERG_MARQUARDT, and therefore it should be
      //    considered EXPERIMENTAL for now.
      //
      // 2. For now this strategy should only be used with exact
      //    factorization based linear solvers, i.e., SPARSE_SCHUR,
      //    DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
      DOGLEG
    };
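    // For reference (standard Levenberg-Marquardt background): the
    // LEVENBERG_MARQUARDT strategy computes the step by solving the
    // regularized normal equations
    //
    //   (J'J + mu * D'D) step = -J'r,
    //
    // where r is the residual vector, D is a scaling matrix and mu > 0 is
    // adapted based on the quality of recent steps: a large mu yields a
    // short, gradient-like step, a small mu approaches the Gauss-Newton step.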


    // Ceres supports two different dogleg strategies.
    // The "traditional" dogleg method by Powell and the
    // "subspace" method described in
    // R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
    // "Approximate solution of the trust region problem by minimization
    // over two-dimensional subspaces", Mathematical Programming,
    // 40 (1988), pp. 247-263
    public enum DoglegType {
      // The traditional approach constructs a dogleg path
      // consisting of two line segments and finds the furthest
      // point on that path that is still inside the trust region.
      TRADITIONAL_DOGLEG,

      // The subspace approach finds the exact minimum of the model
      // constrained to the subspace spanned by the dogleg path.
      SUBSPACE_DOGLEG
    };
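    // For reference (standard dogleg background): the two segments of the
    // traditional dogleg path run from the origin to the Cauchy point (the
    // minimizer of the quadratic model along the steepest descent direction)
    // and from the Cauchy point to the Gauss-Newton step. The subspace
    // variant minimizes the model exactly over the intersection of the trust
    // region with the plane spanned by the gradient and the Gauss-Newton step.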


  }
}
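// Illustrative usage sketch (editorial addition): how these enum values might
// be combined into a solver configuration. `SolverOptions`, `problem` and
// `NativeWrapper.Optimize` are hypothetical placeholders; the actual interop
// surface of HeuristicLab.NativeInterpreter is defined elsewhere.
//
//   var options = new SolverOptions {
//     Minimizer    = (int)CeresTypes.MinimizerType.TRUST_REGION,
//     LinearSolver = (int)CeresTypes.LinearSolverType.DENSE_QR,
//     TrustRegion  = (int)CeresTypes.TrustRegionStrategyType.LEVENBERG_MARQUARDT,
//     LineSearch   = (int)CeresTypes.LineSearchType.WOLFE
//   };
//   NativeWrapper.Optimize(problem, options);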

