source: branches/HeuristicLab.Algorithms.DataAnalysis.Glmnet/3.4/ElasticNetLinearRegression.cs @ 13940

Last change on this file since 13940 was 13940, checked in by gkronber, 8 years ago

#745:

  • added scatterplot of R² values over lambda instead of a line chart
  • normalized coefficient values in the coefficient path chart
  • changed parameter lambda to LogLambda
using System;
using System.Linq;
using System.Runtime.InteropServices;
using HeuristicLab.Algorithms.DataAnalysis;
using HeuristicLab.Analysis;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Data;
using HeuristicLab.Encodings.SymbolicExpressionTreeEncoding;
using HeuristicLab.Optimization;
using HeuristicLab.Parameters;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
using HeuristicLab.Problems.DataAnalysis;
using HeuristicLab.Problems.DataAnalysis.Symbolic;
using HeuristicLab.Problems.DataAnalysis.Symbolic.Regression;

namespace HeuristicLab.LibGlmNet {
  [Item("Elastic-net Linear Regression (LR)", "Linear regression with elastic-net regularization (wrapper for glmnet)")]
  [Creatable(CreatableAttribute.Categories.DataAnalysisRegression, Priority = 110)]
  [StorableClass]
  public sealed class ElasticNetLinearRegression : FixedDataAnalysisAlgorithm<IRegressionProblem> {
    private const string PenalityParameterName = "Penality";
    private const string LogLambdaParameterName = "Log10(Lambda)";
    #region parameters
    public IFixedValueParameter<DoubleValue> PenalityParameter {
      get { return (IFixedValueParameter<DoubleValue>)Parameters[PenalityParameterName]; }
    }
    public IValueParameter<DoubleValue> LogLambdaParameter {
      get { return (IValueParameter<DoubleValue>)Parameters[LogLambdaParameterName]; }
    }
    #endregion
    #region properties
    public double Penality {
      get { return PenalityParameter.Value.Value; }
      set { PenalityParameter.Value.Value = value; }
    }
    public DoubleValue LogLambda {
      get { return LogLambdaParameter.Value; }
      set { LogLambdaParameter.Value = value; }
    }
    #endregion

    [StorableConstructor]
    private ElasticNetLinearRegression(bool deserializing) : base(deserializing) { }
    private ElasticNetLinearRegression(ElasticNetLinearRegression original, Cloner cloner)
      : base(original, cloner) {
    }
    public ElasticNetLinearRegression()
      : base() {
      Problem = new RegressionProblem();
      Parameters.Add(new FixedValueParameter<DoubleValue>(PenalityParameterName, "Penalty factor for balancing between ridge (0.0) and lasso (1.0) regression", new DoubleValue(0.5)));
      Parameters.Add(new OptionalValueParameter<DoubleValue>(LogLambdaParameterName, "Optional: the value of lambda for which to calculate an elastic-net solution. lambda == null => calculate the whole path of all lambdas"));
    }

    [StorableHook(HookType.AfterDeserialization)]
    private void AfterDeserialization() { }

    public override IDeepCloneable Clone(Cloner cloner) {
      return new ElasticNetLinearRegression(this, cloner);
    }

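    // Dispatch: if LogLambda is unset the whole regularization path over many lambda
    // values is computed; otherwise a single solution for lambda = 10^LogLambda is created.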
    protected override void Run() {
      if (LogLambda == null) {
        CreateSolutionPath();
      } else {
        CreateSolution(LogLambda.Value);
      }
    }

    private void CreateSolution(double logLambda) {
      double trainRsq;
      double testRsq;
      var coeff = CreateElasticNetLinearRegressionSolution(Problem.ProblemData, Penality, Math.Pow(10, logLambda), out trainRsq, out testRsq);
      Results.Add(new Result("R² (train)", new DoubleValue(trainRsq)));
      Results.Add(new Result("R² (test)", new DoubleValue(testRsq)));

      // copied from LR => TODO: reuse code (but skip coefficients = 0.0)
      ISymbolicExpressionTree tree = new SymbolicExpressionTree(new ProgramRootSymbol().CreateTreeNode());
      ISymbolicExpressionTreeNode startNode = new StartSymbol().CreateTreeNode();
      tree.Root.AddSubtree(startNode);
      ISymbolicExpressionTreeNode addition = new Addition().CreateTreeNode();
      startNode.AddSubtree(addition);

      int col = 0;
      foreach (string column in Problem.ProblemData.AllowedInputVariables) {
        if (!coeff[col].IsAlmost(0.0)) {
          VariableTreeNode vNode = (VariableTreeNode)new HeuristicLab.Problems.DataAnalysis.Symbolic.Variable().CreateTreeNode();
          vNode.VariableName = column;
          vNode.Weight = coeff[col];
          addition.AddSubtree(vNode);
        }
        col++;
      }

      if (!coeff[coeff.Length - 1].IsAlmost(0.0)) {
        ConstantTreeNode cNode = (ConstantTreeNode)new Constant().CreateTreeNode();
        cNode.Value = coeff[coeff.Length - 1];
        addition.AddSubtree(cNode);
      }

      SymbolicRegressionSolution solution = new SymbolicRegressionSolution(new SymbolicRegressionModel(tree, new SymbolicDataAnalysisExpressionTreeInterpreter()), (IRegressionProblemData)Problem.ProblemData.Clone());
      solution.Model.Name = "Elastic-net Linear Regression Model";
      solution.Name = "Elastic-net Linear Regression Solution";

      Results.Add(new Result(solution.Name, solution.Description, solution));
    }

    private void CreateSolutionPath() {
      double[] lambda;
      double[] trainRsq;
      double[] testRsq;
      double[,] coeff;
      double[] intercept;
      RunElasticNetLinearRegression(Problem.ProblemData, Penality, out lambda, out trainRsq, out testRsq, out coeff, out intercept);

      var coeffTable = new DataTable("Coefficient Paths", "The paths of standardized coefficient values over different lambda values");
      var nLambdas = lambda.Length;
      var nCoeff = coeff.GetLength(1);
      var dataRows = new DataRow[nCoeff];
      var allowedVars = Problem.ProblemData.AllowedInputVariables.ToArray();
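      // multiply each coefficient by the standard deviation of its variable to get
      // standardized coefficient values that are comparable across variables in the path chart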
      for (int i = 0; i < nCoeff; i++) {
        var coeffId = allowedVars[i];
        double sigma = Problem.ProblemData.Dataset.GetDoubleValues(coeffId).StandardDeviation();
        var path = Enumerable.Range(0, nLambdas).Select(r => coeff[r, i] * sigma).ToArray();
        dataRows[i] = new DataRow(coeffId, coeffId, path);
        coeffTable.Rows.Add(dataRows[i]);
      }

      Results.Add(new Result(coeffTable.Name, coeffTable.Description, coeffTable));

      var rsqPlot = new ScatterPlot("R-Squared", "Path of R² values over different lambda values");
      rsqPlot.VisualProperties.YAxisMaximumAuto = false;
      rsqPlot.VisualProperties.YAxisMinimumAuto = false;
      rsqPlot.VisualProperties.XAxisMaximumAuto = false;
      rsqPlot.VisualProperties.XAxisMinimumAuto = false;

      rsqPlot.VisualProperties.YAxisMinimumFixedValue = 0;
      rsqPlot.VisualProperties.YAxisMaximumFixedValue = 1.0;
      rsqPlot.VisualProperties.XAxisTitle = "Log10(Lambda)";
      rsqPlot.VisualProperties.YAxisTitle = "R²";
      rsqPlot.Rows.Add(new ScatterPlotDataRow("R² (train)", "Path of R² values over different lambda values", lambda.Zip(trainRsq, (l, r) => new Point2D<double>(Math.Log10(l), r))));
      rsqPlot.Rows.Add(new ScatterPlotDataRow("R² (test)", "Path of R² values over different lambda values", lambda.Zip(testRsq, (l, r) => new Point2D<double>(Math.Log10(l), r))));
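      // lambda values are returned in decreasing order; the first (largest) value of an
      // automatically generated path is chosen artificially by glmnet, so the axis maximum
      // is taken from the second value (assumption based on glmnet's documented behavior)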
      if (lambda.Length > 2) {
        rsqPlot.VisualProperties.XAxisMinimumFixedValue = Math.Floor(Math.Log10(lambda.Last()));
        rsqPlot.VisualProperties.XAxisMaximumFixedValue = Math.Ceiling(Math.Log10(lambda.Skip(1).First()));
      }
      rsqPlot.Rows["R² (train)"].VisualProperties.PointSize = 5;
      rsqPlot.Rows["R² (test)"].VisualProperties.PointSize = 5;

      Results.Add(new Result(rsqPlot.Name, rsqPlot.Description, rsqPlot));
    }

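    /// <summary>Calculates a single elastic-net solution for one lambda value and returns the
    /// coefficient vector (the intercept is stored in the last element).</summary>
    /// <example>
    /// Minimal usage sketch; <c>problemData</c> is assumed to be an existing
    /// <c>IRegressionProblemData</c> instance:
    /// <code>
    /// double trainRsq, testRsq;
    /// double[] coeff = ElasticNetLinearRegression.CreateElasticNetLinearRegressionSolution(
    ///   problemData, penalty: 0.5, lambda: 0.1, out trainRsq, out testRsq);
    /// </code>
    /// </example>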
    public static double[] CreateElasticNetLinearRegressionSolution(IRegressionProblemData problemData, double penalty, double lambda,
            out double trainRsq, out double testRsq,
            double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity) {
      double[] trainRsqs;
      double[] testRsqs;
      // run for exactly one lambda
      var coeffs = CreateElasticNetLinearRegressionSolution(problemData, penalty, new double[] { lambda }, out trainRsqs, out testRsqs, coeffLowerBound, coeffUpperBound);
      trainRsq = trainRsqs[0];
      testRsq = testRsqs[0];
      return coeffs[0];
    }
    public static double[][] CreateElasticNetLinearRegressionSolution(IRegressionProblemData problemData, double penalty, double[] lambda,
            out double[] trainRsq, out double[] testRsq,
            double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
            int maxVars = -1) {
      // run for multiple user-supplied lambdas
      double[,] coeff;
      double[] intercept;
      RunElasticNetLinearRegression(problemData, penalty, lambda.Length, 1.0, lambda, out lambda, out trainRsq, out testRsq, out coeff, out intercept, coeffLowerBound, coeffUpperBound, maxVars);

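      // pack one coefficient vector per solution; the intercept is appended as the last element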
      int nRows = intercept.Length;
      int nCols = coeff.GetLength(1) + 1;
      double[][] sols = new double[nRows][];
      for (int solIdx = 0; solIdx < nRows; solIdx++) {
        sols[solIdx] = new double[nCols];
        for (int cIdx = 0; cIdx < nCols - 1; cIdx++) {
          sols[solIdx][cIdx] = coeff[solIdx, cIdx];
        }
        sols[solIdx][nCols - 1] = intercept[solIdx];
      }
      return sols;
    }

    public static void RunElasticNetLinearRegression(IRegressionProblemData problemData, double penalty,
      out double[] lambda, out double[] trainRsq, out double[] testRsq, out double[,] coeff, out double[] intercept,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
      int maxVars = -1
      ) {
      double[] userLambda = new double[0];
      // automatically determine lambda values (maximum 100 different lambda values)
      RunElasticNetLinearRegression(problemData, penalty, 100, 0.0, userLambda, out lambda, out trainRsq, out testRsq, out coeff, out intercept, coeffLowerBound, coeffUpperBound, maxVars);
    }

    /// <summary>
    /// Elastic net with squared-error-loss for dense predictor matrix, runs the full path of all lambdas
    /// </summary>
    /// <param name="problemData">Predictor target matrix x and target vector y</param>
    /// <param name="penalty">Penalty for balance between ridge (0.0) and lasso (1.0) regression</param>
    /// <param name="nlam">Maximum number of lambda values (default 100)</param>
    /// <param name="flmin">User control of lambda values (&lt;1.0 => minimum lambda = flmin * (largest lambda value), >= 1.0 => use supplied lambda values)</param>
    /// <param name="ulam">User supplied lambda values</param>
    /// <param name="lambda">Output lambda values</param>
    /// <param name="trainRsq">Vector of R² values on the training set for each set of coefficients along the path</param>
    /// <param name="testRsq">Vector of R² values on the test set for each set of coefficients along the path</param>
    /// <param name="coeff">Vector of coefficient vectors for each solution along the path</param>
    /// <param name="intercept">Vector of intercepts for each solution along the path</param>
    /// <param name="coeffLowerBound">Optional lower bound for all coefficients</param>
    /// <param name="coeffUpperBound">Optional upper bound for all coefficients</param>
    /// <param name="maxVars">Maximum allowed number of variables in each solution along the path (-1 => all variables are allowed)</param>
    private static void RunElasticNetLinearRegression(IRegressionProblemData problemData, double penalty,
      int nlam, double flmin, double[] ulam, out double[] lambda, out double[] trainRsq, out double[] testRsq, out double[,] coeff, out double[] intercept,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
      int maxVars = -1
      ) {
      if (penalty < 0.0 || penalty > 1.0) throw new ArgumentException("0 <= penalty <= 1", "penalty");

      double[,] trainX;
      double[,] testX;

      double[] trainY;
      double[] testY;
      int numTrainObs, numTestObs;
      int numVars;
      PrepareData(problemData, out trainX, out trainY, out numTrainObs, out testX, out testY, out numTestObs, out numVars);

      int ka = 1; // => covariance updating algorithm
      double parm = penalty;
      double[] w = Enumerable.Repeat(1.0, numTrainObs).ToArray(); // all observations have the same weight
      int[] jd = new int[1]; // do not force to use any of the variables
      double[] vp = Enumerable.Repeat(1.0, numVars).ToArray(); // all predictor variables are penalized equally (vp(j) = 0 would leave variable j unpenalized)
      double[,] cl = new double[numVars, 2]; // use the same bounds for all coefficients
      for (int i = 0; i < numVars; i++) {
        cl[i, 0] = coeffLowerBound;
        cl[i, 1] = coeffUpperBound;
      }

      int ne = maxVars > 0 ? maxVars : numVars;
      int nx = numVars;
      double thr = 1.0e-5; // default value as recommended in glmnet
      int isd = 1; // => regression on standardized predictor variables
      int intr = 1; // => do include intercept in model
      int maxit = 100000; // default value as recommended in glmnet
      // outputs
      int lmu = -1;
      double[,] ca;
      int[] ia;
      int[] nin;
      int nlp = -99;
      int jerr = -99;

      elnet(ka, parm, numTrainObs, numVars, trainX, trainY, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, out lmu, out intercept, out ca, out ia, out nin, out trainRsq, out lambda, out nlp, out jerr);

      testRsq = new double[lmu];
      coeff = new double[lmu, numVars];
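      // glmnet returns each solution's coefficients in compressed form: ca holds only the
      // nin active values and ia maps them back to the original variable indices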
      for (int solIdx = 0; solIdx < lmu; solIdx++) {
        // uncompress coefficients of solution
        int selectedNin = nin[solIdx];
        double[] coefficients;
        double[] selectedCa = new double[nx];
        for (int i = 0; i < nx; i++) {
          selectedCa[i] = ca[solIdx, i];
        }

        // apply to test set to calculate test R² values for each lambda step
        double[] fn;
        modval(intercept[solIdx], selectedCa, ia, selectedNin, numTestObs, testX, out fn);
        OnlineCalculatorError error;
        var r = OnlinePearsonsRCalculator.Calculate(testY, fn, out error);
        if (error != OnlineCalculatorError.None) r = 0;
        testRsq[solIdx] = r * r;

        // uncompress coefficients
        uncomp(numVars, selectedCa, ia, selectedNin, out coefficients);
        for (int i = 0; i < coefficients.Length; i++) {
          coeff[solIdx, i] = coefficients[i];
        }
      }
    }

    private static void PrepareData(IRegressionProblemData problemData, out double[,] trainX, out double[] trainY, out int numTrainObs,
      out double[,] testX, out double[] testY, out int numTestObs, out int numVars) {
      numVars = problemData.AllowedInputVariables.Count();
      numTrainObs = problemData.TrainingIndices.Count();
      numTestObs = problemData.TestIndices.Count();

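      // the matrices are filled as [variable, observation]; in C#'s row-major layout this
      // matches the column-major x(no, ni) matrix that the native Fortran routine expects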
      trainX = new double[numVars, numTrainObs];
      trainY = new double[numTrainObs];
      testX = new double[numVars, numTestObs];
      testY = new double[numTestObs];
      var ds = problemData.Dataset;
      var targetVar = problemData.TargetVariable;
      // train
      int rIdx = 0;
      foreach (var row in problemData.TrainingIndices) {
        int cIdx = 0;
        foreach (var var in problemData.AllowedInputVariables) {
          trainX[cIdx, rIdx] = ds.GetDoubleValue(var, row);
          cIdx++;
        }
        trainY[rIdx] = ds.GetDoubleValue(targetVar, row);
        rIdx++;
      }
      // test
      rIdx = 0;
      foreach (var row in problemData.TestIndices) {
        int cIdx = 0;
        foreach (var var in problemData.AllowedInputVariables) {
          testX[cIdx, rIdx] = ds.GetDoubleValue(var, row);
          cIdx++;
        }
        testY[rIdx] = ds.GetDoubleValue(targetVar, row);
        rIdx++;
      }
    }

    #region dllimport
    /// <summary>Wrapper for elnet procedure in glmnet library</summary>
    /// (see: https://cran.r-project.org/web/packages/glmnet/index.html)
    ///
    ///  ka = algorithm flag
    ///       ka = 1 => covariance updating algorithm
    ///       ka = 2 => naive algorithm
    ///  parm = penalty member index (0 &lt;= parm &lt;= 1)
    ///       parm = 0.0 => ridge
    ///       parm = 1.0 => lasso
    ///  no = number of observations
    ///  ni = number of predictor variables
    ///  y(no) = response vector (overwritten)
    ///  w(no) = observation weights (overwritten)
    ///  jd(jd(1)+1) = predictor variable deletion flag
    ///       jd(1) = 0 => use all variables
    ///       jd(1) != 0 => do not use variables jd(2)...jd(jd(1)+1)
    ///  vp(ni) = relative penalties for each predictor variable
    ///       vp(j) = 0 => jth variable unpenalized
    ///  cl(2, ni) = interval constraints on coefficient values (overwritten)
    ///       cl(1, j) = lower bound for jth coefficient value (&lt;= 0.0)
    ///       cl(2, j) = upper bound for jth coefficient value (>= 0.0)
    ///  ne = maximum number of variables allowed to enter largest model
    ///       (stopping criterion)
    ///  nx = maximum number of variables allowed to enter all models
    ///       along path (memory allocation, nx > ne)
    ///  nlam = (maximum) number of lambda values
    ///  flmin = user control of lambda values (>= 0)
    ///       flmin &lt; 1.0 => minimum lambda = flmin * (largest lambda value)
    ///       flmin >= 1.0 => use supplied lambda values (see below)
    ///  ulam(nlam) = user supplied lambda values (ignored if flmin &lt; 1.0)
    ///  thr = convergence threshold for each lambda solution.
    ///       iterations stop when the maximum reduction in the criterion value
    ///       as a result of each parameter update over a single pass
    ///       is less than thr times the null criterion value
    ///       (suggested value, thr = 1.0e-5)
    ///  isd = predictor variable standardization flag:
    ///       isd = 0 => regression on original predictor variables
    ///       isd = 1 => regression on standardized predictor variables
    ///       Note: output solutions always reference original
    ///             variables locations and scales.
    ///  intr = intercept flag
    ///       intr = 0 / 1 => don't/do include intercept in model
    ///  maxit = maximum allowed number of passes over the data for all lambda
    ///       values (suggested value, maxit = 100000)
    ///
    ///  output:
    ///
    ///  lmu = actual number of lambda values (solutions)
    ///  a0(lmu) = intercept values for each solution
    ///  ca(nx, lmu) = compressed coefficient values for each solution
    ///  ia(nx) = pointers to compressed coefficients
    ///  nin(lmu) = number of compressed coefficients for each solution
    ///  rsq(lmu) = R**2 values for each solution
    ///  alm(lmu) = lambda values corresponding to each solution
    ///  nlp = actual number of passes over the data for all lambda values
    ///  jerr = error flag:
    ///       jerr = 0 => no error
    ///       jerr > 0 => fatal error - no output returned
    ///            jerr &lt; 7777 => memory allocation error
    ///            jerr = 7777 => all used predictors have zero variance
    ///            jerr = 10000 => maxval(vp) &lt;= 0.0
    ///       jerr &lt; 0 => non-fatal error - partial output:
    ///            solutions for larger lambdas (1:(k-1)) returned
    ///            jerr = -k => convergence for kth lambda value not reached
    ///                 after maxit (see above) iterations
    ///            jerr = -10000 - k => number of non-zero coefficients along path
    ///                 exceeds nx (see above) at kth lambda value
    ///
    private static void elnet(
      int ka,
      double parm,
      int no,
      int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      int ne,
      int nx,
      int nlam,
      double flmin,
      double[] ulam,
      double thr,
      int isd,
      int intr,
      int maxit,
      // outputs
      out int lmu,
      out double[] a0,
      out double[,] ca,
      out int[] ia,
      out int[] nin,
      out double[] rsq,
      out double[] alm,
      out int nlp,
      out int jerr
      ) {
      // initialize output values and allocate arrays big enough
      a0 = new double[nlam];
      ca = new double[nlam, nx];
      ia = new int[nx];
      nin = new int[nlam];
      rsq = new double[nlam];
      alm = new double[nlam];
      nlp = -1;
      jerr = -1;
      lmu = -1;

      // load correct version of native dll based on process (x86/x64)
      if (Environment.Is64BitProcess) {
        elnet_x64(ref ka, ref parm, ref no, ref ni, x, y, w, jd, vp, cl, ref ne, ref nx, ref nlam, ref flmin, ulam, ref thr, ref isd, ref intr, ref maxit, ref lmu, a0, ca, ia, nin, rsq, alm, ref nlp, ref jerr);
      } else {
        elnet_x86(ref ka, ref parm, ref no, ref ni, x, y, w, jd, vp, cl, ref ne, ref nx, ref nlam, ref flmin, ulam, ref thr, ref isd, ref intr, ref maxit, ref lmu, a0, ca, ia, nin, rsq, alm, ref nlp, ref jerr);
      }
      // interpret the jerr error flag (documented in the XML comment above)
      if (jerr != 0) {
        if (jerr > 0 && jerr < 7777) throw new InvalidOperationException("glmnet: memory allocation error");
        else if (jerr == 7777) throw new InvalidOperationException("glmnet: all used predictors have zero variance");
        else if (jerr == 10000) throw new InvalidOperationException("glmnet: maxval(vp) <= 0.0");
        else if (jerr < 0 && jerr > -10000) throw new InvalidOperationException(string.Format("glmnet: convergence for {0}th lambda value not reached after maxit iterations", -jerr));
        else if (jerr <= -10000) throw new InvalidOperationException(string.Format("glmnet: number of non-zero coefficients along path exceeds number of maximally allowed variables (nx) at {0}th lambda value", -jerr - 10000));
        else throw new InvalidOperationException(string.Format("glmnet: error {0}", jerr));
      }

      // resize arrays to the capacity that is actually necessary for the results
      Array.Resize(ref a0, lmu);
      Array.Resize(ref nin, lmu);
      Array.Resize(ref rsq, lmu);
      Array.Resize(ref alm, lmu);
    }

    [DllImport("glmnet-x86.dll", EntryPoint = "elnet_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void elnet_x86(
      ref int ka,
      ref double parm,
      ref int no,
      ref int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      ref int ne,
      ref int nx,
      ref int nlam,
      ref double flmin,
      double[] ulam,
      ref double thr,
      ref int isd,
      ref int intr,
      ref int maxit,
      // outputs:
      ref int lmu,
      [Out] double[] a0,
      [Out] double[,] ca,
      [Out] int[] ia,
      [Out] int[] nin,
      [Out] double[] rsq,
      [Out] double[] alm,
      ref int nlp,
      ref int jerr
      );
    [DllImport("glmnet-x64.dll", EntryPoint = "elnet_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void elnet_x64(
      ref int ka,
      ref double parm,
      ref int no,
      ref int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      ref int ne,
      ref int nx,
      ref int nlam,
      ref double flmin,
      double[] ulam,
      ref double thr,
      ref int isd,
      ref int intr,
      ref int maxit,
      // outputs:
      ref int lmu,
      [Out] double[] a0,
      [Out] double[,] ca,
      [Out] int[] ia,
      [Out] int[] nin,
      [Out] double[] rsq,
      [Out] double[] alm,
      ref int nlp,
      ref int jerr
      );

    /// <summary>Wrapper for the uncomp procedure in glmnet (uncompresses the coefficient vector of a particular solution)</summary>
    /// (see: https://cran.r-project.org/web/packages/glmnet/index.html)
    ///
    /// call uncomp(ni, ca, ia, nin, a)
    ///
    /// input:
    ///
    ///    ni = total number of predictor variables
    ///    ca(nx) = compressed coefficient values for the solution
    ///    ia(nx) = pointers to compressed coefficients
    ///    nin = number of compressed coefficients for the solution
    ///
    /// output:
    ///
    ///    a(ni) = uncompressed coefficient vector
    ///            referencing original variables
    ///
    private static void uncomp(int numVars, double[] ca, int[] ia, int nin, out double[] a) {
      a = new double[numVars];
      // load correct version of native dll based on process (x86/x64)
      if (Environment.Is64BitProcess) {
        uncomp_x64(ref numVars, ca, ia, ref nin, a);
      } else {
        uncomp_x86(ref numVars, ca, ia, ref nin, a);
      }
    }

    [DllImport("glmnet-x86.dll", EntryPoint = "uncomp_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void uncomp_x86(ref int numVars, double[] ca, int[] ia, ref int nin, double[] a);
    [DllImport("glmnet-x64.dll", EntryPoint = "uncomp_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void uncomp_x64(ref int numVars, double[] ca, int[] ia, ref int nin, double[] a);

    private static void modval(double a0, double[] ca, int[] ia, int nin, int numObs, double[,] x, out double[] fn) {
      fn = new double[numObs];
      if (Environment.Is64BitProcess) {
        modval_x64(ref a0, ca, ia, ref nin, ref numObs, x, fn);
      } else {
        modval_x86(ref a0, ca, ia, ref nin, ref numObs, x, fn);
      }
    }
    // evaluate linear model from compressed coefficients and
    // uncompressed predictor matrix:
    //
    // call modval(a0, ca, ia, nin, n, x, f)
    //
    // input:
    //
    //    a0 = intercept
    //    ca(nx) = compressed coefficient values for a solution
    //    ia(nx) = pointers to compressed coefficients
    //    nin = number of compressed coefficients for solution
    //    n = number of predictor vectors (observations)
    //    x(n, ni) = full (uncompressed) predictor matrix
    //
    // output:
    //
    //    f(n) = model predictions
    [DllImport("glmnet-x86.dll", EntryPoint = "modval_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void modval_x86(ref double a0, double[] ca, int[] ia, ref int nin, ref int numObs, double[,] x, [Out] double[] fn);
    [DllImport("glmnet-x64.dll", EntryPoint = "modval_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void modval_x64(ref double a0, double[] ca, int[] ia, ref int nin, ref int numObs, double[,] x, [Out] double[] fn);

    #endregion
  }
}