using System;
using System.Linq;
using System.Runtime.InteropServices;
using HeuristicLab.Analysis;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Data;
using HeuristicLab.Encodings.SymbolicExpressionTreeEncoding;
using HeuristicLab.Optimization;
using HeuristicLab.Parameters;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
using HeuristicLab.Problems.DataAnalysis;
using HeuristicLab.Problems.DataAnalysis.Symbolic;
using HeuristicLab.Problems.DataAnalysis.Symbolic.Regression;

namespace HeuristicLab.Algorithms.DataAnalysis.Glmnet {
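  /// <summary>
  /// Linear regression with elastic-net regularization, implemented as a wrapper around the native
  /// glmnet library. Depending on whether a fixed lambda value is given, either a single solution
  /// or the whole regularization path is computed.
  /// </summary>
  /// <example>
  /// A minimal usage sketch (assuming a configured HeuristicLab environment; the parameter values
  /// are illustrative):
  /// <code>
  /// var lr = new ElasticNetLinearRegression();
  /// lr.Penality = 0.5;                   // balance between ridge (0.0) and lasso (1.0)
  /// lr.LogLambda = new DoubleValue(-2);  // fit a single solution for lambda = 10^-2;
  ///                                      // leave LogLambda == null to compute the whole path
  /// </code>
  /// </example>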
  [Item("Elastic-net Linear Regression (LR)", "Linear regression with elastic-net regularization (wrapper for glmnet)")]
  [Creatable(CreatableAttribute.Categories.DataAnalysisRegression, Priority = 110)]
  [StorableClass]
  public sealed class ElasticNetLinearRegression : FixedDataAnalysisAlgorithm<IRegressionProblem> {
    // note: the misspelling "Penality" is kept unchanged because the string serves as the parameter's lookup key
    private const string PenalityParameterName = "Penality";
    private const string LogLambdaParameterName = "Log10(Lambda)";
    #region parameters
    public IFixedValueParameter<DoubleValue> PenalityParameter {
      get { return (IFixedValueParameter<DoubleValue>)Parameters[PenalityParameterName]; }
    }
    public IValueParameter<DoubleValue> LogLambdaParameter {
      get { return (IValueParameter<DoubleValue>)Parameters[LogLambdaParameterName]; }
    }
    #endregion

    #region properties
    public double Penality {
      get { return PenalityParameter.Value.Value; }
      set { PenalityParameter.Value.Value = value; }
    }
    public DoubleValue LogLambda {
      get { return LogLambdaParameter.Value; }
      set { LogLambdaParameter.Value = value; }
    }
    #endregion

    [StorableConstructor]
    private ElasticNetLinearRegression(bool deserializing) : base(deserializing) { }
    private ElasticNetLinearRegression(ElasticNetLinearRegression original, Cloner cloner)
      : base(original, cloner) {
    }
    public ElasticNetLinearRegression()
      : base() {
      Problem = new RegressionProblem();
      Parameters.Add(new FixedValueParameter<DoubleValue>(PenalityParameterName, "Penalty factor for balancing between ridge (0.0) and lasso (1.0) regression", new DoubleValue(0.5)));
      Parameters.Add(new OptionalValueParameter<DoubleValue>(LogLambdaParameterName, "Optional: the value of lambda for which to calculate an elastic-net solution. lambda == null => calculate the whole path of all lambdas"));
    }

    [StorableHook(HookType.AfterDeserialization)]
    private void AfterDeserialization() { }

    public override IDeepCloneable Clone(Cloner cloner) {
      return new ElasticNetLinearRegression(this, cloner);
    }

    protected override void Run() {
      // no lambda given => compute the whole regularization path,
      // otherwise fit a single solution for lambda = 10^LogLambda
      if (LogLambda == null) {
        CreateSolutionPath();
      } else {
        CreateSolution(LogLambda.Value);
      }
    }

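    /// <summary>
    /// Fits a single elastic-net solution for lambda = 10^logLambda and adds the training/test NMSE
    /// values as well as the resulting symbolic regression solution to the results collection.
    /// </summary>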
    private void CreateSolution(double logLambda) {
      double trainNMSE;
      double testNMSE;
      var coeff = CreateElasticNetLinearRegressionSolution(Problem.ProblemData, Penality, Math.Pow(10, logLambda), out trainNMSE, out testNMSE);
      Results.Add(new Result("NMSE (train)", new DoubleValue(trainNMSE)));
      Results.Add(new Result("NMSE (test)", new DoubleValue(testNMSE)));

      // copied from LR => TODO: reuse code (but skip coefficients = 0.0)
      // build a symbolic expression tree that represents the linear model as a sum of weighted variables plus a constant
      ISymbolicExpressionTree tree = new SymbolicExpressionTree(new ProgramRootSymbol().CreateTreeNode());
      ISymbolicExpressionTreeNode startNode = new StartSymbol().CreateTreeNode();
      tree.Root.AddSubtree(startNode);
      ISymbolicExpressionTreeNode addition = new Addition().CreateTreeNode();
      startNode.AddSubtree(addition);

      int col = 0;
      foreach (string column in Problem.ProblemData.AllowedInputVariables) {
        if (!coeff[col].IsAlmost(0.0)) {
          VariableTreeNode vNode = (VariableTreeNode)new HeuristicLab.Problems.DataAnalysis.Symbolic.Variable().CreateTreeNode();
          vNode.VariableName = column;
          vNode.Weight = coeff[col];
          addition.AddSubtree(vNode);
        }
        col++;
      }

      // the last entry of coeff is the intercept
      if (!coeff[coeff.Length - 1].IsAlmost(0.0)) {
        ConstantTreeNode cNode = (ConstantTreeNode)new Constant().CreateTreeNode();
        cNode.Value = coeff[coeff.Length - 1];
        addition.AddSubtree(cNode);
      }

      SymbolicRegressionSolution solution = new SymbolicRegressionSolution(
        new SymbolicRegressionModel(Problem.ProblemData.TargetVariable, tree, new SymbolicDataAnalysisExpressionTreeInterpreter()),
        (IRegressionProblemData)Problem.ProblemData.Clone());
      solution.Model.Name = "Elastic-net Linear Regression Model";
      solution.Name = "Elastic-net Linear Regression Solution";

      Results.Add(new Result(solution.Name, solution.Description, solution));
    }

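    /// <summary>
    /// Computes the whole regularization path and adds a table of standardized coefficient paths
    /// and a scatter plot of training/test NMSE values over Log10(lambda) to the results collection.
    /// </summary>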
    private void CreateSolutionPath() {
      double[] lambda;
      double[] trainNMSE;
      double[] testNMSE;
      double[,] coeff;
      double[] intercept;
      RunElasticNetLinearRegression(Problem.ProblemData, Penality, out lambda, out trainNMSE, out testNMSE, out coeff, out intercept);

      var coeffTable = new DataTable("Coefficient Paths", "The paths of standardized coefficient values over different lambda values");
      var nLambdas = lambda.Length;
      var nCoeff = coeff.GetLength(1);
      var dataRows = new DataRow[nCoeff];
      var allowedVars = Problem.ProblemData.AllowedInputVariables.ToArray();
      for (int i = 0; i < nCoeff; i++) {
        var coeffId = allowedVars[i];
        // glmnet reports coefficients on the original variable scales; multiplying by the variable's
        // standard deviation yields the standardized coefficient that is plotted here
        double sigma = Problem.ProblemData.Dataset.GetDoubleValues(coeffId).StandardDeviation();
        var path = Enumerable.Range(0, nLambdas).Select(r => coeff[r, i] * sigma).ToArray();
        dataRows[i] = new DataRow(coeffId, coeffId, path);
        coeffTable.Rows.Add(dataRows[i]);
      }

      Results.Add(new Result(coeffTable.Name, coeffTable.Description, coeffTable));

      var nmsePlot = new ScatterPlot("NMSE", "Path of NMSE values over different lambda values");
      nmsePlot.VisualProperties.YAxisMaximumAuto = false;
      nmsePlot.VisualProperties.YAxisMinimumAuto = false;
      nmsePlot.VisualProperties.XAxisMaximumAuto = false;
      nmsePlot.VisualProperties.XAxisMinimumAuto = false;

      nmsePlot.VisualProperties.YAxisMinimumFixedValue = 0;
      nmsePlot.VisualProperties.YAxisMaximumFixedValue = 1.0;
      nmsePlot.VisualProperties.XAxisTitle = "Log10(Lambda)";
      nmsePlot.VisualProperties.YAxisTitle = "Normalized mean of squared errors (NMSE)";
      nmsePlot.Rows.Add(new ScatterPlotDataRow("NMSE (train)", "Path of NMSE values over different lambda values", lambda.Zip(trainNMSE, (l, v) => new Point2D<double>(Math.Log10(l), v))));
      nmsePlot.Rows.Add(new ScatterPlotDataRow("NMSE (test)", "Path of NMSE values over different lambda values", lambda.Zip(testNMSE, (l, v) => new Point2D<double>(Math.Log10(l), v))));
      if (lambda.Length > 2) {
        // lambda values are produced in decreasing order; the first entry is an artificially large
        // starting value, so the axis range is derived from the second and the last entry
        nmsePlot.VisualProperties.XAxisMinimumFixedValue = Math.Floor(Math.Log10(lambda.Last()));
        nmsePlot.VisualProperties.XAxisMaximumFixedValue = Math.Ceiling(Math.Log10(lambda.Skip(1).First()));
      }
      nmsePlot.Rows["NMSE (train)"].VisualProperties.PointSize = 5;
      nmsePlot.Rows["NMSE (test)"].VisualProperties.PointSize = 5;

      Results.Add(new Result(nmsePlot.Name, nmsePlot.Description, nmsePlot));
    }

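    /// <summary>
    /// Creates an elastic-net solution for exactly one lambda value and returns the coefficient
    /// vector; the last entry of the vector is the intercept.
    /// </summary>
    /// <example>
    /// A minimal sketch (assuming an IRegressionProblemData instance named problemData):
    /// <code>
    /// double trainNMSE, testNMSE;
    /// // penalty = 0.5, lambda = 0.01
    /// double[] coeff = CreateElasticNetLinearRegressionSolution(problemData, 0.5, 0.01, out trainNMSE, out testNMSE);
    /// </code>
    /// </example>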
    public static double[] CreateElasticNetLinearRegressionSolution(IRegressionProblemData problemData, double penalty, double lambda,
      out double trainNMSE, out double testNMSE,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity) {
      double[] trainNMSEs;
      double[] testNMSEs;
      // run for exactly one lambda
      var coeffs = CreateElasticNetLinearRegressionSolution(problemData, penalty, new double[] { lambda }, out trainNMSEs, out testNMSEs, coeffLowerBound, coeffUpperBound);
      trainNMSE = trainNMSEs[0];
      testNMSE = testNMSEs[0];
      return coeffs[0];
    }
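    /// <summary>
    /// Creates elastic-net solutions for multiple user-supplied lambda values. Returns one
    /// coefficient vector per lambda value; the last entry of each vector is the intercept.
    /// </summary>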
    public static double[][] CreateElasticNetLinearRegressionSolution(IRegressionProblemData problemData, double penalty, double[] lambda,
      out double[] trainNMSE, out double[] testNMSE,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
      int maxVars = -1) {
      // run for multiple user-supplied lambdas
      double[,] coeff;
      double[] intercept;
      RunElasticNetLinearRegression(problemData, penalty, lambda.Length, 1.0, lambda, out lambda, out trainNMSE, out testNMSE, out coeff, out intercept, coeffLowerBound, coeffUpperBound, maxVars);

      // append the intercept to each coefficient vector
      int nRows = intercept.Length;
      int nCols = coeff.GetLength(1) + 1;
      double[][] sols = new double[nRows][];
      for (int solIdx = 0; solIdx < nRows; solIdx++) {
        sols[solIdx] = new double[nCols];
        for (int cIdx = 0; cIdx < nCols - 1; cIdx++) {
          sols[solIdx][cIdx] = coeff[solIdx, cIdx];
        }
        sols[solIdx][nCols - 1] = intercept[solIdx];
      }
      return sols;
    }

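    /// <summary>
    /// Runs elastic-net linear regression with an automatically determined path of (at most 100)
    /// lambda values; see the private overload below for a description of the output parameters.
    /// </summary>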
    public static void RunElasticNetLinearRegression(IRegressionProblemData problemData, double penalty,
      out double[] lambda, out double[] trainNMSE, out double[] testNMSE, out double[,] coeff, out double[] intercept,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
      int maxVars = -1
      ) {
      double[] userLambda = new double[0];
      // automatically determine lambda values (maximum 100 different lambda values)
      RunElasticNetLinearRegression(problemData, penalty, 100, 0.0, userLambda, out lambda, out trainNMSE, out testNMSE, out coeff, out intercept, coeffLowerBound, coeffUpperBound, maxVars);
    }

    /// <summary>
    /// Elastic net with squared-error loss for a dense predictor matrix; runs the full path of all lambdas.
    /// </summary>
    /// <param name="problemData">Provides the predictor matrix x and the target vector y</param>
    /// <param name="penalty">Penalty for the balance between ridge (0.0) and lasso (1.0) regression</param>
    /// <param name="nlam">Maximum number of lambda values (default 100)</param>
    /// <param name="flmin">User control of lambda values (&lt; 1.0 => minimum lambda = flmin * (largest lambda value); >= 1.0 => use the supplied lambda values)</param>
    /// <param name="ulam">User-supplied lambda values</param>
    /// <param name="lambda">Output lambda values</param>
    /// <param name="trainNMSE">Vector of normalized mean of squared error (NMSE = Variance(res) / Variance(y)) values on the training set for each set of coefficients along the path</param>
    /// <param name="testNMSE">Vector of normalized mean of squared error (NMSE = Variance(res) / Variance(y)) values on the test set for each set of coefficients along the path</param>
    /// <param name="coeff">Vector of coefficient vectors for each solution along the path</param>
    /// <param name="intercept">Vector of intercepts for each solution along the path</param>
    /// <param name="coeffLowerBound">Optional lower bound for all coefficients</param>
    /// <param name="coeffUpperBound">Optional upper bound for all coefficients</param>
    /// <param name="maxVars">Maximum allowed number of variables in each solution along the path (-1 => all variables are allowed)</param>
    private static void RunElasticNetLinearRegression(IRegressionProblemData problemData, double penalty,
      int nlam, double flmin, double[] ulam, out double[] lambda, out double[] trainNMSE, out double[] testNMSE, out double[,] coeff, out double[] intercept,
      double coeffLowerBound = double.NegativeInfinity, double coeffUpperBound = double.PositiveInfinity,
      int maxVars = -1
      ) {
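      // overall flow: (1) copy the data into dense matrices, (2) call the native elnet routine,
      // (3) uncompress the coefficients of each solution along the path and compute its test NMSE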
      if (penalty < 0.0 || penalty > 1.0) throw new ArgumentException("0 <= penalty <= 1", "penalty");

      double[,] trainX;
      double[,] testX;

      double[] trainY;
      double[] testY;
      int numTrainObs, numTestObs;
      int numVars;
      PrepareData(problemData, out trainX, out trainY, out numTrainObs, out testX, out testY, out numTestObs, out numVars);

      int ka = 1; // => covariance updating algorithm
      double parm = penalty;
      double[] w = Enumerable.Repeat(1.0, numTrainObs).ToArray(); // all observations have the same weight
      int[] jd = new int[1]; // do not force to use any of the variables
      double[] vp = Enumerable.Repeat(1.0, numVars).ToArray(); // all predictor variables are penalized equally (vp(j) = 0 would leave the jth variable unpenalized)
      double[,] cl = new double[numVars, 2]; // use the same bounds for all coefficients
      for (int i = 0; i < numVars; i++) {
        cl[i, 0] = coeffLowerBound;
        cl[i, 1] = coeffUpperBound;
      }

      int ne = maxVars > 0 ? maxVars : numVars;
      int nx = numVars;
      double thr = 1.0e-5; // default value as recommended in glmnet
      int isd = 1; // => regression on standardized predictor variables
      int intr = 1; // => do include intercept in model
      int maxit = 100000; // default value as recommended in glmnet
      // outputs
      int lmu = -1;
      double[,] ca;
      int[] ia;
      int[] nin;
      int nlp = -99;
      int jerr = -99;
      double[] trainR2;
      elnet(ka, parm, numTrainObs, numVars, trainX, trainY, w, jd, vp, cl, ne, nx, nlam, flmin, ulam, thr, isd, intr, maxit, out lmu, out intercept, out ca, out ia, out nin, out trainR2, out lambda, out nlp, out jerr);

      trainNMSE = new double[lmu]; // elnet returns R**2 = 1 - NMSE, so NMSE = 1 - R**2
      testNMSE = new double[lmu];
      coeff = new double[lmu, numVars];
      for (int solIdx = 0; solIdx < lmu; solIdx++) {
        trainNMSE[solIdx] = 1.0 - trainR2[solIdx];

        // extract the compressed coefficients of this solution
        int selectedNin = nin[solIdx];
        double[] coefficients;
        double[] selectedCa = new double[nx];
        for (int i = 0; i < nx; i++) {
          selectedCa[i] = ca[solIdx, i];
        }

        // apply to test set to calculate test NMSE values for each lambda step
        double[] fn;
        modval(intercept[solIdx], selectedCa, ia, selectedNin, numTestObs, testX, out fn);
        OnlineCalculatorError error;
        var nmse = OnlineNormalizedMeanSquaredErrorCalculator.Calculate(testY, fn, out error);
        if (error != OnlineCalculatorError.None) nmse = double.MaxValue;
        testNMSE[solIdx] = nmse;

        // uncompress coefficients
        uncomp(numVars, selectedCa, ia, selectedNin, out coefficients);
        for (int i = 0; i < coefficients.Length; i++) {
          coeff[solIdx, i] = coefficients[i];
        }
      }
    }

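    /// <summary>
    /// Copies the training and test partitions into dense arrays laid out as [variable, observation].
    /// Because C# arrays are row-major, this memory layout matches the column-major
    /// (observation, variable) matrix that the Fortran elnet routine expects.
    /// </summary>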
    private static void PrepareData(IRegressionProblemData problemData, out double[,] trainX, out double[] trainY, out int numTrainObs,
      out double[,] testX, out double[] testY, out int numTestObs, out int numVars) {
      numVars = problemData.AllowedInputVariables.Count();
      numTrainObs = problemData.TrainingIndices.Count();
      numTestObs = problemData.TestIndices.Count();

      trainX = new double[numVars, numTrainObs];
      trainY = new double[numTrainObs];
      testX = new double[numVars, numTestObs];
      testY = new double[numTestObs];
      var ds = problemData.Dataset;
      var targetVar = problemData.TargetVariable;
      // train
      int rIdx = 0;
      foreach (var row in problemData.TrainingIndices) {
        int cIdx = 0;
        foreach (var inputVariable in problemData.AllowedInputVariables) {
          trainX[cIdx, rIdx] = ds.GetDoubleValue(inputVariable, row);
          cIdx++;
        }
        trainY[rIdx] = ds.GetDoubleValue(targetVar, row);
        rIdx++;
      }
      // test
      rIdx = 0;
      foreach (var row in problemData.TestIndices) {
        int cIdx = 0;
        foreach (var inputVariable in problemData.AllowedInputVariables) {
          testX[cIdx, rIdx] = ds.GetDoubleValue(inputVariable, row);
          cIdx++;
        }
        testY[rIdx] = ds.GetDoubleValue(targetVar, row);
        rIdx++;
      }
    }


    #region dllimport
    /// <summary>Wrapper for the elnet procedure in the glmnet library</summary>
    /// (see: https://cran.r-project.org/web/packages/glmnet/index.html)
    ///
    /// ka = algorithm flag
    ///      ka=1 => covariance updating algorithm
    ///      ka=2 => naive algorithm
    /// parm = penalty member index (0 <= parm <= 1)
    ///      = 0.0 => ridge
    ///      = 1.0 => lasso
    /// no = number of observations
    /// ni = number of predictor variables
    /// y(no) = response vector (overwritten)
    /// w(no) = observation weights (overwritten)
    /// jd(jd(1)+1) = predictor variable deletion flag
    ///      jd(1) = 0 => use all variables
    ///      jd(1) != 0 => do not use variables jd(2)...jd(jd(1)+1)
    /// vp(ni) = relative penalties for each predictor variable
    ///      vp(j) = 0 => jth variable unpenalized
    /// cl(2, ni) = interval constraints on coefficient values (overwritten)
    ///      cl(1, j) = lower bound for jth coefficient value (<= 0.0)
    ///      cl(2, j) = upper bound for jth coefficient value (>= 0.0)
    /// ne = maximum number of variables allowed to enter largest model
    ///      (stopping criterion)
    /// nx = maximum number of variables allowed to enter all models
    ///      along path (memory allocation, nx > ne).
    /// nlam = (maximum) number of lambda values
    /// flmin = user control of lambda values (>= 0)
    ///      flmin < 1.0 => minimum lambda = flmin * (largest lambda value)
    ///      flmin >= 1.0 => use supplied lambda values (see below)
    /// ulam(nlam) = user-supplied lambda values (ignored if flmin < 1.0)
    /// thr = convergence threshold for each lambda solution.
    ///      iterations stop when the maximum reduction in the criterion value
    ///      as a result of each parameter update over a single pass
    ///      is less than thr times the null criterion value.
    ///      (suggested value, thr = 1.0e-5)
    /// isd = predictor variable standardization flag:
    ///      isd = 0 => regression on original predictor variables
    ///      isd = 1 => regression on standardized predictor variables
    ///      Note: output solutions always reference original
    ///            variables locations and scales.
    /// intr = intercept flag
    ///      intr = 0 / 1 => don't/do include intercept in model
    /// maxit = maximum allowed number of passes over the data for all lambda
    ///      values (suggested value, maxit = 100000)
    ///
    /// output:
    ///
    /// lmu = actual number of lambda values (solutions)
    /// a0(lmu) = intercept values for each solution
    /// ca(nx, lmu) = compressed coefficient values for each solution
    /// ia(nx) = pointers to compressed coefficients
    /// nin(lmu) = number of compressed coefficients for each solution
    /// rsq(lmu) = R**2 values for each solution
    /// alm(lmu) = lambda values corresponding to each solution
    /// nlp = actual number of passes over the data for all lambda values
    /// jerr = error flag:
    ///      jerr = 0 => no error
    ///      jerr > 0 => fatal error - no output returned
    ///           jerr < 7777 => memory allocation error
    ///           jerr = 7777 => all used predictors have zero variance
    ///           jerr = 10000 => maxval(vp) <= 0.0
    ///      jerr < 0 => non-fatal error - partial output:
    ///           solutions for larger lambdas (1:(k-1)) returned.
    ///           jerr = -k => convergence for kth lambda value not reached
    ///              after maxit (see above) iterations.
    ///           jerr = -10000 - k => number of non-zero coefficients along path
    ///              exceeds nx (see above) at kth lambda value.
    ///
    private static void elnet(
      int ka,
      double parm,
      int no,
      int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      int ne,
      int nx,
      int nlam,
      double flmin,
      double[] ulam,
      double thr,
      int isd,
      int intr,
      int maxit,
      // outputs
      out int lmu,
      out double[] a0,
      out double[,] ca,
      out int[] ia,
      out int[] nin,
      out double[] rsq,
      out double[] alm,
      out int nlp,
      out int jerr
      ) {
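      // note on layout: the row-major C# array ca[nlam, nx] matches the column-major Fortran
      // output ca(nx, lmu) documented above, so the native routine can write into it directly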
      // initialize output values and allocate arrays big enough
      a0 = new double[nlam];
      ca = new double[nlam, nx];
      ia = new int[nx];
      nin = new int[nlam];
      rsq = new double[nlam];
      alm = new double[nlam];
      nlp = -1;
      jerr = -1;
      lmu = -1;

      // call the correct version of the native dll based on the process type (x86/x64)
      if (Environment.Is64BitProcess) {
        elnet_x64(ref ka, ref parm, ref no, ref ni, x, y, w, jd, vp, cl, ref ne, ref nx, ref nlam, ref flmin, ulam, ref thr, ref isd, ref intr, ref maxit, ref lmu, a0, ca, ia, nin, rsq, alm, ref nlp, ref jerr);
      } else {
        elnet_x86(ref ka, ref parm, ref no, ref ni, x, y, w, jd, vp, cl, ref ne, ref nx, ref nlam, ref flmin, ulam, ref thr, ref isd, ref intr, ref maxit, ref lmu, a0, ca, ia, nin, rsq, alm, ref nlp, ref jerr);
      }
      // translate the error flag into exceptions (see the description of jerr above)
      if (jerr != 0) {
        if (jerr > 0 && jerr < 7777) throw new InvalidOperationException("glmnet: memory allocation error");
        else if (jerr == 7777) throw new InvalidOperationException("glmnet: all used predictors have zero variance");
        else if (jerr == 10000) throw new InvalidOperationException("glmnet: maxval(vp) <= 0.0");
        else if (jerr < 0 && jerr > -10000) throw new InvalidOperationException(string.Format("glmnet: convergence for {0}th lambda value not reached after maxit iterations", -jerr));
        else if (jerr <= -10000) throw new InvalidOperationException(string.Format("glmnet: number of non-zero coefficients along path exceeds the number of maximally allowed variables (nx) at {0}th lambda value", -jerr - 10000));
        else throw new InvalidOperationException(string.Format("glmnet: error {0}", jerr));
      }

      // resize arrays to the capacity that is actually necessary for the results
      Array.Resize(ref a0, lmu);
      Array.Resize(ref nin, lmu);
      Array.Resize(ref rsq, lmu);
      Array.Resize(ref alm, lmu);
    }

    [DllImport("glmnet-x86.dll", EntryPoint = "elnet_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void elnet_x86(
      ref int ka,
      ref double parm,
      ref int no,
      ref int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      ref int ne,
      ref int nx,
      ref int nlam,
      ref double flmin,
      double[] ulam,
      ref double thr,
      ref int isd,
      ref int intr,
      ref int maxit,
      // outputs:
      ref int lmu,
      [Out] double[] a0,
      [Out] double[,] ca,
      [Out] int[] ia,
      [Out] int[] nin,
      [Out] double[] rsq,
      [Out] double[] alm,
      ref int nlp,
      ref int jerr
      );
    [DllImport("glmnet-x64.dll", EntryPoint = "elnet_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void elnet_x64(
      ref int ka,
      ref double parm,
      ref int no,
      ref int ni,
      double[,] x,
      double[] y,
      double[] w,
      int[] jd,
      double[] vp,
      double[,] cl,
      ref int ne,
      ref int nx,
      ref int nlam,
      ref double flmin,
      double[] ulam,
      ref double thr,
      ref int isd,
      ref int intr,
      ref int maxit,
      // outputs:
      ref int lmu,
      [Out] double[] a0,
      [Out] double[,] ca,
      [Out] int[] ia,
      [Out] int[] nin,
      [Out] double[] rsq,
      [Out] double[] alm,
      ref int nlp,
      ref int jerr
      );


    /// <summary>Wrapper for the uncomp procedure in the glmnet library
    /// (uncompresses the coefficient vector of a particular solution)</summary>
    /// (see: https://cran.r-project.org/web/packages/glmnet/index.html)
    ///
    /// call uncomp(ni, ca, ia, nin, a)
    ///
    /// input:
    ///
    /// ni = total number of predictor variables
    /// ca(nx) = compressed coefficient values for the solution
    /// ia(nx) = pointers to compressed coefficients
    /// nin = number of compressed coefficients for the solution
    ///
    /// output:
    ///
    /// a(ni) = uncompressed coefficient vector
    ///         referencing original variables
    ///
    private static void uncomp(int numVars, double[] ca, int[] ia, int nin, out double[] a) {
      a = new double[numVars];
      // call the correct version of the native dll based on the process type (x86/x64)
      if (Environment.Is64BitProcess) {
        uncomp_x64(ref numVars, ca, ia, ref nin, a);
      } else {
        uncomp_x86(ref numVars, ca, ia, ref nin, a);
      }
    }

    [DllImport("glmnet-x86.dll", EntryPoint = "uncomp_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void uncomp_x86(ref int numVars, double[] ca, int[] ia, ref int nin, double[] a);
    [DllImport("glmnet-x64.dll", EntryPoint = "uncomp_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void uncomp_x64(ref int numVars, double[] ca, int[] ia, ref int nin, double[] a);

    /// <summary>Wrapper for the modval procedure in the glmnet library
    /// (evaluates the linear model from compressed coefficients and
    /// an uncompressed predictor matrix)</summary>
    /// (see: https://cran.r-project.org/web/packages/glmnet/index.html)
    ///
    /// call modval(a0, ca, ia, nin, n, x, f)
    ///
    /// input:
    ///
    /// a0 = intercept
    /// ca(nx) = compressed coefficient values for a solution
    /// ia(nx) = pointers to compressed coefficients
    /// nin = number of compressed coefficients for solution
    /// n = number of predictor vectors (observations)
    /// x(n, ni) = full (uncompressed) predictor matrix
    ///
    /// output:
    ///
    /// f(n) = model predictions
    private static void modval(double a0, double[] ca, int[] ia, int nin, int numObs, double[,] x, out double[] fn) {
      fn = new double[numObs];
      // call the correct version of the native dll based on the process type (x86/x64)
      if (Environment.Is64BitProcess) {
        modval_x64(ref a0, ca, ia, ref nin, ref numObs, x, fn);
      } else {
        modval_x86(ref a0, ca, ia, ref nin, ref numObs, x, fn);
      }
    }

    [DllImport("glmnet-x86.dll", EntryPoint = "modval_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void modval_x86(ref double a0, double[] ca, int[] ia, ref int nin, ref int numObs, double[,] x, [Out] double[] fn);
    [DllImport("glmnet-x64.dll", EntryPoint = "modval_", CallingConvention = CallingConvention.Cdecl)]
    private static extern void modval_x64(ref double a0, double[] ca, int[] ia, ref int nin, ref int numObs, double[,] x, [Out] double[] fn);

    #endregion
  }
}