#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using HeuristicLab.Core;
using HeuristicLab.Encodings.SymbolicExpressionTreeEncoding;
using HeuristicLab.Optimization;
using HeuristicLab.Problems.DataAnalysis;
using HeuristicLab.Problems.DataAnalysis.Symbolic;
using HeuristicLab.Problems.DataAnalysis.Symbolic.Regression;
using HeuristicLab.Random;

namespace HeuristicLab.Algorithms.DataAnalysis.MctsSymbolicRegression {
  public static class MctsSymbolicRegressionStatic {
    // OBJECTIVES:
    // 1) solve toy problems without numeric constants (to show that structure search is effective / efficient)
    //    - e.g. Keijzer, Nguyen ... where no numeric constants are involved
    //    - assumptions:
    //      - we don't know the necessary operations or functions -> all available functions could be necessary
    //      - but we do not need to tune numeric constants -> no scaling of input variables x!
    // 2) Solve toy problems with numeric constants to make the algorithm invariant with respect to variable scaling.
    //    This is important for real world applications.
    //    - e.g. Korns or Vladislavleva problems where numeric constants are involved
    //    - assumptions:
    //      - any numeric constant is possible (a-priori we might assume that small abs. constants are more likely)
    //      - standardization of variables is possible (or might be necessary) as we adjust numeric parameters of the expression anyway
    //      - to simplify the problem we can restrict the set of functions, e.g. we assume that we know which functions are necessary for the problem instance
    //        -> several steps: (a) polynomials, (b) rational polynomials, (c) exponential or logarithmic functions, rational functions with exponential and logarithmic parts
    // 3) efficiency and effectiveness for real-world problems
    //    - e.g. Tower problem
    //    - (1) and (2) combined, structure search must be effective in combination with numeric optimization of constants
    //

    // TODO: The samples of x1*... or x2*... do not give any information about the relevance of the interaction term x1*x2 in general!
    //   --> E.g. if x1, x2 ~ N(0, 1) or U(-1, 1) this is trivial to show
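    //       (for independent, zero-mean x1 and x2: cov(x1, x1*x2) = E[x1^2 * x2] = E[x1^2] * E[x2] = 0,
    //        so the quality of partial expressions that contain only x1 or only x2 carries no signal about the product term)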
    //   --> Therefore, looking at roll-out statistics for arm selection (MCTS-style) is useless in the general case!
    //   --> It is necessary to rely on other features for the arm selection.
    //   --> TODO: Which heuristics can we apply?
    // TODO: Solve Poly-10
    // TODO: rename everything as this is not MCTS anymore
    // TODO: when a path to an expression is explored first (e.g. x1 + x2)
    //       and later we find a longer form x1 + x1 + x2 where the number of variable references
    //       exceeds the maximum in the automaton, which leads to an error (see unit tests)
    // TODO: unit tests for benchmark problems which contain log / exp / x^-1 but without numeric constants
    // TODO: check if transformation of y is correct and works (Obj 2)
    // TODO: The algorithm is not invariant to location and scale of variables.
    //       Include offset for variables as parameter (for Objective 2)
    // TODO: why does LM optimization converge so slowly with exp(x), log(x), and 1/x allowed (Obj 2)?
    // TODO: support exp(-x) and possibly (1/-x) (Obj 1)
    // TODO: is it OK to initialize all constants to 1 (Obj 2)?
    // TODO: improve memory usage
    // TODO: analyze / improve perf of ExprHashing (canonical form for expressions)
    // TODO: support empty test partition
    // TODO: the algorithm should be invariant to linear transformations of the space (y = f(x') = f( Ax ) ) for invertible transformations A --> see unit tests
    #region static API

    public interface IState {
      bool Done { get; }
      ISymbolicRegressionModel BestModel { get; }
      double BestSolutionTrainingQuality { get; }
      double BestSolutionTestQuality { get; }
      IEnumerable<ISymbolicRegressionSolution> ParetoBestModels { get; }
      int TotalRollouts { get; }
      int EffectiveRollouts { get; }
      int FuncEvaluations { get; }
      int GradEvaluations { get; } // number of gradient evaluations (multiplied by the number of parameters) to get an effort measure that is comparable to the number of function evaluations
      // TODO other stats on LM optimizer might be interesting here
    }

    // created through factory method
    private class State : IState {
      private const int MaxParams = 100;

      // state variables used by MCTS
      internal readonly Automaton automaton;
      internal IRandom random { get; private set; }
      internal readonly Tree tree;
      internal readonly Func<byte[], int, double> evalFun;
      // MCTS might get stuck. Track statistics on the number of effective roll-outs
      internal int totalRollouts;
      internal int effectiveRollouts;


      // state variables used only internally (for eval function)
      private readonly IRegressionProblemData problemData;
      private readonly double[][] x;
      private readonly double[] y;
      private readonly double[][] testX;
      private readonly double[] testY;
      private readonly double[] scalingFactor;
      private readonly double[] scalingOffset;
      private readonly double yStdDev; // for scaling parameters (e.g. stopping condition for LM)
      private readonly int constOptIterations;
      private readonly double lambda; // weight of penalty term for regularization
      private readonly double lowerEstimationLimit, upperEstimationLimit;
      private readonly bool collectParetoOptimalModels;
      private readonly List<ISymbolicRegressionSolution> paretoBestModels = new List<ISymbolicRegressionSolution>();
      private readonly List<double[]> paretoFront = new List<double[]>(); // matching the models

      private readonly ExpressionEvaluator evaluator, testEvaluator;

      internal readonly Dictionary<Tree, List<Tree>> children = new Dictionary<Tree, List<Tree>>();
      internal readonly Dictionary<Tree, List<Tree>> parents = new Dictionary<Tree, List<Tree>>();
      internal readonly Dictionary<ulong, Tree> nodes = new Dictionary<ulong, Tree>();
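      // search graph bookkeeping: child/parent adjacency lists plus unification of equivalent nodes keyed by expression hash (see Hashcode)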

      // values for best solution
      private double bestR;
      private byte[] bestCode;
      private int bestNParams;
      private double[] bestConsts;

      // stats
      private int funcEvaluations;
      private int gradEvaluations;

      // buffers
      private readonly double[] ones; // vector of ones (as default params)
      private readonly double[] constsBuf;
      private readonly double[] predBuf, testPredBuf;
      private readonly double[][] gradBuf;

      public State(IRegressionProblemData problemData, uint randSeed, int maxVariables, bool scaleVariables,
        int constOptIterations, double lambda,
        bool collectParetoOptimalModels = false,
        double lowerEstimationLimit = double.MinValue, double upperEstimationLimit = double.MaxValue,
        bool allowProdOfVars = true,
        bool allowExp = true,
        bool allowLog = true,
        bool allowInv = true,
        bool allowMultipleTerms = false) {

        if (lambda < 0) throw new ArgumentException("Lambda must be larger than or equal to zero", "lambda");

        this.problemData = problemData;
        this.constOptIterations = constOptIterations;
        this.lambda = lambda;
        this.evalFun = this.Eval;
        this.lowerEstimationLimit = lowerEstimationLimit;
        this.upperEstimationLimit = upperEstimationLimit;
        this.collectParetoOptimalModels = collectParetoOptimalModels;

        random = new MersenneTwister(randSeed);

        // prepare data for evaluation
        double[][] x;
        double[] y;
        double[][] testX;
        double[] testY;
        double[] scalingFactor;
        double[] scalingOffset;
        // get training and test datasets (scale linearly based on training set if required)
        GenerateData(problemData, scaleVariables, problemData.TrainingIndices, out x, out y, out scalingFactor, out scalingOffset);
        GenerateData(problemData, problemData.TestIndices, scalingFactor, scalingOffset, out testX, out testY);
        this.x = x;
        this.y = y;
        this.yStdDev = HeuristicLab.Common.EnumerableStatisticExtensions.StandardDeviation(y);
        this.testX = testX;
        this.testY = testY;
        this.scalingFactor = scalingFactor;
        this.scalingOffset = scalingOffset;
        this.evaluator = new ExpressionEvaluator(y.Length, lowerEstimationLimit, upperEstimationLimit);
        // we need a separate evaluator because the vector length for the test dataset might differ
        this.testEvaluator = new ExpressionEvaluator(testY.Length, lowerEstimationLimit, upperEstimationLimit);

        this.automaton = new Automaton(x, allowProdOfVars, allowExp, allowLog, allowInv, allowMultipleTerms, maxVariables);
        this.tree = new Tree() {
          state = automaton.CurrentState,
          expr = "",
          level = 0
        };

        // reset best solution
        this.bestR = 0;
        // code for default solution (constant model)
        this.bestCode = new byte[] { (byte)OpCodes.LoadConst0, (byte)OpCodes.Exit };
        this.bestNParams = 0;
        this.bestConsts = null;

        // init buffers
        this.ones = Enumerable.Repeat(1.0, MaxParams).ToArray();
        constsBuf = new double[MaxParams];
        this.predBuf = new double[y.Length];
        this.testPredBuf = new double[testY.Length];

        this.gradBuf = Enumerable.Range(0, MaxParams).Select(_ => new double[y.Length]).ToArray();
      }

      #region IState interface
      public bool Done { get { return tree != null && tree.Done; } }

      public double BestSolutionTrainingQuality {
        get {
          evaluator.Exec(bestCode, x, bestConsts, predBuf);
          return Rho(y, predBuf);
        }
      }

      public double BestSolutionTestQuality {
        get {
          testEvaluator.Exec(bestCode, testX, bestConsts, testPredBuf);
          return Rho(testY, testPredBuf);
        }
      }

      // takes the code of the best solution and creates an equivalent symbolic regression model
      public ISymbolicRegressionModel BestModel {
        get {
          var treeGen = new SymbolicExpressionTreeGenerator(problemData.AllowedInputVariables.ToArray());
          var interpreter = new SymbolicDataAnalysisExpressionTreeLinearInterpreter();

          var t = new SymbolicExpressionTree(treeGen.Exec(bestCode, bestConsts, bestNParams, scalingFactor, scalingOffset));
          var model = new SymbolicRegressionModel(problemData.TargetVariable, t, interpreter, lowerEstimationLimit, upperEstimationLimit);
          model.Scale(problemData); // apply linear scaling
          return model;
        }
      }
      public IEnumerable<ISymbolicRegressionSolution> ParetoBestModels {
        get { return paretoBestModels; }
      }

      public int TotalRollouts { get { return totalRollouts; } }
      public int EffectiveRollouts { get { return effectiveRollouts; } }
      public int FuncEvaluations { get { return funcEvaluations; } }
      public int GradEvaluations { get { return gradEvaluations; } } // number of gradient evaluations (multiplied by the number of parameters) to get an effort measure that is comparable to the number of function evaluations

      #endregion


      public string ExprStr(Automaton automaton) {
        byte[] code;
        int nParams;
        automaton.GetCode(out code, out nParams);
        var generator = new SymbolicExpressionTreeGenerator(problemData.AllowedInputVariables.ToArray());
        var @params = Enumerable.Repeat(1.0, nParams).ToArray();
        var root = generator.Exec(code, @params, nParams, null, null);
        var formatter = new InfixExpressionFormatter();
        return formatter.Format(new SymbolicExpressionTree(root));
      }

      private double Eval(byte[] code, int nParams) {
        double[] optConsts;
        double q;
        Eval(code, nParams, out q, out optConsts);

        // single objective best
        if (q > bestR) {
          bestR = q;
          bestNParams = nParams;
          this.bestCode = new byte[code.Length];
          this.bestConsts = new double[bestNParams];

          Array.Copy(code, bestCode, code.Length);
          Array.Copy(optConsts, bestConsts, bestNParams);
        }
        if (collectParetoOptimalModels) {
          // multi-objective best
          var complexity = // SymbolicDataAnalysisModelComplexityCalculator.CalculateComplexity() TODO: implement Kommenda's tree complexity directly in the evaluator
            Array.FindIndex(code, (opc) => opc == (byte)OpCodes.Exit); // use length of expression as surrogate for complexity
          UpdateParetoFront(q, complexity, code, optConsts, nParams, scalingFactor, scalingOffset);
        }
        return q;
      }

      private void Eval(byte[] code, int nParams, out double rho, out double[] optConsts) {
        // we make a first pass to determine a valid starting configuration for all constants
        // constant c in log(c + f(x)) is adjusted to guarantee that the argument of the logarithm is positive (see expression evaluator)
        // scale and offset are set to optimal starting configuration
        // assumes scale is the first param and offset is the last param

        // reset constants
        Array.Copy(ones, constsBuf, nParams);
        evaluator.Exec(code, x, constsBuf, predBuf, adjustOffsetForLogAndExp: true);
        funcEvaluations++;

        if (nParams == 0 || constOptIterations < 0) {
          // if we don't need to optimize parameters then we are done
          // changing scale and offset does not influence r²
          rho = Rho(y, predBuf);
          optConsts = constsBuf;
        } else {
          // optimize constants using the starting point calculated above
          OptimizeConstsLm(code, constsBuf, nParams, 0.0, nIters: constOptIterations);

          evaluator.Exec(code, x, constsBuf, predBuf);
          funcEvaluations++;

          rho = Rho(y, predBuf);
          optConsts = constsBuf;
        }
      }



      #region helpers
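      // Quality measure: Pearson's correlation coefficient R between predictions and target values;
      // an undefined correlation (e.g. for constant predictions) is treated as quality 0.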
      private static double Rho(IEnumerable<double> x, IEnumerable<double> y) {
        OnlineCalculatorError error;
        double r = OnlinePearsonsRCalculator.Calculate(x, y, out error);
        return error == OnlineCalculatorError.None ? r : 0.0;
      }


      private void OptimizeConstsLm(byte[] code, double[] consts, int nParams, double epsF = 0.0, int nIters = 100) {
        double[] optConsts = new double[nParams]; // allocate a smaller buffer for constants opt (TODO perf?)
        Array.Copy(consts, optConsts, nParams);

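        // LM minimizes the sum of squared fi values: residuals fill fi[0..n-1] and fi[n] holds the penalty term,
        // so the overall objective is SSE(consts) + n * lambda / stdDev(y) * ||consts||^2 (a ridge-style penalty, see Func below)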
        // direct usage of LM is recommended in the alglib manual for better performance than the lsfit interface (which uses LM internally).
        alglib.minlmstate state;
        alglib.minlmreport rep = null;
        alglib.minlmcreatevj(y.Length + 1, optConsts, out state); // +1 for penalty term
        // Using the change of the gradient as stopping criterion is recommended in the alglib manual.
        // However, the most recent version of alglib (as of Oct 2017) only supports epsX as stopping criterion
        alglib.minlmsetcond(state, epsg: 1E-6 * yStdDev, epsf: epsF, epsx: 0.0, maxits: nIters);
        // alglib.minlmsetgradientcheck(state, 1E-5);
        alglib.minlmoptimize(state, Func, FuncAndJacobian, null, code);
        alglib.minlmresults(state, out optConsts, out rep);
        funcEvaluations += rep.nfunc;
        gradEvaluations += rep.njac * nParams;

        if (rep.terminationtype < 0) throw new ArgumentException("lm failed: termination type = " + rep.terminationtype);

        // only use optimized constants if successful
        if (rep.terminationtype >= 0) {
          Array.Copy(optConsts, consts, optConsts.Length);
        }
      }

      private void Func(double[] arg, double[] fi, object obj) {
        var code = (byte[])obj;
        int n = predBuf.Length;
        evaluator.Exec(code, x, arg, predBuf); // gradients are nParams x vLen
        for (int r = 0; r < n; r++) {
          var res = predBuf[r] - y[r];
          fi[r] = res;
        }

        var penaltyIdx = fi.Length - 1;
        fi[penaltyIdx] = 0.0;
        // calc length of parameter vector for regularization
        var aa = 0.0;
        for (int i = 0; i < arg.Length; i++) {
          aa += arg[i] * arg[i];
        }
        if (lambda > 0 && aa > 0) {
          // scale lambda using stdDev(y) to make the parameter independent of the scale of y
          // scale lambda using n to make parameter independent of the number of training points
          // take the root because LM squares the result
          fi[penaltyIdx] = Math.Sqrt(n * lambda / yStdDev * aa);
        }
      }

      private void FuncAndJacobian(double[] arg, double[] fi, double[,] jac, object obj) {
        int n = predBuf.Length;
        int nParams = arg.Length;
        var code = (byte[])obj;
        evaluator.ExecGradient(code, x, arg, predBuf, gradBuf); // gradients are nParams x vLen
        for (int r = 0; r < n; r++) {
          var res = predBuf[r] - y[r];
          fi[r] = res;

          for (int k = 0; k < nParams; k++) {
            jac[r, k] = gradBuf[k][r];
          }
        }
        // calc length of parameter vector for regularization
        double aa = 0.0;
        for (int i = 0; i < arg.Length; i++) {
          aa += arg[i] * arg[i];
        }

        var penaltyIdx = fi.Length - 1;
        if (lambda > 0 && aa > 0) {
          fi[penaltyIdx] = 0.0;
          // scale lambda using stdDev(y) to make the parameter independent of the scale of y
          // scale lambda using n to make parameter independent of the number of training points
          // take the root because alglib LM squares the result
          fi[penaltyIdx] = Math.Sqrt(n * lambda / yStdDev * aa);

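          // derivative of the penalty term: with c = n * lambda / yStdDev and aa = sum_j arg_j^2,
          // d/d arg_i sqrt(c * aa) = c * arg_i / sqrt(c * aa) = 0.5 / fi[penaltyIdx] * 2 * c * arg_i (as computed below)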
          for (int i = 0; i < arg.Length; i++) {
            jac[penaltyIdx, i] = 0.5 / fi[penaltyIdx] * 2 * n * lambda / yStdDev * arg[i];
          }
        } else {
          fi[penaltyIdx] = 0.0;
          for (int i = 0; i < arg.Length; i++) {
            jac[penaltyIdx, i] = 0.0;
          }
        }
      }


      private void UpdateParetoFront(double q, int complexity, byte[] code, double[] param, int nParam,
        double[] scalingFactor, double[] scalingOffset) {
        double[] best = new double[2];
        double[] cur = new double[2] { q, complexity };
        bool[] max = new[] { true, false };
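        // objective directions: maximize quality q, minimize complexity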
        var isNonDominated = true;
        foreach (var e in paretoFront) {
          var domRes = DominationCalculator<int>.Dominates(cur, e, max, true);
          if (domRes == DominationResult.IsDominated) {
            isNonDominated = false;
            break;
          }
        }
        if (isNonDominated) {
          paretoFront.Add(cur);

          // create model
          var treeGen = new SymbolicExpressionTreeGenerator(problemData.AllowedInputVariables.ToArray());
          var interpreter = new SymbolicDataAnalysisExpressionTreeLinearInterpreter();

          var t = new SymbolicExpressionTree(treeGen.Exec(code, param, nParam, scalingFactor, scalingOffset));
          var model = new SymbolicRegressionModel(problemData.TargetVariable, t, interpreter, lowerEstimationLimit, upperEstimationLimit);
          model.Scale(problemData); // apply linear scaling

          var sol = model.CreateRegressionSolution(this.problemData);
          sol.Name = string.Format("{0:N5} {1}", q, complexity);

          paretoBestModels.Add(sol);
        }
        for (int i = paretoFront.Count - 2; i >= 0; i--) {
          var @ref = paretoFront[i];
          var domRes = DominationCalculator<int>.Dominates(cur, @ref, max, true);
          if (domRes == DominationResult.Dominates) {
            paretoFront.RemoveAt(i);
            paretoBestModels.RemoveAt(i);
          }
        }
      }

      #endregion


    }


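    // Typical usage of the static API (sketch; the iteration budget and variable names are illustrative):
    //   var state = MctsSymbolicRegressionStatic.CreateState(problemData, randSeed: 31415, maxVariables: 5, constOptIterations: 10);
    //   while (!state.Done && iterations++ < maxIterations) MctsSymbolicRegressionStatic.MakeStep(state);
    //   var model = state.BestModel;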
    /// <summary>
    /// Static method to initialize a state for the algorithm
    /// </summary>
    /// <param name="problemData">The problem data</param>
    /// <param name="randSeed">Random seed.</param>
    /// <param name="maxVariables">Maximum number of variable references that are allowed in the expression.</param>
    /// <param name="scaleVariables">Optionally scale input variables to the interval [0..1] (recommended)</param>
    /// <param name="constOptIterations">Maximum number of iterations for constants optimization (Levenberg-Marquardt)</param>
    /// <param name="lambda">Penalty factor for regularization (0..inf.); a value of zero disables regularization.</param>
    /// <param name="collectParameterOptimalModels">Optionally collect all Pareto-optimal solutions (trading off error and expression length).</param>
    /// <param name="lowerEstimationLimit">Optionally limit the result of the expression to this lower value.</param>
    /// <param name="upperEstimationLimit">Optionally limit the result of the expression to this upper value.</param>
    /// <param name="allowProdOfVars">Allow products of expressions.</param>
    /// <param name="allowExp">Allow expressions with exponentials.</param>
    /// <param name="allowLog">Allow expressions with logarithms</param>
    /// <param name="allowInv">Allow expressions with 1/x</param>
    /// <param name="allowMultipleTerms">Allow expressions which are sums of multiple terms.</param>
    /// <returns></returns>

    public static IState CreateState(IRegressionProblemData problemData, uint randSeed, int maxVariables = 3,
      bool scaleVariables = true, int constOptIterations = -1, double lambda = 0.0,
      bool collectParameterOptimalModels = false,
      double lowerEstimationLimit = double.MinValue, double upperEstimationLimit = double.MaxValue,
      bool allowProdOfVars = true,
      bool allowExp = true,
      bool allowLog = true,
      bool allowInv = true,
      bool allowMultipleTerms = false
      ) {
      return new State(problemData, randSeed, maxVariables, scaleVariables, constOptIterations, lambda,
        collectParameterOptimalModels,
        lowerEstimationLimit, upperEstimationLimit,
        allowProdOfVars, allowExp, allowLog, allowInv, allowMultipleTerms);
    }

    // returns the quality of the evaluated solution
    public static double MakeStep(IState state) {
      var mctsState = state as State;
      if (mctsState == null) throw new ArgumentException("state");
      if (mctsState.Done) throw new NotSupportedException("The tree search has enumerated all possible solutions.");

      return TreeSearch(mctsState);
    }
    #endregion

    private static double TreeSearch(State mctsState) {
      var automaton = mctsState.automaton;
      var tree = mctsState.tree;
      var eval = mctsState.evalFun;
      var rand = mctsState.random;
      double q = 0;
      bool success = false;
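      // repeat roll-outs until one reaches a new final expression (effective roll-out) or the whole search graph is done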
      do {

        automaton.Reset();
        success = TryTreeSearchRec2(rand, tree, automaton, eval, mctsState, out q);
        mctsState.totalRollouts++;
      } while (!success && !tree.Done);
      if (success) {
        mctsState.effectiveRollouts++;

#if DEBUG
        Console.WriteLine(mctsState.ExprStr(automaton));
#endif

        return q;
      } else return 0.0;
    }

    // search forward
    private static bool TryTreeSearchRec2(IRandom rand, Tree tree, Automaton automaton,
      Func<byte[], int, double> eval,
      State state,
      out double q) {
      // ROLLOUT AND EXPANSION
      // We are navigating a graph (states might be reached via different paths) instead of a tree.
      // State equivalence is checked through ExprHash (based on the generated code through the path).

      // We switch between rollout-mode and expansion mode.
      // Rollout-mode means we are navigating an existing path through the tree (using a rollout policy, e.g. UCB).
      // Expansion mode means we expand the graph, creating new nodes and edges (using an expansion policy, e.g. shortest route to a complete expression).
      // In expansion mode we might re-enter the graph and switch back to rollout-mode.
      // We do this until we reach a complete expression (final state).

      // Loops in the graph are prevented by checking that the level of a child must be larger than the level of the parent.
      // Sub-graphs which have been completely searched are marked as done.
      // Roll-out could lead to a state where all follow-states are done. In this case we call the rollout ineffective.
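      // In the loop below: if the current node already has children we are in roll-out mode (select one of them),
      // otherwise we are in expansion mode (enumerate the follow states of the automaton and create child nodes).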

      while (!automaton.IsFinalState(automaton.CurrentState)) {
        // Console.WriteLine(automaton.stateNames[automaton.CurrentState]);
        if (state.children.ContainsKey(tree)) {
          if (state.children[tree].All(ch => ch.Done)) {
            tree.Done = true;
            break;
          }
          // ROLLOUT INSIDE TREE
          // selection within the tree (prefer unvisited children, otherwise pick a random child that is not done)
          int selectedIdx = 0;
          if (state.children[tree].Count > 1) {
            selectedIdx = SelectInternal(state.children[tree], rand);
          }

          tree = state.children[tree][selectedIdx];

          // all steps where no alternatives could be taken immediately (without expanding the tree)
          // TODO: simplification of the automaton
          int[] possibleFollowStates = new int[1000];
          int nFs;
          automaton.FollowStates(automaton.CurrentState, ref possibleFollowStates, out nFs);
          Debug.Assert(possibleFollowStates.Contains(tree.state));
          automaton.Goto(tree.state);
        } else {
          // EXPAND
          int[] possibleFollowStates = new int[1000];
          int nFs;
          string actionString = "";
          automaton.FollowStates(automaton.CurrentState, ref possibleFollowStates, out nFs);

          if (nFs == 0) {
            // stuck in a dead end (no final state and no allowed follow states)
            tree.Done = true;
            break;
          }
          var newChildren = new List<Tree>(nFs);
          state.children.Add(tree, newChildren);
          for (int i = 0; i < nFs; i++) {
            Tree child = null;
            // for selected states (EvalStates) we introduce state unification (detection of equivalent states)
            if (automaton.IsEvalState(possibleFollowStates[i])) {
              var hc = Hashcode(automaton);
              hc = ((hc << 5) + hc) ^ (ulong)tree.state; // TODO fix unit test for structure enumeration
              if (!state.nodes.TryGetValue(hc, out child)) {
                // Console.WriteLine("New expression (hash: {0}, state: {1})", Hashcode(automaton), automaton.stateNames[possibleFollowStates[i]]);
                child = new Tree() {
                  state = possibleFollowStates[i],
                  expr = actionString + automaton.GetActionString(automaton.CurrentState, possibleFollowStates[i]),
                  level = tree.level + 1
                };
                state.nodes.Add(hc, child);
              }
              // only allow forward edges (don't add the child if we would go back in the graph)
              else if (child.level > tree.level) {
                // Console.WriteLine("Existing expression (hash: {0}, state: {1})", Hashcode(automaton), automaton.stateNames[possibleFollowStates[i]]);
                // whenever we join paths we need to propagate back the statistics of the existing node through the newly created link
                // to all parents
                BackpropagateStatistics(tree, state, child.visits);
              } else {
                // Console.WriteLine("Cycle (hash: {0}, state: {1})", Hashcode(automaton), automaton.stateNames[possibleFollowStates[i]]);
                // prevent cycles
                Debug.Assert(child.level <= tree.level);
                child = null;
              }
            } else {
              child = new Tree() {
                state = possibleFollowStates[i],
                expr = actionString + automaton.GetActionString(automaton.CurrentState, possibleFollowStates[i]),
                level = tree.level + 1
              };
            }
            if (child != null)
              newChildren.Add(child);
          }

          if (!newChildren.Any()) {
            // stuck in a dead end (no final state and no allowed follow states)
            tree.Done = true;
            break;
          }

          foreach (var ch in newChildren) {
            if (!state.parents.ContainsKey(ch)) {
              state.parents.Add(ch, new List<Tree>());
            }
            state.parents[ch].Add(tree);
          }


          // follow one of the children
          tree = SelectStateLeadingToFinal(automaton, tree, rand, state);
          automaton.Goto(tree.state);
        }
      }

      bool success;

      // EVALUATE TREE
      if (!tree.Done && automaton.IsFinalState(automaton.CurrentState)) {
        tree.Done = true;
        tree.expr = state.ExprStr(automaton);
        byte[] code; int nParams;
        automaton.GetCode(out code, out nParams);
        q = eval(code, nParams);
        success = true;
        BackpropagateQuality(tree, q, state);
      } else {
        // we got stuck in roll-out (no evaluation necessary!)
        q = 0.0;
        success = false;
      }

      // RECURSIVELY BACKPROPAGATE RESULTS TO ALL PARENTS
      // Update statistics
      // Set branch to done if all children are done.
      BackpropagateDone(tree, state);
      BackpropagateDebugStats(tree, q, state);


      return success;
    }

    private static int SelectInternal(List<Tree> list, IRandom rand) {
      Debug.Assert(list.Any(t => !t.Done));

      // check if there is any node which has not been visited
      for (int i = 0; i < list.Count; i++) {
        if (!list[i].Done && list[i].visits == 0) return i;
      }

      // choose a random node.
      var idx = rand.Next(list.Count);
      while (list[idx].Done) { idx = rand.Next(list.Count); }
      return idx;
    }

    // backpropagate existing statistics to all parents
    private static void BackpropagateStatistics(Tree tree, State state, int numVisits) {
      tree.visits += numVisits;

      if (state.parents.ContainsKey(tree)) {
        foreach (var parent in state.parents[tree]) {
          BackpropagateStatistics(parent, state, numVisits);
        }
      }
    }

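    // hash of the code generated along the current path (canonical form via ExprHashSymbolic); used to unify equivalent nodes in the search graph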
    private static ulong Hashcode(Automaton automaton) {
      byte[] code;
      int nParams;
      automaton.GetCode(out code, out nParams);
      return (ulong)ExprHashSymbolic.GetHash(code, nParams);
    }

    private static void BackpropagateQuality(Tree tree, double q, State state) {
      tree.visits++;
      // TODO: q is ignored for now

      if (state.parents.ContainsKey(tree)) {
        foreach (var parent in state.parents[tree]) {
          BackpropagateQuality(parent, q, state);
        }
      }
    }

    private static void BackpropagateDone(Tree tree, State state) {
      if (state.children.ContainsKey(tree) && state.children[tree].All(ch => ch.Done)) {
        tree.Done = true;
        // children[tree] = null; keep all nodes
      }

      if (state.parents.ContainsKey(tree)) {
        foreach (var parent in state.parents[tree]) {
          BackpropagateDone(parent, state);
        }
      }
    }

    private static void BackpropagateDebugStats(Tree tree, double q, State state) {
      if (state.parents.ContainsKey(tree)) {
        foreach (var parent in state.parents[tree]) {
          BackpropagateDebugStats(parent, q, state);
        }
      }

    }

    private static Tree SelectStateLeadingToFinal(Automaton automaton, Tree tree, IRandom rand, State state) {
      // find the child with the smallest state value (smaller values are closer to the final state)
      int selectedChildIdx = 0;
      var children = state.children[tree];
      Tree minChild = children.First();
      for (int i = 1; i < children.Count; i++) {
        if (children[i].state < minChild.state) {
          minChild = children[i];
          selectedChildIdx = i;
        }
      }
      return children[selectedChildIdx];
    }

    // scales data and extracts values from dataset into arrays
    private static void GenerateData(IRegressionProblemData problemData, bool scaleVariables, IEnumerable<int> rows,
      out double[][] xs, out double[] y, out double[] scalingFactor, out double[] scalingOffset) {
      xs = new double[problemData.AllowedInputVariables.Count()][];

      var i = 0;
      if (scaleVariables) {
        scalingFactor = new double[xs.Length + 1];
        scalingOffset = new double[xs.Length + 1];
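        // the additional (last) entry of scalingFactor / scalingOffset is reserved for the target variable (set below)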
      } else {
        scalingFactor = null;
        scalingOffset = null;
      }
      foreach (var var in problemData.AllowedInputVariables) {
        if (scaleVariables) {
          var minX = problemData.Dataset.GetDoubleValues(var, rows).Min();
          var maxX = problemData.Dataset.GetDoubleValues(var, rows).Max();
          var range = maxX - minX;

          // scaledX = (x - min) / range
          var sf = 1.0 / range;
          var offset = -minX / range;
          scalingFactor[i] = sf;
          scalingOffset[i] = offset;
          i++;
        }
      }

      if (scaleVariables) {
        // transform target variable to zero-mean
        scalingFactor[i] = 1.0;
        scalingOffset[i] = -problemData.Dataset.GetDoubleValues(problemData.TargetVariable, rows).Average();
      }

      GenerateData(problemData, rows, scalingFactor, scalingOffset, out xs, out y);
    }

    // extract values from dataset into arrays
    private static void GenerateData(IRegressionProblemData problemData, IEnumerable<int> rows, double[] scalingFactor, double[] scalingOffset,
      out double[][] xs, out double[] y) {
      xs = new double[problemData.AllowedInputVariables.Count()][];

      int i = 0;
      foreach (var var in problemData.AllowedInputVariables) {
        var sf = scalingFactor == null ? 1.0 : scalingFactor[i];
        var offset = scalingFactor == null ? 0.0 : scalingOffset[i];
        xs[i++] =
          problemData.Dataset.GetDoubleValues(var, rows).Select(xi => xi * sf + offset).ToArray();
      }

      {
        var sf = scalingFactor == null ? 1.0 : scalingFactor[i];
        var offset = scalingFactor == null ? 0.0 : scalingOffset[i];
        y = problemData.Dataset.GetDoubleValues(problemData.TargetVariable, rows).Select(yi => yi * sf + offset).ToArray();
      }
    }

    // for debugging only
    #region debugging

    private static string TraceTree(Tree tree, State state) {
      var sb = new StringBuilder();
      sb.Append(
        @"digraph {
ratio = fill;
node [style=filled];
");
      int nodeId = 0;

      TraceTreeRec(tree, 0, sb, ref nodeId, state);
      sb.Append("}");
      return sb.ToString();
    }

    private static void TraceTreeRec(Tree tree, int parentId, StringBuilder sb, ref int nextId, State state) {
      var tries = tree.visits;

      sb.AppendFormat("{0} [label=\"{1}\"]; ", parentId, tries).AppendLine();

      var list = new List<Tuple<int, int, Tree>>();
      if (state.children.ContainsKey(tree)) {
        foreach (var ch in state.children[tree]) {
          nextId++;
          tries = ch.visits;
          sb.AppendFormat("{0} [label=\"{1}\"]; ", nextId, tries).AppendLine();
          sb.AppendFormat("{0} -> {1} [label=\"{2}\"]", parentId, nextId, ch.expr).AppendLine();
          list.Add(Tuple.Create(tries, nextId, ch));
        }

        foreach (var tup in list) {
          var ch = tup.Item3;
          var chId = tup.Item2;
          if (state.children.ContainsKey(ch) && state.children[ch].Count == 1) {
            var chch = state.children[ch].First();
            nextId++;
            tries = chch.visits;
            sb.AppendFormat("{0} [label=\"{1}\"]; ", nextId, tries).AppendLine();
            sb.AppendFormat("{0} -> {1} [label=\"{2}\"]", chId, nextId, chch.expr).AppendLine();
          }
        }

        foreach (var tup in list.OrderByDescending(t => t.Item1).Take(1)) {
          TraceTreeRec(tup.Item3, tup.Item2, sb, ref nextId, state);
        }
      }
    }

    private static string WriteTree(Tree tree, State state) {
      var sb = new System.IO.StringWriter(System.Globalization.CultureInfo.InvariantCulture);
      var nodeIds = new Dictionary<Tree, int>();
      sb.Write(
        @"digraph {
ratio = fill;
node [style=filled];
");
      int threshold = /* state.nodes.Count > 500 ? 10 : */ 0;
      foreach (var kvp in state.children) {
        var parent = kvp.Key;
        int parentId;
        if (!nodeIds.TryGetValue(parent, out parentId)) {
          parentId = nodeIds.Count + 1;
          var tries = parent.visits;
          if (tries > threshold)
            sb.Write("{0} [label=\"{1}\"]; ", parentId, tries);
          nodeIds.Add(parent, parentId);
        }
        foreach (var child in kvp.Value) {
          int childId;
          if (!nodeIds.TryGetValue(child, out childId)) {
            childId = nodeIds.Count + 1;
            nodeIds.Add(child, childId);
          }
          var tries = child.visits;
          if (tries < 1) continue;
          if (tries > threshold) {
            sb.Write("{0} [label=\"{1}\"]; ", childId, tries);
            var edgeLabel = child.expr;
            // if (parent.expr.Length > 0) edgeLabel = edgeLabel.Replace(parent.expr, "");
            sb.Write("{0} -> {1} [label=\"{2}\"]", parentId, childId, edgeLabel);
          }
        }
      }

      sb.Write("}");
      return sb.ToString();
    }
    #endregion
  }
}