Free cookie consent management tool by TermsFeed Policy Generator

source: branches/MCTS-SymbReg-2796/HeuristicLab.Algorithms.DataAnalysis/3.4/MctsSymbolicRegression/MctsSymbolicRegressionAlgorithm.cs @ 15416

Last change on this file was in revision 15416, checked in by gkronber, 7 years ago

#2796 worked on MCTS for symbreg

File size: 17.6 KB
Line 
1#region License Information
2/* HeuristicLab
3 * Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
4 *
5 * This file is part of HeuristicLab.
6 *
7 * HeuristicLab is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation, either version 3 of the License, or
10 * (at your option) any later version.
11 *
12 * HeuristicLab is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
19 */
20#endregion
21
22using System;
23using System.Linq;
24using System.Threading;
25using HeuristicLab.Algorithms.DataAnalysis.MctsSymbolicRegression.Policies;
26using HeuristicLab.Analysis;
27using HeuristicLab.Common;
28using HeuristicLab.Core;
29using HeuristicLab.Data;
30using HeuristicLab.Optimization;
31using HeuristicLab.Parameters;
32using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
33using HeuristicLab.Problems.DataAnalysis;
34using HeuristicLab.Problems.DataAnalysis.Symbolic.Regression;
35
36namespace HeuristicLab.Algorithms.DataAnalysis.MctsSymbolicRegression {
37  // TODO: support pause (persisting/cloning the state)
38  [Item("MCTS Symbolic Regression", "Monte carlo tree search for symbolic regression.")]
39  [StorableClass]
40  [Creatable(CreatableAttribute.Categories.DataAnalysisRegression, Priority = 250)]
41  public class MctsSymbolicRegressionAlgorithm : FixedDataAnalysisAlgorithm<IRegressionProblem> {
42
43    #region ParameterNames
44    private const string IterationsParameterName = "Iterations";
45    private const string MaxVariablesParameterName = "Maximum variables";
46    private const string ScaleVariablesParameterName = "Scale variables";
47    private const string AllowedFactorsParameterName = "Allowed factors";
48    private const string ConstantOptimizationIterationsParameterName = "Iterations (constant optimization)";
49    private const string PolicyParameterName = "Policy";
50    private const string SeedParameterName = "Seed";
51    private const string SetSeedRandomlyParameterName = "SetSeedRandomly";
52    private const string UpdateIntervalParameterName = "UpdateInterval";
53    private const string CreateSolutionParameterName = "CreateSolution";
54    private const string PunishmentFactorParameterName = "PunishmentFactor";
55    private const string CollectParetoOptimalSolutionsParameterName = "CollectParetoOptimalSolutions";
56    private const string LambdaParameterName = "Lambda";
57
58    private const string VariableProductFactorName = "x * y * ...";
59    private const string ExpFactorName = "exp(c * x * y ...)";
60    private const string LogFactorName = "log(c + c1 x + c2 x + ...)";
61    private const string InvFactorName = "1 / (1 + c1 x + c2 x + ...)";
62    private const string FactorSumsName = "t1(x) + t2(x) + ... ";
63    #endregion
64
65    #region ParameterProperties
66    public IFixedValueParameter<IntValue> IterationsParameter {
67      get { return (IFixedValueParameter<IntValue>)Parameters[IterationsParameterName]; }
68    }
69    public IFixedValueParameter<IntValue> MaxVariableReferencesParameter {
70      get { return (IFixedValueParameter<IntValue>)Parameters[MaxVariablesParameterName]; }
71    }
72    public IFixedValueParameter<BoolValue> ScaleVariablesParameter {
73      get { return (IFixedValueParameter<BoolValue>)Parameters[ScaleVariablesParameterName]; }
74    }
75    public IFixedValueParameter<IntValue> ConstantOptimizationIterationsParameter {
76      get { return (IFixedValueParameter<IntValue>)Parameters[ConstantOptimizationIterationsParameterName]; }
77    }
78    public IValueParameter<IPolicy> PolicyParameter {
79      get { return (IValueParameter<IPolicy>)Parameters[PolicyParameterName]; }
80    }
81    public IFixedValueParameter<DoubleValue> PunishmentFactorParameter {
82      get { return (IFixedValueParameter<DoubleValue>)Parameters[PunishmentFactorParameterName]; }
83    }
84    public IValueParameter<ICheckedItemList<StringValue>> AllowedFactorsParameter {
85      get { return (IValueParameter<ICheckedItemList<StringValue>>)Parameters[AllowedFactorsParameterName]; }
86    }
87    public IFixedValueParameter<IntValue> SeedParameter {
88      get { return (IFixedValueParameter<IntValue>)Parameters[SeedParameterName]; }
89    }
90    public FixedValueParameter<BoolValue> SetSeedRandomlyParameter {
91      get { return (FixedValueParameter<BoolValue>)Parameters[SetSeedRandomlyParameterName]; }
92    }
93    public IFixedValueParameter<IntValue> UpdateIntervalParameter {
94      get { return (IFixedValueParameter<IntValue>)Parameters[UpdateIntervalParameterName]; }
95    }
96    public IFixedValueParameter<BoolValue> CreateSolutionParameter {
97      get { return (IFixedValueParameter<BoolValue>)Parameters[CreateSolutionParameterName]; }
98    }
99    public IFixedValueParameter<BoolValue> CollectParetoOptimalSolutionsParameter {
100      get { return (IFixedValueParameter<BoolValue>)Parameters[CollectParetoOptimalSolutionsParameterName]; }
101    }
102    public IFixedValueParameter<DoubleValue> LambdaParameter {
103      get { return (IFixedValueParameter<DoubleValue>)Parameters[LambdaParameterName]; }
104    }
105    #endregion
106
107    #region Properties
108    public int Iterations {
109      get { return IterationsParameter.Value.Value; }
110      set { IterationsParameter.Value.Value = value; }
111    }
112    public int Seed {
113      get { return SeedParameter.Value.Value; }
114      set { SeedParameter.Value.Value = value; }
115    }
116    public bool SetSeedRandomly {
117      get { return SetSeedRandomlyParameter.Value.Value; }
118      set { SetSeedRandomlyParameter.Value.Value = value; }
119    }
120    public int MaxVariableReferences {
121      get { return MaxVariableReferencesParameter.Value.Value; }
122      set { MaxVariableReferencesParameter.Value.Value = value; }
123    }
124    public IPolicy Policy {
125      get { return PolicyParameter.Value; }
126      set { PolicyParameter.Value = value; }
127    }
128    public double PunishmentFactor {
129      get { return PunishmentFactorParameter.Value.Value; }
130      set { PunishmentFactorParameter.Value.Value = value; }
131    }
132    public ICheckedItemList<StringValue> AllowedFactors {
133      get { return AllowedFactorsParameter.Value; }
134    }
135    public int ConstantOptimizationIterations {
136      get { return ConstantOptimizationIterationsParameter.Value.Value; }
137      set { ConstantOptimizationIterationsParameter.Value.Value = value; }
138    }
139    public bool ScaleVariables {
140      get { return ScaleVariablesParameter.Value.Value; }
141      set { ScaleVariablesParameter.Value.Value = value; }
142    }
143    public bool CreateSolution {
144      get { return CreateSolutionParameter.Value.Value; }
145      set { CreateSolutionParameter.Value.Value = value; }
146    }
147    public bool CollectParetoOptimalSolutions {
148      get { return CollectParetoOptimalSolutionsParameter.Value.Value; }
149      set { CollectParetoOptimalSolutionsParameter.Value.Value = value; }
150    }
151    public double Lambda {
152      get { return LambdaParameter.Value.Value; }
153      set { LambdaParameter.Value.Value = value; }
154    }
155    #endregion
156
157    [StorableConstructor]
158    protected MctsSymbolicRegressionAlgorithm(bool deserializing) : base(deserializing) { }
159
160    protected MctsSymbolicRegressionAlgorithm(MctsSymbolicRegressionAlgorithm original, Cloner cloner)
161      : base(original, cloner) {
162    }
163
164    public override IDeepCloneable Clone(Cloner cloner) {
165      return new MctsSymbolicRegressionAlgorithm(this, cloner);
166    }
167
168    public MctsSymbolicRegressionAlgorithm() {
169      Problem = new RegressionProblem(); // default problem
170
171      var defaultFactorsList = new CheckedItemList<StringValue>(
172        new string[] { VariableProductFactorName, ExpFactorName, LogFactorName, InvFactorName, FactorSumsName }
173        .Select(s => new StringValue(s).AsReadOnly())
174        ).AsReadOnly();
175      defaultFactorsList.SetItemCheckedState(defaultFactorsList.First(s => s.Value == FactorSumsName), false);
176
177      Parameters.Add(new FixedValueParameter<IntValue>(IterationsParameterName,
178        "Number of iterations", new IntValue(100000)));
179      Parameters.Add(new FixedValueParameter<IntValue>(SeedParameterName,
180        "The random seed used to initialize the new pseudo random number generator.", new IntValue(0)));
181      Parameters.Add(new FixedValueParameter<BoolValue>(SetSeedRandomlyParameterName,
182        "True if the random seed should be set to a random value, otherwise false.", new BoolValue(true)));
183      Parameters.Add(new FixedValueParameter<IntValue>(MaxVariablesParameterName,
184        "Maximal number of variables references in the symbolic regression models (multiple usages of the same variable are counted)", new IntValue(5)));
185      // Parameters.Add(new FixedValueParameter<DoubleValue>(CParameterName,
186      //   "Balancing parameter in UCT formula (0 < c < 1000). Small values: greedy search. Large values: enumeration. Default: 1.0", new DoubleValue(1.0)));
187      Parameters.Add(new ValueParameter<IPolicy>(PolicyParameterName,
188        "The policy to use for selecting nodes in MCTS (e.g. Ucb)", new Ucb()));
189      PolicyParameter.Hidden = true;
190      Parameters.Add(new ValueParameter<ICheckedItemList<StringValue>>(AllowedFactorsParameterName,
191        "Choose which expressions are allowed as factors in the model.", defaultFactorsList));
192
193      Parameters.Add(new FixedValueParameter<IntValue>(ConstantOptimizationIterationsParameterName,
194        "Number of iterations for constant optimization. A small number of iterations should be sufficient for most models. " +
195        "Set to 0 to let the algorithm stop automatically when it converges. Set to -1 to disable constants optimization.", new IntValue(10)));
196      Parameters.Add(new FixedValueParameter<BoolValue>(ScaleVariablesParameterName,
197        "Set to true to all input variables to the range [0..1]", new BoolValue(true)));
198      Parameters[ScaleVariablesParameterName].Hidden = true;
199      Parameters.Add(new FixedValueParameter<DoubleValue>(PunishmentFactorParameterName, "Estimations of models can be bounded. The estimation limits are calculated in the following way (lb = mean(y) - punishmentFactor*range(y), ub = mean(y) + punishmentFactor*range(y))", new DoubleValue(10)));
200      Parameters[PunishmentFactorParameterName].Hidden = true;
201      Parameters.Add(new FixedValueParameter<IntValue>(UpdateIntervalParameterName,
202        "Number of iterations until the results are updated", new IntValue(100)));
203      Parameters[UpdateIntervalParameterName].Hidden = true;
204      Parameters.Add(new FixedValueParameter<BoolValue>(CreateSolutionParameterName,
205        "Optionally produce a solution at the end of the run", new BoolValue(true)));
206      Parameters[CreateSolutionParameterName].Hidden = true;
207
208      Parameters.Add(new FixedValueParameter<BoolValue>(CollectParetoOptimalSolutionsParameterName,
209        "Optionally collect a set of Pareto-optimal solutions minimizing error and complexity.", new BoolValue(false)));
210      Parameters[CollectParetoOptimalSolutionsParameterName].Hidden = true;
211
212      Parameters.Add(new FixedValueParameter<DoubleValue>(LambdaParameterName,
213        "Lambda is the factor for the regularization term in the objective function (Obj = (y - f(x,p))² + lambda * |p|²)", new DoubleValue(0.0)));
214    }
215
216    [StorableHook(HookType.AfterDeserialization)]
217    private void AfterDeserialization() {
218    }
219
220    // TODO: support pause and restart
221    protected override void Run(CancellationToken cancellationToken) {
222      // Set up the algorithm
223      if (SetSeedRandomly) Seed = new System.Random().Next();
224      var collectPareto = CollectParetoOptimalSolutions;
225
226      // Set up the results display
227      var iterations = new IntValue(0);
228      Results.Add(new Result("Iterations", iterations));
229
230      var bestSolutionIteration = new IntValue(0);
231      Results.Add(new Result("Best solution iteration", bestSolutionIteration));
232
233      var table = new DataTable("Qualities");
234      table.Rows.Add(new DataRow("Best quality"));
235      table.Rows.Add(new DataRow("Current best quality"));
236      table.Rows.Add(new DataRow("Average quality"));
237      Results.Add(new Result("Qualities", table));
238
239      var bestQuality = new DoubleValue();
240      Results.Add(new Result("Best quality", bestQuality));
241
242      var curQuality = new DoubleValue();
243      Results.Add(new Result("Current best quality", curQuality));
244
245      var avgQuality = new DoubleValue();
246      Results.Add(new Result("Average quality", avgQuality));
247
248      var totalRollouts = new IntValue();
249      Results.Add(new Result("Total rollouts", totalRollouts));
250      var effRollouts = new IntValue();
251      Results.Add(new Result("Effective rollouts", effRollouts));
252      var funcEvals = new IntValue();
253      Results.Add(new Result("Function evaluations", funcEvals));
254      var gradEvals = new IntValue();
255      Results.Add(new Result("Gradient evaluations", gradEvals));
256
257      Result paretoBestModelsResult = new Result("ParetoBestModels", typeof(ItemList<ISymbolicRegressionSolution>));
258      if (collectPareto) {
259        Results.Add(paretoBestModelsResult);
260      }
261
262      // same as in SymbolicRegressionSingleObjectiveProblem
263      var y = Problem.ProblemData.Dataset.GetDoubleValues(Problem.ProblemData.TargetVariable,
264        Problem.ProblemData.TrainingIndices);
265      var avgY = y.Average();
266      var minY = y.Min();
267      var maxY = y.Max();
268      var range = maxY - minY;
269      var lowerLimit = avgY - PunishmentFactor * range;
270      var upperLimit = avgY + PunishmentFactor * range;
271
272      // init
273      var problemData = (IRegressionProblemData)Problem.ProblemData.Clone();
274      if (!AllowedFactors.CheckedItems.Any()) throw new ArgumentException("At least on type of factor must be allowed");
275      var state = MctsSymbolicRegressionStatic.CreateState(problemData, (uint)Seed, MaxVariableReferences, ScaleVariables,
276        ConstantOptimizationIterations, Lambda,
277        Policy, collectPareto,
278        lowerLimit, upperLimit,
279        allowProdOfVars: AllowedFactors.CheckedItems.Any(s => s.Value.Value == VariableProductFactorName),
280        allowExp: AllowedFactors.CheckedItems.Any(s => s.Value.Value == ExpFactorName),
281        allowLog: AllowedFactors.CheckedItems.Any(s => s.Value.Value == LogFactorName),
282        allowInv: AllowedFactors.CheckedItems.Any(s => s.Value.Value == InvFactorName),
283        allowMultipleTerms: AllowedFactors.CheckedItems.Any(s => s.Value.Value == FactorSumsName)
284        );
285
286      var updateInterval = UpdateIntervalParameter.Value.Value;
287      double sumQ = 0.0;
288      double bestQ = 0.0;
289      double curBestQ = 0.0;
290      int n = 0;
291
292      // cancelled before we acutally started
293      cancellationToken.ThrowIfCancellationRequested();
294
295      // Loop until iteration limit reached or canceled.
296      for (int i = 0; i < Iterations && !state.Done && !cancellationToken.IsCancellationRequested; i++) {
297        var q = MctsSymbolicRegressionStatic.MakeStep(state);
298        sumQ += q; // sum of qs in the last updateinterval iterations
299        curBestQ = Math.Max(q, curBestQ); // the best q in the last updateinterval iterations
300        bestQ = Math.Max(q, bestQ); // the best q overall
301        n++;
302        // iteration results
303        if (n == updateInterval) {
304          if (bestQ > bestQuality.Value) {
305            bestSolutionIteration.Value = i;
306            if (state.BestSolutionTrainingQuality > 0.99999) break;
307          }
308          bestQuality.Value = bestQ;
309          curQuality.Value = curBestQ;
310          avgQuality.Value = sumQ / n;
311          sumQ = 0.0;
312          curBestQ = 0.0;
313
314          funcEvals.Value = state.FuncEvaluations;
315          gradEvals.Value = state.GradEvaluations;
316          effRollouts.Value = state.EffectiveRollouts;
317          totalRollouts.Value = state.TotalRollouts;
318
319          if (collectPareto) {
320            paretoBestModelsResult.Value = new ItemList<ISymbolicRegressionSolution>(state.ParetoBestModels);
321          }
322
323          table.Rows["Best quality"].Values.Add(bestQuality.Value);
324          table.Rows["Current best quality"].Values.Add(curQuality.Value);
325          table.Rows["Average quality"].Values.Add(avgQuality.Value);
326          iterations.Value += n;
327          n = 0;
328        }
329      }
330
331      // final results (assumes that at least one iteration was calculated)
332      if (n > 0) {       
333        if (bestQ > bestQuality.Value) {
334          bestSolutionIteration.Value = iterations.Value + n;
335        }
336        bestQuality.Value = bestQ;
337        curQuality.Value = curBestQ;
338        avgQuality.Value = sumQ / n;
339
340        funcEvals.Value = state.FuncEvaluations;
341        gradEvals.Value = state.GradEvaluations;
342        effRollouts.Value = state.EffectiveRollouts;
343        totalRollouts.Value = state.TotalRollouts;
344
345        table.Rows["Best quality"].Values.Add(bestQuality.Value);
346        table.Rows["Current best quality"].Values.Add(curQuality.Value);
347        table.Rows["Average quality"].Values.Add(avgQuality.Value);
348        iterations.Value = iterations.Value + n;
349
350      }
351
352
353      Results.Add(new Result("Best solution quality (train)", new DoubleValue(state.BestSolutionTrainingQuality)));
354      Results.Add(new Result("Best solution quality (test)", new DoubleValue(state.BestSolutionTestQuality)));
355
356
357      // produce solution
358      if (CreateSolution) {
359        var model = state.BestModel;
360
361        // otherwise we produce a regression solution
362        Results.Add(new Result("Solution", model.CreateRegressionSolution(problemData)));
363      }
364    }
365  }
366}
Note: See TracBrowser for help on using the repository browser.