
source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11977

Last change on this file since 11977 was 11977, checked in by gkronber, 9 years ago

#2283 commit for 'realistic' (same settings for ant and symbreg) experiment

File size: 22.6 KB
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Globalization;
using System.Runtime.Remoting.Messaging;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GeneticProgramming;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Common;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
using BoltzmannExplorationPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.BoltzmannExplorationPolicy;
using EpsGreedyPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy;
using IProblem = HeuristicLab.Problems.GrammaticalOptimization.IProblem;
using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;
using UCTPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.UCTPolicy;

namespace Main {
  class Program {
    static void Main(string[] args) {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      //RunDemo();
      //RunGpDemo();
      // RunGridTest();
      //RunGpGridTest();
      RunFunApproxTest();
    }

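    // RunGridTest: full-factorial experiment over problem instances, canonical-state handling,
    // random-tries settings, and bandit policies; each combination is repeated 'reps' times
    // with SequentialSearch and progress is printed every 1000 evaluated sentences.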
    private static void RunGridTest() {
      int maxIterations = 200000; // for poly-10 with 50000 evaluations no successful try with hl yet
      //var globalRandom = new Random(31415);
      var localRandSeed = new Random().Next();
      var reps = 20;

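      // candidate bandit policies for the grid; only ThresholdAscentPolicy(100, 0.05) is currently enabled,
      // the commented-out factories document the previously tested configurations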
      var policyFactories = new Func<IBanditPolicy>[]
        {
         //() => new RandomPolicy(),
         // () => new ActiveLearningPolicy(),
         //() => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
         //() => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
         //() => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
         //() => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
         ////() => new GaussianThompsonSamplingPolicy(),
         //() => new GaussianThompsonSamplingPolicy(true),
         //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
         //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
         ////() => new BernoulliThompsonSamplingPolicy(),
         //() => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
         //() => new EpsGreedyPolicy(0.01),
         //() => new EpsGreedyPolicy(0.05),
         //() => new EpsGreedyPolicy(0.1),
         //() => new EpsGreedyPolicy(0.2),
         //() => new EpsGreedyPolicy(0.5),
         //() => new UCTPolicy(0.01),
         //() => new UCTPolicy(0.05),
         //() => new UCTPolicy(0.1),
         //() => new UCTPolicy(0.5),
         //() => new UCTPolicy(1),
         //() => new UCTPolicy(2),
         //() => new UCTPolicy( 5),
         //() => new UCTPolicy( 10),
         //() => new ModifiedUCTPolicy(0.01),
         //() => new ModifiedUCTPolicy(0.05),
         //() => new ModifiedUCTPolicy(0.1),
         //() => new ModifiedUCTPolicy(0.5),
         //() => new ModifiedUCTPolicy(1),
         //() => new ModifiedUCTPolicy(2),
         //() => new ModifiedUCTPolicy( 5),
         //() => new ModifiedUCTPolicy( 10),
         //() => new UCB1Policy(),
         //() => new UCB1TunedPolicy(),
         //() => new UCBNormalPolicy(),
         //() => new BoltzmannExplorationPolicy(1),
         //() => new BoltzmannExplorationPolicy(10),
         //() => new BoltzmannExplorationPolicy(20),
         //() => new BoltzmannExplorationPolicy(100),
         //() => new BoltzmannExplorationPolicy(200),
         //() => new BoltzmannExplorationPolicy(500),
         // () => new ChernoffIntervalEstimationPolicy( 0.01),
         // () => new ChernoffIntervalEstimationPolicy( 0.05),
         // () => new ChernoffIntervalEstimationPolicy( 0.1),
         // () => new ChernoffIntervalEstimationPolicy( 0.2),
         //() => new ThresholdAscentPolicy(5, 0.01),
         //() => new ThresholdAscentPolicy(5, 0.05),
         //() => new ThresholdAscentPolicy(5, 0.1),
         //() => new ThresholdAscentPolicy(5, 0.2),
         //() => new ThresholdAscentPolicy(10, 0.01),
         //() => new ThresholdAscentPolicy(10, 0.05),
         //() => new ThresholdAscentPolicy(10, 0.1),
         //() => new ThresholdAscentPolicy(10, 0.2),
         //() => new ThresholdAscentPolicy(50, 0.01),
         //() => new ThresholdAscentPolicy(50, 0.05),
         //() => new ThresholdAscentPolicy(50, 0.1),
         //() => new ThresholdAscentPolicy(50, 0.2),
         //() => new ThresholdAscentPolicy(100, 0.01),
         () => new ThresholdAscentPolicy(100, 0.05),
         //() => new ThresholdAscentPolicy(100, 0.1),
         //() => new ThresholdAscentPolicy(100, 0.2),
         //() => new ThresholdAscentPolicy(500, 0.01),
         //() => new ThresholdAscentPolicy(500, 0.05),
         //() => new ThresholdAscentPolicy(500, 0.1),
         //() => new ThresholdAscentPolicy(500, 0.2),
         //() => new ThresholdAscentPolicy(5000, 0.01),
         //() => new ThresholdAscentPolicy(10000, 0.01),
        };

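      // each factory yields a problem instance together with the maximum sentence length for the search;
      // only the Poly-10 symbolic regression instance (max. length 23) is currently enabled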
      var instanceFactories = new Func<Random, Tuple<IProblem, int>>[]
      {
        //(rand) => Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:0, correctReward:1, decoyReward:0, phrasesAsSets:false ), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:0, correctReward:1, decoyReward:0, phrasesAsSets:true ), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:200, correctReward:1, decoyReward:0.5, phrasesAsSets:false), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:200, correctReward:1, decoyReward:0.5, phrasesAsSets:true), 15),
        (rand) => Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23)
      };

      foreach (var instanceFactory in instanceFactories) {
        foreach (var useCanonical in new bool[] { true /*, false */ }) {
          foreach (var randomTries in new int[] { 1 /*, 1, 10 /*, /* 5, 100 /*, 500, 1000 */}) {
            foreach (var policyFactory in policyFactories) {
              var myRandomTries = randomTries;
              var localRand = new Random(localRandSeed);
              var options = new ParallelOptions();
              options.MaxDegreeOfParallelism = 1;
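              // with MaxDegreeOfParallelism = 1 the repetitions below run sequentially;
              // increase it to execute the independent repetitions in parallel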
              Parallel.For(0, reps, options, (i) => {
                Random myLocalRand;
                lock (localRand)
                  myLocalRand = new Random(localRand.Next());

                int iterations = 0;
                var globalStatistics = new SentenceSetStatistics();

                // var problem = new SymbolicRegressionPoly10Problem();
                // var problem = new SantaFeAntProblem();
                //var problem = new PalindromeProblem();
                //var problem = new HardPalindromeProblem();
                //var problem = new RoyalPairProblem();
                //var problem = new EvenParityProblem();
                // var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy());
                var instance = instanceFactory(myLocalRand);
                var problem = instance.Item1;
                var maxLen = instance.Item2;
                var alg = new SequentialSearch(problem, maxLen, myLocalRand, myRandomTries,
                  new GenericGrammarPolicy(problem, policyFactory(), useCanonical));
                // var alg = new SequentialSearch(problem, maxLen, myLocalRand,
                //   myRandomTries,
                //   new GenericFunctionApproximationGrammarPolicy(problem,
                //     useCanonical));
                //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
                //var alg = new AlternativesContextSampler(problem, 25);

                alg.SolutionEvaluated += (sentence, quality) => {
                  iterations++;
                  globalStatistics.AddSentence(sentence, quality);
                  if (iterations % 1000 == 0) {
                    Console.WriteLine("{0,3} {1,5} \"{2,25}\" {3} {4} {5}", i, myRandomTries, policyFactory(), useCanonical, problem.ToString(), globalStatistics);
                  }
                };
                alg.FoundNewBestSolution += (sentence, quality) => {
                  //Console.WriteLine("{0,5} {1,25} {2} {3}",
                  //  myRandomTries, policyFactory(), useCanonical,
                  //  globalStatistics);
                };

                alg.Run(maxIterations);
              });
            }
          }
        }
      }
    }

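    // RunDemo: single manual run of one algorithm/problem configuration with periodic progress output;
    // the TODO comments below collect open questions and ideas for further experiments.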
    private static void RunDemo() {
      // TODO: unify MCTS, TD and ContextMCTS Solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See comparison: old vs. new implementation of GaussianThompsonSampling
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: research thompson sampling for max bandit?
      // TODO: verify TA implementation using example from the original paper
      // TODO: implement thompson sampling for gaussian mixture models
      // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating sentences that are as short as possible completely at random vs. simply picking alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization


      int maxIterations = 1000000;
      int iterations = 0;
      var sw = new Stopwatch();

      var globalStatistics = new SentenceSetStatistics();
      var random = new Random();


      //var problem = new RoyalSequenceProblem(random, 10, 30, 2, 1, 0);
      // var phraseLen = 3;
      // var numPhrases = 5;
      // var problem = new RoyalPhraseSequenceProblem(random, 10, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: false);

      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new FindPhrasesProblem(random, 10, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 0, correctReward: 1.0, decoyReward: 0, phrasesAsSets: false);

      // good results for symb-reg
      // prev results: e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo)=>aInfo.MaxReward)
      // 2015 01 19: grid test with canonical states:
      // - EpsGreedyPolicy(0.20,max)
      // - GenericThompsonSamplingPolicy("")
      // - UCTPolicy(0.10) (5 of 5 runs, 35000 iters avg.), 10 successful runs of 10 with rand-tries 0; at 40000 iters 9/10, at 30000 iters 1/10
      // 2015 01 22: symb-reg: grid test on find-phrases problem showed good results for UCB1TunedPolicy and SequentialSearch with canonical states
      // - symb-reg: consistent results with UCB1Tuned. finds optimal solution in ~50k iters (new GenericGrammarPolicy(problem, new UCB1TunedPolicy(), true));
      // 2015 01 23: grid test with canonical states:
      // - UCTPolicy(0.10) and UCBNormalPolicy: 10/10 optimal solutions within at most 50k iters; slightly worse: generic Thompson with variable sigma and BoltzmannExploration(100)


      // good results for artificial ant:
      // prev results:
      // - var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // - GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
      // 2015 01 19: grid test with canonical states (non-canonical slightly worse)
      // - ant: Threshold Ascent (best 100, 0.01; all variants relatively good)
      // - ant: policies where the variance has a large weight compared to the mean? (Gaussian(compatible), Gaussian with fixed variance, UCT with large c, all TA variants)
      // - ant: UCB1Tuned with canonical states also works very well for the artificial ant! consistent solutions in less than 10k iters

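      // currently active configuration: "Breiman" symbolic regression instance and SequentialSearch
      // (max. sentence length 30) with the function-approximation grammar policy; the commented-out
      // lines below list alternative problems and samplers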
      //var problem = new SymbolicRegressionPoly10Problem();
      //var problem = new SantaFeAntProblem();
      var problem = new SymbolicRegressionProblem(random, "Breiman");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg length = 11 q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      //var alg = new SequentialSearch(problem, 23, random, 0,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.QLearningGrammarPolicy(problem, new BoltzmannExplorationPolicy(10),
      //    1, 1, true));
      //var alg = new SequentialSearch(problem, 23, random, 0,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericContextualGrammarPolicy(problem, new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)), true));
      var alg = new SequentialSearch(problem, 30, random, 0,
        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericFunctionApproximationGrammarPolicy(problem, true));
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      // var alg = new AlternativesSampler(problem, 17);
      // var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        //Console.WriteLine("{0}", globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);

        //if (iterations % 100 == 0) {
        //  if (iterations % 10000 == 0) Console.Clear();
        //  Console.SetCursorPosition(0, 0);
        //  alg.PrintStats();
        //}

        //Console.WriteLine(sentence);

        if (iterations % 100 == 0) {
          Console.WriteLine("{0}", globalStatistics);
        }
      };


      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.Clear();
      alg.PrintStats();
      Console.WriteLine(globalStatistics);
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} µs/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / (double)sw.Elapsed.TotalSeconds,
        (double)sw.ElapsedMilliseconds * 1000 / maxIterations); // ms * 1000 / iterations = µs per solution
    }

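    // RunGpDemo: single offspring-selection GP run on the Poly-10 symbolic regression problem.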
    public static void RunGpDemo() {
      int iterations = 0;
      const int seed = 31415;
      const int maxIterations = 100000;

      //var prob = new SymbolicRegressionProblem(new Random(31415), "Tower");
      var prob = new SymbolicRegressionPoly10Problem();
      var sgp = new OffspringSelectionGP(prob, new Random(seed), true);
      RunGP(sgp, prob, 200000, 500, 0.15, 50);
    }


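    // RunFunApproxTest: repeated SequentialSearch runs with the function-approximation grammar policy
    // on the configured problems; a run is stopped early once a quality of 1.0 is reached.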
    private static void RunFunApproxTest() {
      const int nReps = 30;
      const int seed = 31415;
      //const int maxIters = 50000;
      var rand = new Random();
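      // each factory yields (max. iterations, max. sentence length, problem); Poly-10 and the
      // Santa Fe ant problem are currently enabled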
      var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[]
      {
        () => Tuple.Create(100000, 23, (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem()),
        () => Tuple.Create(100000, 17, (ISymbolicExpressionTreeProblem)new SantaFeAntProblem()),
        //() => Tuple.Create(50000, 32,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        //() => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        //() => Tuple.Create(50000, 64,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        //() => Tuple.Create(50000, 128, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        //() => Tuple.Create(50000, 128,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        //() => Tuple.Create(50000, 256, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        //() => Tuple.Create(50000, 256,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        //() => new RoyalPairProblem(),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, true),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, false),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 50, 1, 0.8, false),
      };

      // skip experiments that are already done
      for (int i = 0; i < nReps; i++) {
        foreach (var problemFactory in problemFactories) {
          {
            var solverSeed = rand.Next();
            var tupel = problemFactory();
            var maxIters = tupel.Item1;
            var maxSize = tupel.Item2;
            var prob = tupel.Item3;

            var alg = new SequentialSearch(prob, maxSize, new Random(solverSeed), 0,
              new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericFunctionApproximationGrammarPolicy(prob, true));

            int iterations = 0;
            double bestQuality = double.NegativeInfinity;
            var globalStatistics = new SentenceSetStatistics(prob.BestKnownQuality(maxSize));
            var algName = alg.GetType().Name;
            var probName = prob.GetType().Name;
            alg.SolutionEvaluated += (sentence, quality) => {
              bestQuality = Math.Max(bestQuality, quality);
              iterations++;
              globalStatistics.AddSentence(sentence, quality);
              //if (iterations % 100 == 0) {
              //  Console.Clear();
              //  Console.SetCursorPosition(0, 0);
              //  alg.PrintStats();
              //}
              //Console.WriteLine("{0:N5} {1}", quality, sentence);
              if (iterations % 200 == 0) {
                Console.WriteLine("\"{0,25}\" {1} \"{2,25}\" {3}", algName, maxSize, probName, globalStatistics);
                if (bestQuality.IsAlmost(1.0)) {
                  alg.StopRequested = true;
                }
              }
            };

            alg.Run(maxIters);

          }

        }
      }
    }

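    // RunGpGridTest: grid experiment for standard GP over population size, mutation rate,
    // problem instance, and repetitions.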
    private static void RunGpGridTest() {
      const int nReps = 20;
      const int seed = 31415;
      //const int maxIters = 50000;
      var rand = new Random(seed);
      var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[]
      {
        () => Tuple.Create(50000, 32, (ISymbolicExpressionTreeProblem)new PermutationProblem()),
        () => Tuple.Create(50000, 32, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        () => Tuple.Create(50000, 32, (ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        () => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        () => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        () => Tuple.Create(50000, 128, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        () => Tuple.Create(50000, 128, (ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        () => Tuple.Create(50000, 256, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
        () => Tuple.Create(50000, 256, (ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
        //() => new RoyalPairProblem(),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, true),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, false),
        //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 50, 1, 0.8, false),
      };

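      // grid over population size and mutation rate; only popSize = 100 and mutationRate = 0.15
      // are currently enabled, the other values are commented out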
      foreach (var popSize in new int[] { 100 /*, 250, 500, 1000, 2500, 5000, 10000 */ }) {
        foreach (var mutationRate in new double[] { /* 0.05, /* 0.10, */ 0.15, /* 0.25, 0.3 */}) {
          // skip experiments that are already done
          foreach (var problemFactory in problemFactories) {
            for (int i = 0; i < nReps; i++) {
              {
                var solverSeed = rand.Next();
                var tupel = problemFactory();
                var maxIters = tupel.Item1;
                var maxSize = tupel.Item2;
                var prob = tupel.Item3;
                var sgp = new StandardGP(prob, new Random(solverSeed));
                RunGP(sgp, prob, maxIters, popSize, mutationRate, maxSize);
              }
              //{
              //  var prob = problemFactory();
              //  var osgp = new OffspringSelectionGP(prob, new Random(solverSeed));
              //  RunGP(osgp, prob, maxIters, popSize, mutationRate, maxSize);
              //}
            }
          }
        }

      }
    }

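    // RunGP: configures and runs a GP solver on the given problem and prints statistics every 100 evaluations.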
    private static void RunGP(IGPSolver gp, ISymbolicExpressionTreeProblem prob, int maxIters, int popSize, double mutationRate, int maxSize) {
      int iterations = 0;
      var globalStatistics = new SentenceSetStatistics(prob.BestKnownQuality(maxSize));
      var gpName = gp.GetType().Name;
      var probName = prob.GetType().Name;
      gp.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);

        if (iterations % 100 == 0) {
          Console.WriteLine("\"{0,25}\" {1} {2:N2} {3} \"{4,25}\" {5}", gpName, popSize, mutationRate, maxSize, probName, globalStatistics);
        }
      };

      gp.PopulationSize = popSize;
      gp.MutationRate = mutationRate;
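      // size/depth limit uses maxSize + 2, presumably to leave room for the tree's root and start symbols (assumption)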
      gp.MaxSolutionSize = maxSize + 2;
      gp.MaxSolutionDepth = maxSize + 2;

      gp.Run(maxIters);
    }
  }
}