
source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11733

Last change on this file since 11733 was 11732, checked in by gkronber, 9 years ago

#2283: refactoring and bug fixes

File size: 10.9 KB
using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;

namespace Main {
  class Program {
    static void Main(string[] args) {
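      // use the invariant culture so numeric console output is formatted consistently across locales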
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      RunDemo();
      //RunGridTest();
    }

    private static void RunGridTest() {
      int maxIterations = 200000; // for Poly-10, no successful run with HL after 50000 evaluations yet
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415;
      var reps = 20;

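      // grid test: every policy below is combined with every problem and
      // randomTries setting and run for 'reps' independent repetitions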
      var policies = new Func<IPolicy>[]
        {
         () => new GaussianThompsonSamplingPolicy(),
         () => new GaussianThompsonSamplingPolicy(true),
         () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 1)),
         () => new BernoulliThompsonSamplingPolicy(),
         () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
         () => new RandomPolicy(),
         () => new EpsGreedyPolicy(0.01),
         () => new EpsGreedyPolicy(0.05),
         () => new EpsGreedyPolicy(0.1),
         () => new EpsGreedyPolicy(0.2),
         () => new EpsGreedyPolicy(0.5),
         () => new UCTPolicy(0.1),
         () => new UCTPolicy(0.5),
         () => new UCTPolicy(1),
         () => new UCTPolicy(2),
         () => new UCTPolicy(5),
         () => new UCTPolicy(10),
         () => new UCB1Policy(),
         () => new UCB1TunedPolicy(),
         () => new UCBNormalPolicy(),
         () => new BoltzmannExplorationPolicy(0.1),
         () => new BoltzmannExplorationPolicy(0.5),
         () => new BoltzmannExplorationPolicy(1),
         () => new BoltzmannExplorationPolicy(5),
         () => new BoltzmannExplorationPolicy(10),
         () => new BoltzmannExplorationPolicy(20),
         () => new BoltzmannExplorationPolicy(100),
         () => new ChernoffIntervalEstimationPolicy(0.01),
         () => new ChernoffIntervalEstimationPolicy(0.05),
         () => new ChernoffIntervalEstimationPolicy(0.1),
         () => new ChernoffIntervalEstimationPolicy(0.2),
         // (rand) => new ThresholdAscentPolicy(10, 0.01),
         // (rand) => new ThresholdAscentPolicy(10, 0.05),
         // (rand) => new ThresholdAscentPolicy(10, 0.1),
         // (rand) => new ThresholdAscentPolicy(10, 0.2),
         // (rand) => new ThresholdAscentPolicy(100, 0.01),
         // (rand) => new ThresholdAscentPolicy(100, 0.05),
         // (rand) => new ThresholdAscentPolicy(100, 0.1),
         // (rand) => new ThresholdAscentPolicy(100, 0.2),
         // (rand) => new ThresholdAscentPolicy(1000, 0.01),
         // (rand) => new ThresholdAscentPolicy(1000, 0.05),
         // (rand) => new ThresholdAscentPolicy(1000, 0.1),
         // (rand) => new ThresholdAscentPolicy(1000, 0.2),
         // (rand) => new ThresholdAscentPolicy(5000, 0.01),
         // (rand) => new ThresholdAscentPolicy(10000, 0.01),
        };

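      // each problem is paired with an int parameter that is passed on to
      // MctsSampler below (presumably the maximum sentence length)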
      foreach (var problem in new Tuple<IProblem, int>[]
        {
          Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
          Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23),
        })
        foreach (var randomTries in new int[] { 1, 10, /* 5, 100 /*, 500, 1000 */}) {
          foreach (var policy in policies) {
            var myRandomTries = randomTries;
            var localRand = new Random(localRandSeed);
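            // MaxDegreeOfParallelism = 1 makes the Parallel.For below run the
            // repetitions sequentially; raise it to parallelize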
            var options = new ParallelOptions();
            options.MaxDegreeOfParallelism = 1;
            Parallel.For(0, reps, options, (i) => {
              //var t = Task.Run(() => {
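              // derive a per-repetition RNG from the shared seeded RNG under a lock,
              // so each repetition gets its own reproducible random stream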
              Random myLocalRand;
              lock (localRand)
                myLocalRand = new Random(localRand.Next());

              //for (int i = 0; i < reps; i++) {

              int iterations = 0;
              var globalStatistics = new SentenceSetStatistics();

              // var problem = new SymbolicRegressionPoly10Problem();
              // var problem = new SantaFeAntProblem();
              //var problem = new PalindromeProblem();
              //var problem = new HardPalindromeProblem();
              //var problem = new RoyalPairProblem();
              //var problem = new EvenParityProblem();
              var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy()); // TODO: make sure we generate the same random numbers for each experiment
              //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
              //var alg = new AlternativesContextSampler(problem, 25);

              alg.SolutionEvaluated += (sentence, quality) => {
                iterations++;
                globalStatistics.AddSentence(sentence, quality);
                if (iterations % 10000 == 0) {
                  Console.WriteLine("{0,4} {1,7} {2,5} {3,25} {4}", alg.treeDepth, alg.treeSize, myRandomTries, policy(), globalStatistics);
                }
              };


              alg.Run(maxIterations);

              //Console.WriteLine("{0,5} {1} {2}", randomTries, policyFactory(1), globalStatistics);
              //}
              //});
              //tasks.Add(t);
            });
          }
        }
      //Task.WaitAll(tasks.ToArray());
    }

    private static void RunDemo() {
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: implement a GaussianWithUnknownMeanAndVariance model for Thompson sampling (verify with a unit test that the correct mean and variance are identified); see the sketch below
      // TODO: separate value function from policy
      // TODO: debug and verify the implementation variants of Gaussian Thompson sampling with a unit test
      // TODO: refactor policies to use banditInfos (policies are factories for bandit infos and a bandit info only has an update routine; each policy works only with its own type of banditInfo)
      // TODO: in contextual MCTS store a bandit info for each node in the _graph_ and also update all bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See comparison: old vs. new implementation of GaussianThompsonSampling
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: how else can samplers be compared, i.e. what can be measured to estimate a sampler's quality (besides solution quality and iterations until the best solution)? => goal: reach a good result in fewer iterations
      // TODO: is the likelihood for R=1 easy to compute for a Gaussian or a Gaussian mixture?
      // TODO: research Thompson sampling for the max bandit?
      // TODO: thorough test of strategies for the k-armed max bandit
      // TODO: verify the TA implementation using the example from the original paper
      // TODO: compare results for different policies also for the symb-reg problem
      // TODO: separate policy from the MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement Thompson sampling for Gaussian mixture models
      // TODO: implement inspection for MCTS (possibly an interactive command line to display statistics from the tree)
      // TODO: implement an ACO-style bandit policy
      // TODO: implement sequences that can be manipulated in-place (instead of strings); alternatives are also stored as sequences; for a sequence the index of the first NT symbol can be stored
      // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating the shortest possible sentences completely at random vs. simply picking alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization
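      //
      // A minimal sketch (not from the original file) of the conjugate update behind the
      // Gaussian Thompson sampling TODOs above, simplified to a known observation variance
      // obsVar with a normal prior N(priorMu, priorVar); all names here are illustrative:
      //   var posteriorVar = 1.0 / (1.0 / priorVar + n / obsVar);
      //   var posteriorMu  = posteriorVar * (priorMu / priorVar + sumRewards / obsVar);
      //   var sampledMean  = posteriorMu + Math.Sqrt(posteriorVar) * z; // z ~ N(0,1), e.g. via Box-Muller
      // each round, draw one sampledMean per arm and play the arm with the largest draw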


      int maxIterations = 100000;
      int iterations = 0;
      var sw = new Stopwatch();
      double bestQuality = 0;
      string bestSentence = "";
      var globalStatistics = new SentenceSetStatistics();
      var random = new Random();

      var problem = new SymbolicRegressionPoly10Problem();
      //var problem = new SantaFeAntProblem(); // good results e.g. with: var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      //var problem = new SymbolicRegressionProblem("Tower"); // very good results e.g. with new EpsGreedyPolicy(0.2) using max reward as quality!
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
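      // MctsSampler arguments follow the grid test above: the problem, its int bound
      // (23 for Poly-10; presumably the maximum sentence length), the RNG,
      // randomTries = 10, and the bandit policy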
      var alg = new MctsSampler(problem, 23, random, 10, new EpsGreedyPolicy(0.2)); // GaussianModelWithUnknownVariance (and Q = 0.99 quantile) works well for Ant
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 17);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      //var alg = new AlternativesSampler(problem, 17);
      //var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        bestQuality = quality;
        bestSentence = sentence;
        Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
        if (iterations % 1000 == 0) {
          alg.PrintStats();
        }
        if (iterations % 10000 == 0) {
          //Console.WriteLine("{0,10} {1,10:F5} {2,10:F5} {3}", iterations, bestQuality, quality, sentence);
          //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
          Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        }
      };


      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.WriteLine("{0,10} Best solution: {1,10:F5} {2}", iterations, bestQuality, bestSentence);
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} µs/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        (double)sw.ElapsedMilliseconds * 1000 / maxIterations); // ms * 1000 = µs per solution
    }
  }
}