
source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11792

Last change on this file since 11792 was 11792, checked in by gkronber, 9 years ago

#2283 work-in-progress commit (does not compile)

File size: 13.2 KB
using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
using BoltzmannExplorationPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.BoltzmannExplorationPolicy;
using EpsGreedyPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy;
using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;
using UCTPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.UCTPolicy;

namespace Main {
  class Program {
    static void Main(string[] args) {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      RunDemo();
      RunGridTest();
    }

    private static void RunGridTest() {
      int maxIterations = 50000; // for poly-10 with 50000 evaluations no successful try with hl yet
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415;
      var reps = 5;

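      // bandit policies to compare: random and active-learning policies, eps-greedy
      // (with the default average-based and the max-based value estimate), Thompson
      // sampling variants, UCT/UCB-style policies, Boltzmann exploration, Chernoff
      // interval estimation, and Threshold Ascent with different parameter settings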
      var policies = new Func<IBanditPolicy>[]
        {
          () => new RandomPolicy(),
          () => new ActiveLearningPolicy(),
          () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
          //() => new GaussianThompsonSamplingPolicy(),
          () => new GaussianThompsonSamplingPolicy(true),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
          //() => new BernoulliThompsonSamplingPolicy(),
          () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
          () => new EpsGreedyPolicy(0.01),
          () => new EpsGreedyPolicy(0.05),
          () => new EpsGreedyPolicy(0.1),
          () => new EpsGreedyPolicy(0.2),
          () => new EpsGreedyPolicy(0.5),
          () => new UCTPolicy(0.1),
          () => new UCTPolicy(0.5),
          () => new UCTPolicy(1),
          () => new UCTPolicy(2),
          () => new UCTPolicy(5),
          () => new UCTPolicy(10),
          () => new UCB1Policy(),
          () => new UCB1TunedPolicy(),
          () => new UCBNormalPolicy(),
          () => new BoltzmannExplorationPolicy(1),
          () => new BoltzmannExplorationPolicy(10),
          () => new BoltzmannExplorationPolicy(20),
          () => new BoltzmannExplorationPolicy(100),
          () => new BoltzmannExplorationPolicy(200),
          () => new BoltzmannExplorationPolicy(500),
          () => new ChernoffIntervalEstimationPolicy(0.01),
          () => new ChernoffIntervalEstimationPolicy(0.05),
          () => new ChernoffIntervalEstimationPolicy(0.1),
          () => new ChernoffIntervalEstimationPolicy(0.2),
          () => new ThresholdAscentPolicy(10, 0.01),
          () => new ThresholdAscentPolicy(10, 0.05),
          () => new ThresholdAscentPolicy(10, 0.1),
          () => new ThresholdAscentPolicy(10, 0.2),
          () => new ThresholdAscentPolicy(100, 0.01),
          () => new ThresholdAscentPolicy(100, 0.05),
          () => new ThresholdAscentPolicy(100, 0.1),
          () => new ThresholdAscentPolicy(100, 0.2),
          //() => new ThresholdAscentPolicy(1000, 0.01),
          //() => new ThresholdAscentPolicy(1000, 0.05),
          //() => new ThresholdAscentPolicy(1000, 0.1),
          //() => new ThresholdAscentPolicy(1000, 0.2),
          //() => new ThresholdAscentPolicy(5000, 0.01),
          //() => new ThresholdAscentPolicy(10000, 0.01),
        };

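      // each problem is paired with an int that, matching the calls in RunDemo below,
      // is presumably the maximum sentence length: 17 for the ant trail, 23 for poly-10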
      foreach (var problem in new Tuple<IProblem, int>[]
        {
          Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
          Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23),
        })
        foreach (var useCanonical in new bool[] { true, false })
          foreach (var randomTries in new int[] { 0 /*, 1, 10, 5, 100, 500, 1000 */ }) {
            foreach (var policy in policies) {
              var myRandomTries = randomTries;
              var localRand = new Random(localRandSeed);
              var options = new ParallelOptions();
              options.MaxDegreeOfParallelism = 1;
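              // MaxDegreeOfParallelism = 1 makes the Parallel.For below effectively
              // sequential; presumably kept this way so that runs stay reproducible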
              Parallel.For(0, reps, options, (i) => {
                //var t = Task.Run(() => {
                // derive an independent seed for each repetition; the lock is needed
                // because System.Random instances are not thread-safe
                Random myLocalRand;
                lock (localRand)
                  myLocalRand = new Random(localRand.Next());

                //for (int i = 0; i < reps; i++) {

                int iterations = 0;
                var globalStatistics = new SentenceSetStatistics();

                // var problem = new SymbolicRegressionPoly10Problem();
                // var problem = new SantaFeAntProblem();
                //var problem = new PalindromeProblem();
                //var problem = new HardPalindromeProblem();
                //var problem = new RoyalPairProblem();
                //var problem = new EvenParityProblem();
                // var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy()); // TODO: Make sure we generate the same random numbers for each
                var alg = new SequentialSearch(problem.Item1, problem.Item2, myLocalRand, myRandomTries, new GenericGrammarPolicy(problem.Item1, policy(), useCanonical));
                //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
                //var alg = new AlternativesContextSampler(problem, 25);

                alg.SolutionEvaluated += (sentence, quality) => {
                  iterations++;
                  globalStatistics.AddSentence(sentence, quality);
                  if (iterations % 1000 == 0) {
                    Console.WriteLine("{0,5} {1,25} {2} {3}", myRandomTries, policy(), useCanonical, globalStatistics);
                  }
                };
                alg.FoundNewBestSolution += (sentence, quality) => {
                  Console.WriteLine("{0,5} {1,25} {2} {3}", myRandomTries, policy(), useCanonical, globalStatistics);
                };

                alg.Run(maxIterations);

                //Console.WriteLine("{0,5} {1} {2}", randomTries, policyFactory(1), globalStatistics);
                //}
                //});
                //tasks.Add(t);
              });
            }
          }
      //Task.WaitAll(tasks.ToArray());
    }

    private static void RunDemo() {
      // TODO: clone problem for parallel grid test
      // TODO: move problem instances into a separate folder
      // TODO: improve performance of SequentialSearch (memory allocations related to sequences)
      // TODO: implement bridge to HL-GP
      // TODO: unify MCTS, TD and ContextMCTS Solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: in contextual MCTS store a bandit info for each node in the _graph_ and also update all bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See comparison: old vs. new implementation of GaussianThompsonSampling
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: how else can samplers be compared, i.e., what can be measured to estimate the quality of a sampler (besides solution quality and iterations until the best solution)? => goal: faster iterations to a good result
      // TODO: research thompson sampling for max bandit?
      // TODO: thorough test of strategies for the numCorrectPhrases-armed max bandit
      // TODO: verify TA implementation using example from the original paper
      // TODO: separate policy from MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement thompson sampling for gaussian mixture models
      // TODO: implement inspection for MCTS (possibly an interactive command line to display statistics from the tree)
      // TODO: implement ACO-style bandit policy
      // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating the shortest possible sentences completely at random vs. simply choosing among alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization

      int maxIterations = 100000;
      int iterations = 0;
      var sw = new Stopwatch();

      var globalStatistics = new SentenceSetStatistics();
      var random = new Random();

      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new RoyalPhraseSequenceProblem(random, 15, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: true);

      // var phraseLen = 2;
      // var numPhrases = 5;
      // var problem = new FindPhrasesProblem(random, 15, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 0, correctReward: 1.0, decoyReward: 0.0, phrasesAsSets: true);

      var problem = new SymbolicRegressionPoly10Problem();   // good results e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward)
      // Ant
      // good results e.g. with:      var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
      // very good results with:      var alg = new SequentialSearch(problem, 17, random, 0,
      //   new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy(), true));
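      // Hedged sketch (not in the original file): the eps-greedy-with-max setup mentioned
      // above for poly-10, assuming the EpsGreedyPolicy overload and the SequentialSearch
      // constructor that RunGridTest uses:
      //var alg = new SequentialSearch(problem, 23, random, 10,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem,
      //    new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"), true));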

      //var problem = new SantaFeAntProblem();
      //var problem = new SymbolicRegressionProblem("Tower");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg length = 11 q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      var alg = new SequentialSearch(problem, 23, random, 0,
        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new EpsGreedyPolicy(0.2), true));
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      // var alg = new AlternativesSampler(problem, 17);
      // var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
        if (iterations % 100 == 0) {
          if (iterations % 1000 == 0) Console.Clear();
          Console.SetCursorPosition(0, 0);
          alg.PrintStats();
        }
        //Console.WriteLine(sentence);

        if (iterations % 10000 == 0) {
          //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        }
      };

      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.Clear();
      alg.PrintStats();
      Console.WriteLine(globalStatistics);
      // ElapsedMilliseconds * 1000 / maxIterations yields microseconds (not ns) per solution
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} µs/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        (double)sw.ElapsedMilliseconds * 1000 / maxIterations);
    }
  }
}