
source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11775

Last change on this file since 11775 was 11770, checked in by gkronber, 10 years ago

#2283: worked on generic sequential search alg with bandit policy as parameter

File size: 12.0 KB
using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;

namespace Main {
  class Program {
    static void Main(string[] args) {
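      // default threads to the invariant culture so numeric output uses '.' as the
      // decimal separator regardless of the OS locale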
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      RunDemo();
      //RunGridTest();
    }

    private static void RunGridTest() {
      int maxIterations = 200000; // for Poly-10 there is no successful HL run yet, even with 50,000 evaluations
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415;
      var reps = 8;

      var policies = new Func<IBanditPolicy>[]
        {
         () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
         () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
         () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
         () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
         //() => new GaussianThompsonSamplingPolicy(),
         () => new GaussianThompsonSamplingPolicy(true),
         () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
         () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
         //() => new BernoulliThompsonSamplingPolicy(),
         () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
         () => new RandomPolicy(),
         () => new EpsGreedyPolicy(0.01),
         () => new EpsGreedyPolicy(0.05),
         () => new EpsGreedyPolicy(0.1),
         () => new EpsGreedyPolicy(0.2),
         () => new EpsGreedyPolicy(0.5),
         () => new UCTPolicy(0.1),
         () => new UCTPolicy(0.5),
         () => new UCTPolicy(1),
         () => new UCTPolicy(2),
         () => new UCTPolicy(5),
         () => new UCTPolicy(10),
         () => new UCB1Policy(),
         () => new UCB1TunedPolicy(),
         () => new UCBNormalPolicy(),
         () => new BoltzmannExplorationPolicy(0.1),
         () => new BoltzmannExplorationPolicy(0.5),
         () => new BoltzmannExplorationPolicy(1),
         () => new BoltzmannExplorationPolicy(5),
         () => new BoltzmannExplorationPolicy(10),
         () => new BoltzmannExplorationPolicy(20),
         () => new BoltzmannExplorationPolicy(100),
         () => new ChernoffIntervalEstimationPolicy(0.01),
         () => new ChernoffIntervalEstimationPolicy(0.05),
         () => new ChernoffIntervalEstimationPolicy(0.1),
         () => new ChernoffIntervalEstimationPolicy(0.2),
         () => new ThresholdAscentPolicy(10, 0.01),
         () => new ThresholdAscentPolicy(10, 0.05),
         () => new ThresholdAscentPolicy(10, 0.1),
         () => new ThresholdAscentPolicy(10, 0.2),
         () => new ThresholdAscentPolicy(100, 0.01),
         () => new ThresholdAscentPolicy(100, 0.05),
         () => new ThresholdAscentPolicy(100, 0.1),
         () => new ThresholdAscentPolicy(100, 0.2),
         () => new ThresholdAscentPolicy(1000, 0.01),
         () => new ThresholdAscentPolicy(1000, 0.05),
         () => new ThresholdAscentPolicy(1000, 0.1),
         () => new ThresholdAscentPolicy(1000, 0.2),
         () => new ThresholdAscentPolicy(5000, 0.01),
         () => new ThresholdAscentPolicy(10000, 0.01),
        };
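      // each entry is a factory rather than a shared instance, so every grid-test run
      // gets its own fresh policy object (e.g. policies[0]() creates a new
      // EpsGreedyPolicy with eps = 0.01 that values actions by their maximum reward)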

      foreach (var problem in new Tuple<IProblem, int>[]
        {
          //Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
          Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23),
        })
        foreach (var randomTries in new int[] { 0, 1, 10, /* 5, 100, 500, 1000 */ }) {
          foreach (var policy in policies) {
            var myRandomTries = randomTries;
            var localRand = new Random(localRandSeed);
            var options = new ParallelOptions();
            options.MaxDegreeOfParallelism = 4;
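            // evaluate the repetitions in parallel, capped at four concurrent worker threads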
            Parallel.For(0, reps, options, (i) => {
              //var t = Task.Run(() => {
              Random myLocalRand;
              lock (localRand)
                myLocalRand = new Random(localRand.Next());
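              // each repetition derives its own RNG from the shared, locked master RNG;
              // the seed sequence is reproducible (fixed master seed), but which
              // repetition receives which seed still depends on thread scheduling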

              //for (int i = 0; i < reps; i++) {

              int iterations = 0;
              var globalStatistics = new SentenceSetStatistics();

              // var problem = new SymbolicRegressionPoly10Problem();
              // var problem = new SantaFeAntProblem();
              //var problem = new PalindromeProblem();
              //var problem = new HardPalindromeProblem();
              //var problem = new RoyalPairProblem();
              //var problem = new EvenParityProblem();
              var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy()); // TODO: Make sure we generate the same random numbers for each experiment
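              // MctsSampler arguments, as used here: problem instance, maximum sentence
              // length (problem.Item2, presumably), repetition-local RNG, number of
              // initial random tries, and a freshly created bandit policy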
              //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
              //var alg = new AlternativesContextSampler(problem, 25);

              alg.SolutionEvaluated += (sentence, quality) => {
                iterations++;
                globalStatistics.AddSentence(sentence, quality);
                if (iterations % 10000 == 0) {
                  Console.WriteLine("{0,4} {1,7} {2,5} {3,25} {4}", alg.treeDepth, alg.treeSize, myRandomTries, policy(), globalStatistics);
                }
              };


              alg.Run(maxIterations);

              //Console.WriteLine("{0,5} {1} {2}", randomTries, policyFactory(1), globalStatistics);
              //}
              //});
              //tasks.Add(t);
            });
          }
        }
      //Task.WaitAll(tasks.ToArray());
    }

    private static void RunDemo() {
      // TODO: move problem instances into a separate folder
      // TODO: improve performance of SequentialSearch (memory allocations related to sequences)
      // TODO: implement bridge to HL-GP
      // TODO: unify MCTS, TD and ContextMCTS Solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: in contextual MCTS store a bandit info for each node in the _graph_ and also update all bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See comparison: old vs. new implementation of GaussianThompsonSampling
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: how else can samplers be compared, i.e. what can be measured to estimate a sampler's quality (besides solution quality and iterations until the best solution)? => goal: fewer iterations to a good result
      // TODO: research thompson sampling for max bandit?
      // TODO: thorough test of strategies for the numCorrectPhrases-armed max bandit
      // TODO: verify TA implementation using example from the original paper
      // TODO: separate policy from MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement thompson sampling for gaussian mixture models
      // TODO: implement inspection for MCTS (possibly show statistics from the tree via an interactive command line)
      // TODO: implement ACO-style bandit policy
      // TODO: simultaneously model transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating the shortest possible sentences completely at random vs. simply picking alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization


      int maxIterations = 100000;
      int iterations = 0;
      var sw = new Stopwatch();

      var globalStatistics = new SentenceSetStatistics();
      var random = new Random();

      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new RoyalPhraseSequenceProblem(random, 15, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: true);

      // var phraseLen = 2;
      // var numPhrases = 5;
      // var problem = new FindPhrasesProblem(random, 15, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 0, correctReward: 1.0, decoyReward: 0.0, phrasesAsSets: true);

      //var problem = new SymbolicRegressionPoly10Problem();   // good results e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward)
      // Ant
      // good results e.g. with: var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
      // very good results with: var alg = new SequentialSearch(problem, 17, random, 0,
      //   new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy(), true));
      var problem = new SantaFeAntProblem();
      //var problem = new SymbolicRegressionProblem("Tower");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg length = 11 q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      var alg = new SequentialSearch(problem, 10, random, 0,
        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new GaussianThompsonSamplingPolicy(true), true));
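      // SequentialSearch arguments follow the pattern noted above: problem, maximum
      // sentence length, RNG, number of initial random tries, and a grammar policy;
      // GenericGrammarPolicy presumably adapts the wrapped bandit policy (here Gaussian
      // Thompson sampling) to the choice among grammar alternatives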
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      // var alg = new AlternativesSampler(problem, 17);
      // var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
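        // redraw the in-place progress display every 100 evaluations (with a full
        // console clear every 1000 evaluations)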
        if (iterations % 100 == 0) {
          if (iterations % 1000 == 0) Console.Clear();
          Console.SetCursorPosition(0, 0);
          alg.PrintStats();
        }
        //Console.WriteLine(sentence);

        if (iterations % 10000 == 0) {
          //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        }
      };


      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.Clear();
      alg.PrintStats();
      Console.WriteLine(globalStatistics);
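      // throughput summary; elapsed milliseconds are scaled by 1e6 to report
      // nanoseconds per evaluated solution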
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        sw.Elapsed.TotalMilliseconds * 1e6 / maxIterations);
    }
  }
}