Free cookie consent management tool by TermsFeed Policy Generator

source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11806

Last change on this file since 11806 was 11806, checked in by gkronber, 10 years ago

#2283: separated value-states from done-states in GenericGrammarPolicy and removed disabling of actions from bandit policies

File size: 14.8 KB
Line 
using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
using BoltzmannExplorationPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.BoltzmannExplorationPolicy;
using EpsGreedyPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy;
using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;
using UCTPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.UCTPolicy;
20
namespace Main {
  /// <summary>
  /// Console driver for the grammatical optimization experiments: runs either a
  /// single hand-picked demo configuration (<see cref="RunDemo"/>) or a full
  /// grid test over bandit policies and problem instances (<see cref="RunGridTest"/>).
  /// </summary>
  class Program {
    static void Main(string[] args) {
      // Invariant culture keeps all numeric console output machine-readable
      // regardless of the machine's locale settings.
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      RunDemo();
      //RunGridTest();
    }

    // Evaluates every (problem instance x canonical-state flag x random-tries
    // x policy) combination 'reps' times (up to 4 repetitions in parallel) and
    // prints aggregate statistics every 10000 evaluated sentences.
    private static void RunGridTest() {
      int maxIterations = 50000; // for poly-10 with 50000 evaluations no successful try with hl yet
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415; // fixed seed so grid runs are reproducible
      var reps = 10;

      // Factories rather than shared instances: every repetition needs a
      // fresh policy object with clean internal state.
      var policyFactories = new Func<IBanditPolicy>[]
        {
          () => new RandomPolicy(),
          () => new ActiveLearningPolicy(),
          () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
          //() => new GaussianThompsonSamplingPolicy(),
          () => new GaussianThompsonSamplingPolicy(true),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
          //() => new BernoulliThompsonSamplingPolicy(),
          () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
          () => new EpsGreedyPolicy(0.01),
          () => new EpsGreedyPolicy(0.05),
          () => new EpsGreedyPolicy(0.1),
          () => new EpsGreedyPolicy(0.2),
          () => new EpsGreedyPolicy(0.5),
          () => new UCTPolicy(0.01),
          () => new UCTPolicy(0.05),
          () => new UCTPolicy(0.1),
          () => new UCTPolicy(0.5),
          () => new UCTPolicy(1),
          () => new UCTPolicy(2),
          () => new UCTPolicy(5),
          () => new UCTPolicy(10),
          () => new ModifiedUCTPolicy(0.01),
          () => new ModifiedUCTPolicy(0.05),
          () => new ModifiedUCTPolicy(0.1),
          () => new ModifiedUCTPolicy(0.5),
          () => new ModifiedUCTPolicy(1),
          () => new ModifiedUCTPolicy(2),
          () => new ModifiedUCTPolicy(5),
          () => new ModifiedUCTPolicy(10),
          () => new UCB1Policy(),
          () => new UCB1TunedPolicy(),
          () => new UCBNormalPolicy(),
          () => new BoltzmannExplorationPolicy(1),
          () => new BoltzmannExplorationPolicy(10),
          () => new BoltzmannExplorationPolicy(20),
          () => new BoltzmannExplorationPolicy(100),
          () => new BoltzmannExplorationPolicy(200),
          () => new BoltzmannExplorationPolicy(500),
          () => new ChernoffIntervalEstimationPolicy(0.01),
          () => new ChernoffIntervalEstimationPolicy(0.05),
          () => new ChernoffIntervalEstimationPolicy(0.1),
          () => new ChernoffIntervalEstimationPolicy(0.2),
          () => new ThresholdAscentPolicy(5, 0.01),
          () => new ThresholdAscentPolicy(5, 0.05),
          () => new ThresholdAscentPolicy(5, 0.1),
          () => new ThresholdAscentPolicy(5, 0.2),
          () => new ThresholdAscentPolicy(10, 0.01),
          () => new ThresholdAscentPolicy(10, 0.05),
          () => new ThresholdAscentPolicy(10, 0.1),
          () => new ThresholdAscentPolicy(10, 0.2),
          () => new ThresholdAscentPolicy(50, 0.01),
          () => new ThresholdAscentPolicy(50, 0.05),
          () => new ThresholdAscentPolicy(50, 0.1),
          () => new ThresholdAscentPolicy(50, 0.2),
          () => new ThresholdAscentPolicy(100, 0.01),
          () => new ThresholdAscentPolicy(100, 0.05),
          () => new ThresholdAscentPolicy(100, 0.1),
          () => new ThresholdAscentPolicy(100, 0.2),
          () => new ThresholdAscentPolicy(500, 0.01),
          () => new ThresholdAscentPolicy(500, 0.05),
          () => new ThresholdAscentPolicy(500, 0.1),
          () => new ThresholdAscentPolicy(500, 0.2),
          //() => new ThresholdAscentPolicy(5000, 0.01),
          //() => new ThresholdAscentPolicy(10000, 0.01),
        };

      // Each factory yields a problem instance together with the maximal
      // sentence length to search for that problem.
      var instanceFactories = new Func<Random, Tuple<IProblem, int>>[]
      {
        //(rand) => Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
        (rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 0, correctReward: 1, decoyReward: 0, phrasesAsSets: false), 15),
        (rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 0, correctReward: 1, decoyReward: 0, phrasesAsSets: true), 15),
        (rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 200, correctReward: 1, decoyReward: 0.5, phrasesAsSets: false), 15),
        (rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 200, correctReward: 1, decoyReward: 0.5, phrasesAsSets: true), 15),
        //(rand) => Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23)
      };

      foreach (var instanceFactory in instanceFactories) {
        foreach (var useCanonical in new bool[] { true /*, false */ }) {
          foreach (var randomTries in new int[] { 0, /* 1, 10, /* 5, 100 /*, 500, 1000 */}) {
            foreach (var policyFactory in policyFactories) {
              var myRandomTries = randomTries;
              var localRand = new Random(localRandSeed);
              var options = new ParallelOptions();
              options.MaxDegreeOfParallelism = 4;
              Parallel.For(0, reps, options, (i) => {
                Random myLocalRand;
                // localRand is shared by all parallel repetitions; lock while
                // drawing the per-repetition seed so seeding stays deterministic.
                lock (localRand)
                  myLocalRand = new Random(localRand.Next());

                int iterations = 0;
                var globalStatistics = new SentenceSetStatistics();

                // var problem = new SymbolicRegressionPoly10Problem();
                // var problem = new SantaFeAntProblem();
                //var problem = new PalindromeProblem();
                //var problem = new HardPalindromeProblem();
                //var problem = new RoyalPairProblem();
                //var problem = new EvenParityProblem();
                // var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy());
                var instance = instanceFactory(myLocalRand);
                var problem = instance.Item1;
                var maxLen = instance.Item2;
                var alg = new SequentialSearch(problem, maxLen, myLocalRand, myRandomTries,
                  new GenericGrammarPolicy(problem, policyFactory(), useCanonical));
                //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
                //var alg = new AlternativesContextSampler(problem, 25);

                alg.SolutionEvaluated += (sentence, quality) => {
                  iterations++;
                  globalStatistics.AddSentence(sentence, quality);
                  if (iterations % 10000 == 0) {
                    // policyFactory() creates a throw-away policy instance only
                    // to obtain a readable policy name for the output line.
                    Console.WriteLine("{0,3} {1,5} \"{2,25}\" {3} {4}", i, myRandomTries, policyFactory(), useCanonical, globalStatistics);
                  }
                };
                alg.FoundNewBestSolution += (sentence, quality) => {
                  //Console.WriteLine("{0,5} {1,25} {2} {3}",
                  //  myRandomTries, policyFactory(), useCanonical,
                  //  globalStatistics);
                };

                alg.Run(maxIterations);
              });
            }
          }
        }
      }
    }

    // Runs one hand-picked algorithm/problem configuration, refreshing an
    // in-place console status during the run and printing a timing summary
    // at the end.
    private static void RunDemo() {
      // TODO: implement bridge to HL-GP
      // TODO: unify MCTS, TD and ContextMCTS Solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: in contextual MCTS store a bandit info for each node in the _graph_ and also update all bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See comparison: old vs. new GaussianThompsonSampling implementation
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: research thompson sampling for max bandit?
      // TODO: thorough test of strategies for the numCorrectPhrases-armed max bandit
      // TODO: verify TA implementation using example from the original paper
      // TODO: separate policy from MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement thompson sampling for gaussian mixture models
      // TODO: implement inspection for MCTS (possibly an interactive command line to display statistics from the tree)
      // TODO: implement ACO-style bandit policy
      // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating sentences as short as possible when sampling completely randomly vs. simply picking alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization


      int maxIterations = 100000;
      int iterations = 0;
      var sw = new Stopwatch();

      var globalStatistics = new SentenceSetStatistics();
      var random = new Random(); // intentionally unseeded: each demo run differs


      //var problem = new RoyalSequenceProblem(random, 10, 30, 2, 1, 0);
      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new RoyalPhraseSequenceProblem(random, 15, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: true);

      // var phraseLen = 3;
      // var numPhrases = 5;
      // var problem = new FindPhrasesProblem(random, 10, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 200, correctReward: 1.0, decoyReward: 0.5, phrasesAsSets: true);

      // good results for symb-reg
      // prev results: e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo)=>aInfo.MaxReward)
      // 2015 01 19: grid test with canonical states:
      // - EpsGreedyPolicy(0.20,max)
      // - GenericThompsonSamplingPolicy("")
      // - UCTPolicy(0.10) (5 of 5 runs, 35000 iters avg.), 10 successful runs of 10 with rand-tries 0, with 40000 iters 9 / 10, with 30000 1 / 10

      // good results for artificial ant:
      // prev results:
      // - var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // - GaussianModelWithUnknownVariance (and Q= 0.99-quantil) also works well for Ant
      // 2015 01 19: grid test with canonical states (non-canonical slightly worse)
      // - Threshold Ascent (best 100, 0.01; all variants relatively good)
      // - Policies where the variance has a large weight compared to the mean? (Gaussian(compatible), Gaussian with fixed variance, UCT with large c, all TA)

      //var problem = new SymbolicRegressionPoly10Problem();

      var problem = new SantaFeAntProblem();
      //var problem = new SymbolicRegressionProblem("Tower");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg length = 11 q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      //var alg = new SequentialSearch(problem, 23, random, 0,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new ModifiedUCTPolicy(0.1), true));
      var alg = new SequentialSearch(problem, 17, random, 0,
        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericTDPolicy(problem, true));
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      // var alg = new AlternativesSampler(problem, 17);
      // var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
        // Refresh the in-place status line every 1000 evaluations; fully clear
        // the screen every 10000 to remove stale leftover characters.
        if (iterations % 1000 == 0) {
          if (iterations % 10000 == 0) Console.Clear();
          Console.SetCursorPosition(0, 0);
          alg.PrintStats();
        }
        //Console.WriteLine(sentence);

        if (iterations % 10000 == 0) {
          //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        }
      };


      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.Clear();
      alg.PrintStats();
      Console.WriteLine(globalStatistics);
      // BUGFIX: the last column is labeled "ns/sol" but the value used to be
      // ElapsedMilliseconds * 1000, i.e. MICROseconds per solution (off by
      // 1000x). Milliseconds * 1e6 yields true nanoseconds per solution.
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        sw.Elapsed.TotalMilliseconds * 1e6 / maxIterations);
    }
  }
}
Note: See TracBrowser for help on using the repository browser.