source: branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs @ 11842

Last change on this file since 11842 was 11832, checked in by gkronber, 9 years ago

linear value function approximation and good results for poly-10 benchmark

File size: 16.0 KB
using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
using BoltzmannExplorationPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.BoltzmannExplorationPolicy;
using EpsGreedyPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy;
using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;
using UCTPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.UCTPolicy;

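// Experiment driver for grammatical optimization: Main either runs a single demo
// configuration (RunDemo, currently commented out) or a grid test over bandit
// policies and problem instances (RunGridTest).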
namespace Main {
  class Program {
    static void Main(string[] args) {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      //RunDemo();
      RunGridTest();
    }

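    // Grid test: applies every bandit policy in policyFactories to every problem
    // instance in instanceFactories, running `reps` independent repetitions in
    // parallel with deterministic per-repetition seeds.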
    private static void RunGridTest() {
      int maxIterations = 70000; // note: for poly-10 with 50000 evaluations there is no successful run with HL yet
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415;
      var reps = 30;

      var policyFactories = new Func<IBanditPolicy>[]
        {
          () => new RandomPolicy(),
          () => new ActiveLearningPolicy(),
          () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
          () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
          //() => new GaussianThompsonSamplingPolicy(),
          () => new GaussianThompsonSamplingPolicy(true),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
          //() => new BernoulliThompsonSamplingPolicy(),
          () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
          () => new EpsGreedyPolicy(0.01),
          () => new EpsGreedyPolicy(0.05),
          () => new EpsGreedyPolicy(0.1),
          () => new EpsGreedyPolicy(0.2),
          () => new EpsGreedyPolicy(0.5),
          () => new UCTPolicy(0.01),
          () => new UCTPolicy(0.05),
          () => new UCTPolicy(0.1),
          () => new UCTPolicy(0.5),
          () => new UCTPolicy(1),
          () => new UCTPolicy(2),
          () => new UCTPolicy(5),
          () => new UCTPolicy(10),
          () => new ModifiedUCTPolicy(0.01),
          () => new ModifiedUCTPolicy(0.05),
          () => new ModifiedUCTPolicy(0.1),
          () => new ModifiedUCTPolicy(0.5),
          () => new ModifiedUCTPolicy(1),
          () => new ModifiedUCTPolicy(2),
          () => new ModifiedUCTPolicy(5),
          () => new ModifiedUCTPolicy(10),
          () => new UCB1Policy(),
          () => new UCB1TunedPolicy(),
          () => new UCBNormalPolicy(),
          () => new BoltzmannExplorationPolicy(1),
          () => new BoltzmannExplorationPolicy(10),
          () => new BoltzmannExplorationPolicy(20),
          () => new BoltzmannExplorationPolicy(100),
          () => new BoltzmannExplorationPolicy(200),
          () => new BoltzmannExplorationPolicy(500),
          () => new ChernoffIntervalEstimationPolicy(0.01),
          () => new ChernoffIntervalEstimationPolicy(0.05),
          () => new ChernoffIntervalEstimationPolicy(0.1),
          () => new ChernoffIntervalEstimationPolicy(0.2),
          () => new ThresholdAscentPolicy(5, 0.01),
          () => new ThresholdAscentPolicy(5, 0.05),
          () => new ThresholdAscentPolicy(5, 0.1),
          () => new ThresholdAscentPolicy(5, 0.2),
          () => new ThresholdAscentPolicy(10, 0.01),
          () => new ThresholdAscentPolicy(10, 0.05),
          () => new ThresholdAscentPolicy(10, 0.1),
          () => new ThresholdAscentPolicy(10, 0.2),
          () => new ThresholdAscentPolicy(50, 0.01),
          () => new ThresholdAscentPolicy(50, 0.05),
          () => new ThresholdAscentPolicy(50, 0.1),
          () => new ThresholdAscentPolicy(50, 0.2),
          () => new ThresholdAscentPolicy(100, 0.01),
          () => new ThresholdAscentPolicy(100, 0.05),
          () => new ThresholdAscentPolicy(100, 0.1),
          () => new ThresholdAscentPolicy(100, 0.2),
          () => new ThresholdAscentPolicy(500, 0.01),
          () => new ThresholdAscentPolicy(500, 0.05),
          () => new ThresholdAscentPolicy(500, 0.1),
          () => new ThresholdAscentPolicy(500, 0.2),
          //() => new ThresholdAscentPolicy(5000, 0.01),
          //() => new ThresholdAscentPolicy(10000, 0.01),
        };

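      // each instance factory creates a problem instance together with the maximum
      // sentence length for that problem (consumed as Item2 below)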
      var instanceFactories = new Func<Random, Tuple<IProblem, int>>[]
      {
        //(rand) => Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 0, correctReward: 1, decoyReward: 0, phrasesAsSets: false), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 0, correctReward: 1, decoyReward: 0, phrasesAsSets: true), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 200, correctReward: 1, decoyReward: 0.5, phrasesAsSets: false), 15),
        //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases: 5, phraseLen: 3, numOptimalPhrases: 5, numDecoyPhrases: 200, correctReward: 1, decoyReward: 0.5, phrasesAsSets: true), 15),
        (rand) => Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23)
      };

      foreach (var instanceFactory in instanceFactories) {
        foreach (var useCanonical in new bool[] { true /*, false */ }) {
          foreach (var randomTries in new int[] { 0 /*, 1, 10, 5, 100, 500, 1000 */ }) {
            foreach (var policyFactory in policyFactories) {
              var myRandomTries = randomTries;
              var localRand = new Random(localRandSeed);
              var options = new ParallelOptions();
              options.MaxDegreeOfParallelism = 4;
              Parallel.For(0, reps, options, (i) => {
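                // derive a per-repetition RNG deterministically from the shared seeded
                // Random; the lock is needed because System.Random is not thread-safe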
                Random myLocalRand;
                lock (localRand)
                  myLocalRand = new Random(localRand.Next());

                int iterations = 0;
                var globalStatistics = new SentenceSetStatistics();

                // var problem = new SymbolicRegressionPoly10Problem();
                // var problem = new SantaFeAntProblem();
                //var problem = new PalindromeProblem();
                //var problem = new HardPalindromeProblem();
                //var problem = new RoyalPairProblem();
                //var problem = new EvenParityProblem();
                // var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy());
                var instance = instanceFactory(myLocalRand);
                var problem = instance.Item1;
                var maxLen = instance.Item2;
                //var alg = new SequentialSearch(problem, maxLen, myLocalRand, myRandomTries,
                //  new GenericGrammarPolicy(problem, policyFactory(), useCanonical));
                var alg = new SequentialSearch(problem, maxLen, myLocalRand,
                  myRandomTries,
                  new GenericFunctionApproximationGrammarPolicy(problem,
                    useCanonical));
                //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
                //var alg = new AlternativesContextSampler(problem, 25);

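                // report aggregate statistics for every 1000th evaluated sentence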
                alg.SolutionEvaluated += (sentence, quality) => {
                  iterations++;
                  globalStatistics.AddSentence(sentence, quality);
                  if (iterations % 1000 == 0) {
                    Console.WriteLine("{0,3} {1,5} \"{2,25}\" {3} {4} {5}", i, myRandomTries, policyFactory(), useCanonical, problem.ToString(), globalStatistics);
                  }
                };
                alg.FoundNewBestSolution += (sentence, quality) => {
                  //Console.WriteLine("{0,5} {1,25} {2} {3}",
                  //  myRandomTries, policyFactory(), useCanonical,
                  //  globalStatistics);
                };

                alg.Run(maxIterations);
              });
            }
          }
        }
      }
    }

    private static void RunDemo() {
      // TODO: implement bridge to HL-GP
      // TODO: unify MCTS, TD and ContextMCTS solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: in contextual MCTS store a bandit info for each node in the _graph_ and also update all bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old GaussianThompson implementation work better for SantaFe than the new one? See comparison: old vs. new GaussianThompsonSampling implementation
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: research Thompson sampling for the max bandit?
      // TODO: thorough test of strategies for the numCorrectPhrases-armed max bandit
      // TODO: verify TA implementation using the example from the original paper
      // TODO: separate policy from the MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement Thompson sampling for Gaussian mixture models
      // TODO: implement inspection for MCTS (possibly an interactive command line to display statistics from the tree)
      // TODO: implement ACO-style bandit policy
      // TODO: simultaneously model transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating sentences that are as short as possible when sampling completely at random vs. simply picking random alternatives
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization

      int maxIterations = 1000000;
      int iterations = 0;
      var sw = new Stopwatch();

      var globalStatistics = new SentenceSetStatistics();
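      // note: the demo uses an unseeded Random, so runs are not reproducible
      // (unlike RunGridTest, which seeds deterministically)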
      var random = new Random();

      //var problem = new RoyalSequenceProblem(random, 10, 30, 2, 1, 0);
      // var phraseLen = 3;
      // var numPhrases = 5;
      // var problem = new RoyalPhraseSequenceProblem(random, 10, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: false);

      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new FindPhrasesProblem(random, 10, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 0, correctReward: 1.0, decoyReward: 0, phrasesAsSets: false);

      // good results for symb-reg
      // prev results: e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward)
      // 2015 01 19: grid test with canonical states:
      // - EpsGreedyPolicy(0.20, max)
      // - GenericThompsonSamplingPolicy("")
      // - UCTPolicy(0.10) (5 of 5 runs, 35000 iters avg.); with rand-tries 0: 10 of 10 runs successful, at 40000 iters 9/10, at 30000 iters 1/10
      // 2015 01 22: symb-reg: grid test on find-phrases problem showed good results for UCB1TunedPolicy and SequentialSearch with canonical states
      // - symb-reg: consistent results with UCB1Tuned; finds the optimal solution in ~50k iters (new GenericGrammarPolicy(problem, new UCB1TunedPolicy(), true))
      // 2015 01 23: grid test with canonical states:
      // - UCTPolicy(0.10) and UCBNormalPolicy: 10/10 optimal solutions within at most 50k iters; slightly worse: generic Thompson with variable sigma and BoltzmannExploration(100)


      // good results for artificial ant:
      // prev results:
      // - var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // - GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
      // 2015 01 19: grid test with canonical states (non-canonical slightly worse)
      // - ant: Threshold Ascent (best 100, 0.01; all variants relatively good)
      // - ant: policies where the variance has a large weight compared to the mean? (Gaussian (compatible), Gaussian with fixed variance, UCT with large c, all TA variants)
      // - ant: UCB1Tuned with canonical states also works very well for the artificial ant! consistent solutions in less than 10k iters

      var problem = new SymbolicRegressionPoly10Problem();
      //var problem = new SantaFeAntProblem();
      //var problem = new SymbolicRegressionProblem(random, "Tower");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg length = 11 q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      //var alg = new SequentialSearch(problem, 23, random, 0,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.QLearningGrammarPolicy(problem, new BoltzmannExplorationPolicy(10),
      //    1, 1, true));
      //var alg = new SequentialSearch(problem, 23, random, 0,
      //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericContextualGrammarPolicy(problem, new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)), true));
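      // active configuration: SequentialSearch with the function-approximation grammar
      // policy (linear value-function approximation; see the changeset message above)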
      var alg = new SequentialSearch(problem, 23, random, 0,
        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericFunctionApproximationGrammarPolicy(problem, true));
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      // var alg = new AlternativesSampler(problem, 17);
      // var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

      alg.FoundNewBestSolution += (sentence, quality) => {
        //Console.WriteLine("{0}", globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);

        if (iterations % 1000 == 0) {
          if (iterations % 10000 == 0) Console.Clear();
          Console.SetCursorPosition(0, 0);
          alg.PrintStats();
        }

        //Console.WriteLine(sentence);

        //if (iterations % 10000 == 0) {
        //  Console.WriteLine("{0}", globalStatistics);
        //}
      };

      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.Clear();
      alg.PrintStats();
      Console.WriteLine(globalStatistics);
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} µs/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        (double)sw.ElapsedMilliseconds * 1000 / maxIterations); // ms * 1000 / iterations = µs per solution
    }
  }
}