using System;
using System.Collections.Generic;
using System.Data;
using System.Diagnostics;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;

namespace Main {
  class Program {
    static void Main(string[] args) {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;

      RunDemo();
      //RunGridTest();
    }

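    // Grid test: crosses every bandit policy below with every problem instance and
    // random-tries setting, running several repetitions of each configuration in parallel.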
    private static void RunGridTest() {
      int maxIterations = 200000; // for Poly-10 there has been no successful run with HL yet, even with 50000 evaluations
      //var globalRandom = new Random(31415);
      var localRandSeed = 31415;
      var reps = 8;

      var policies = new Func<IBanditPolicy>[] {
        () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
        //() => new GaussianThompsonSamplingPolicy(),
        () => new GaussianThompsonSamplingPolicy(true),
        () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
        () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
        //() => new BernoulliThompsonSamplingPolicy(),
        () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
        () => new RandomPolicy(),
        () => new EpsGreedyPolicy(0.01),
        () => new EpsGreedyPolicy(0.05),
        () => new EpsGreedyPolicy(0.1),
        () => new EpsGreedyPolicy(0.2),
        () => new EpsGreedyPolicy(0.5),
        () => new UCTPolicy(0.1),
        () => new UCTPolicy(0.5),
        () => new UCTPolicy(1),
        () => new UCTPolicy(2),
        () => new UCTPolicy(5),
        () => new UCTPolicy(10),
        () => new UCB1Policy(),
        () => new UCB1TunedPolicy(),
        () => new UCBNormalPolicy(),
        () => new BoltzmannExplorationPolicy(0.1),
        () => new BoltzmannExplorationPolicy(0.5),
        () => new BoltzmannExplorationPolicy(1),
        () => new BoltzmannExplorationPolicy(5),
        () => new BoltzmannExplorationPolicy(10),
        () => new BoltzmannExplorationPolicy(20),
        () => new BoltzmannExplorationPolicy(100),
        () => new ChernoffIntervalEstimationPolicy(0.01),
        () => new ChernoffIntervalEstimationPolicy(0.05),
        () => new ChernoffIntervalEstimationPolicy(0.1),
        () => new ChernoffIntervalEstimationPolicy(0.2),
        () => new ThresholdAscentPolicy(10, 0.01),
        () => new ThresholdAscentPolicy(10, 0.05),
        () => new ThresholdAscentPolicy(10, 0.1),
        () => new ThresholdAscentPolicy(10, 0.2),
        () => new ThresholdAscentPolicy(100, 0.01),
        () => new ThresholdAscentPolicy(100, 0.05),
        () => new ThresholdAscentPolicy(100, 0.1),
        () => new ThresholdAscentPolicy(100, 0.2),
        () => new ThresholdAscentPolicy(1000, 0.01),
        () => new ThresholdAscentPolicy(1000, 0.05),
        () => new ThresholdAscentPolicy(1000, 0.1),
        () => new ThresholdAscentPolicy(1000, 0.2),
        () => new ThresholdAscentPolicy(5000, 0.01),
        () => new ThresholdAscentPolicy(10000, 0.01),
      };
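      // note: the array holds policy factories rather than shared policy instances,
      // so each repetition below starts from a fresh, unbiased policy state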

      foreach (var problem in new Tuple<IProblem, int>[] {
        //Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
        Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23),
      })
        foreach (var randomTries in new int[] { 0, 1, 10, /* 5, 100, 500, 1000 */ }) {
          foreach (var policy in policies) {
            var myRandomTries = randomTries;
            var localRand = new Random(localRandSeed);
            var options = new ParallelOptions();
            options.MaxDegreeOfParallelism = 4;
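            // derive a deterministic seed for every repetition from the shared
            // localRand; the lock is needed because System.Random is not thread-safe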
            Parallel.For(0, reps, options, (i) => {
              //var t = Task.Run(() => {
              Random myLocalRand;
              lock (localRand)
                myLocalRand = new Random(localRand.Next());

              //for (int i = 0; i < reps; i++) {

              int iterations = 0;
              var globalStatistics = new SentenceSetStatistics();

              // var problem = new SymbolicRegressionPoly10Problem();
              // var problem = new SantaFeAntProblem();
              //var problem = new PalindromeProblem();
              //var problem = new HardPalindromeProblem();
              //var problem = new RoyalPairProblem();
              //var problem = new EvenParityProblem();
              var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy()); // TODO: Make sure we generate the same random numbers for each experiment
              //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
              //var alg = new AlternativesContextSampler(problem, 25);

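              // record every evaluated sentence and print a progress line every 10000 evaluations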
              alg.SolutionEvaluated += (sentence, quality) => {
                iterations++;
                globalStatistics.AddSentence(sentence, quality);
                if (iterations % 10000 == 0) {
                  Console.WriteLine("{0,4} {1,7} {2,5} {3,25} {4}", alg.treeDepth, alg.treeSize, myRandomTries, policy(), globalStatistics);
                }
              };

              alg.Run(maxIterations);

              //Console.WriteLine("{0,5} {1} {2}", randomTries, policyFactory(1), globalStatistics);
              //}
              //});
              //tasks.Add(t);
            });
          }
        }
      //Task.WaitAll(tasks.ToArray());
    }

    private static void RunDemo() {
      // TODO: unify MCTS, TD and ContextMCTS solvers (stateInfos)
      // TODO: test with eps-greedy using max instead of average as value (seems to work well for symb-reg! explore further!)
      // TODO: separate value function from policy
      // TODO: in contextual MCTS, store a bandit info for each node in the _graph_ and also update the bandit infos of all parents
      // TODO: exhaustive search with priority list
      // TODO: why does the old implementation of GaussianThompson work better for SantaFe than the new one? See the comparison: old vs. new implementation of GaussianThompsonSampling
      // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
      // TODO: how else can samplers be compared, i.e. what can be measured to estimate the quality of a sampler (apart from the quality of, and the number of iterations until, the best solution)? => goal: reach a good result in fewer iterations
      // TODO: research Thompson sampling for the max bandit?
      // TODO: thorough test of strategies for the numCorrectPhrases-armed max bandit
      // TODO: verify the TA implementation using the example from the original paper
      // TODO: separate the policy from the MCTS tree data structure to allow sharing of information over disconnected parts of the tree (semantic equivalence)
      // TODO: implement Thompson sampling for Gaussian mixture models
      // TODO: implement inspection for MCTS (possibly an interactive command line to display statistics from the tree)
      // TODO: implement an ACO-style bandit policy
      // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
      // TODO: compare generating the shortest possible sentences when completing randomly vs. simply choosing alternatives at random
      // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
      // TODO: constant optimization

      int maxIterations = 100000;
      int iterations = 0;
      var sw = new Stopwatch();
      double bestQuality = 0;
      string bestSentence = "";
      var globalStatistics = new SentenceSetStatistics();
      var random = new Random();

      //var phraseLen = 3;
      //var numPhrases = 5;
      //var problem = new RoyalPhraseSequenceProblem(random, 10, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: true);

      //var phraseLen = 4;
      //var numPhrases = 5;
      //var problem = new FindPhrasesProblem(random, 15, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 500, correctReward: 1.0, decoyReward: 0.2, phrasesAsSets: true);

      var problem = new SymbolicRegressionPoly10Problem(); // good results e.g. with 10 random tries and EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward)
      // Ant:
      // good results e.g. with var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
      // GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
      //var problem = new SantaFeAntProblem();
      //var problem = new SymbolicRegressionProblem("Tower");
      //var problem = new PalindromeProblem();
      //var problem = new HardPalindromeProblem();
      //var problem = new RoyalPairProblem();
      //var problem = new EvenParityProblem();
      // symbreg: length = 11, q = 0.824522210419616
      //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
      var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
      //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
      //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
      //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
      //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
      //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
      //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
      //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
      //var alg = new AlternativesSampler(problem, 17);
      //var alg = new RandomSearch(problem, random, 17);
      //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);

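      // track the best solution found so far; FoundNewBestSolution fires whenever the incumbent is improved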
      alg.FoundNewBestSolution += (sentence, quality) => {
        bestQuality = quality;
        bestSentence = sentence;
        //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        //Console.ReadLine();
      };
      alg.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
        if (iterations % 100 == 0) {
          //if (iterations % 1000 == 0) Console.Clear();
          Console.SetCursorPosition(0, 0);
          alg.PrintStats();
        }
        //Console.WriteLine(sentence);

        if (iterations % 10000 == 0) {
          //Console.WriteLine("{0,4} {1,7} {2}", alg.treeDepth, alg.treeSize, globalStatistics);
        }
      };
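
      // time the whole run to report throughput (evaluated sentences per second
      // and average time per evaluated sentence)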
      sw.Start();

      alg.Run(maxIterations);

      sw.Stop();

      Console.WriteLine("{0,10} Best solution: {1,10:F5} {2}", iterations, bestQuality, bestSentence);
      Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} µs/sol",
        sw.Elapsed.TotalSeconds,
        maxIterations / sw.Elapsed.TotalSeconds,
        sw.ElapsedMilliseconds * 1000.0 / maxIterations); // ms * 1000 / n = microseconds per solution (was mislabeled ns/sol)
    }
  }
}