Changeset 11981
Timestamp:
02/11/15 20:11:35
Author:
gkronber
Message:

#2283: cleanup and included HeuristicLab.dlls to create a self-contained branch

Location:
branches/HeuristicLab.Problems.GrammaticalOptimization/Main
Files:
2 edited

Legend:

  Unmodified (no marker)
+ Added
- Removed
  • branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Main.csproj

    r11977 → r11981

        <Name>HeuristicLab.Algorithms.Bandits</Name>
      </ProjectReference>
-     <ProjectReference Include="..\HeuristicLab.Algorithms.GeneticProgramming\HeuristicLab.Algorithms.GeneticProgramming.csproj">
-       <Project>{14BEC23F-63FD-4954-B8AE-E2F4962E9B57}</Project>
-       <Name>HeuristicLab.Algorithms.GeneticProgramming</Name>
-     </ProjectReference>
      <ProjectReference Include="..\HeuristicLab.Algorithms.GrammaticalOptimization\HeuristicLab.Algorithms.GrammaticalOptimization.csproj">
        <Project>{eea07488-1a51-412a-a52c-53b754a628b3}</Project>
        <Name>HeuristicLab.Algorithms.GrammaticalOptimization</Name>
-     </ProjectReference>
-     <ProjectReference Include="..\HeuristicLab.Common\HeuristicLab.Common.csproj">
-       <Project>{3A2FBBCB-F9DF-4970-87F3-F13337D941AD}</Project>
-       <Name>HeuristicLab.Common</Name>
-     </ProjectReference>
-     <ProjectReference Include="..\HeuristicLab.Distributions\HeuristicLab.Distributions.csproj">
-       <Project>{31171165-E16F-4A1A-A8AB-25C6AB3A71B9}</Project>
-       <Name>HeuristicLab.Distributions</Name>
-     </ProjectReference>
-     <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization.SymbReg\HeuristicLab.Problems.GrammaticalOptimization.SymbReg.csproj">
-       <Project>{17A7A380-86CE-482D-8D22-CBD70CC97F0D}</Project>
-       <Name>HeuristicLab.Problems.GrammaticalOptimization.SymbReg</Name>
      </ProjectReference>
      <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization\HeuristicLab.Problems.GrammaticalOptimization.csproj">
  • branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs

    r11980 → r11981

  using System;
- using System.Collections.Generic;
  using System.Diagnostics;
  using System.Globalization;
- using System.Runtime.Remoting.Messaging;
- using System.Text;
- using System.Threading;
- using System.Threading.Tasks;
- using HeuristicLab.Algorithms.Bandits;
  using HeuristicLab.Algorithms.Bandits.BanditPolicies;
- using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
- using HeuristicLab.Algorithms.Bandits.Models;
- using HeuristicLab.Algorithms.GeneticProgramming;
  using HeuristicLab.Algorithms.GrammaticalOptimization;
- using HeuristicLab.Common;
  using HeuristicLab.Problems.GrammaticalOptimization;
- using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
- using BoltzmannExplorationPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.BoltzmannExplorationPolicy;
- using EpsGreedyPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy;
- using IProblem = HeuristicLab.Problems.GrammaticalOptimization.IProblem;
- using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;
- using UCTPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.UCTPolicy;
+
+ // NOTES: gkronber
+ // TODO: feature extraction for full symbolic expressions and experiment for all benchmark problems
+ // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
+ // TODO: research thompson sampling for max bandit?
+ // TODO: verify TA implementation using example from the original paper
+ // TODO: implement thompson sampling for gaussian mixture models
+ // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
+ // TODO: compare, for complete-random sampling, generating the shortest possible sentences vs. simply choosing alternatives at random
+ // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
+ // TODO: constant optimization
+
  
  namespace Main {
…
        CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
  
-       //RunDemo();
-       //RunGpDemo();
-       // RunGridTest();
-       //RunGpGridTest();
-       RunFunApproxTest();
+       RunDemo();
      }
  
-     private static void RunGridTest() {
-       int maxIterations = 200000; // for poly-10 with 50000 evaluations no successful try with hl yet
-       //var globalRandom = new Random(31415);
-       var localRandSeed = new Random().Next();
-       var reps = 20;
-
-       var policyFactories = new Func<IBanditPolicy>[]
-         {
-          //() => new RandomPolicy(),
-          // () => new ActiveLearningPolicy(),
-          //() => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
-          //() => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
-          //() => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
-          //() => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
-          ////() => new GaussianThompsonSamplingPolicy(),
-          //() => new GaussianThompsonSamplingPolicy(true),
-          //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
-          //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
-          ////() => new BernoulliThompsonSamplingPolicy(),
-          //() => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
-          //() => new EpsGreedyPolicy(0.01),
-          //() => new EpsGreedyPolicy(0.05),
-          //() => new EpsGreedyPolicy(0.1),
-          //() => new EpsGreedyPolicy(0.2),
-          //() => new EpsGreedyPolicy(0.5),
-          //() => new UCTPolicy(0.01),
-          //() => new UCTPolicy(0.05),
-          //() => new UCTPolicy(0.1),
-          //() => new UCTPolicy(0.5),
-          //() => new UCTPolicy(1),
-          //() => new UCTPolicy(2),
-          //() => new UCTPolicy( 5),
-          //() => new UCTPolicy( 10),
-          //() => new ModifiedUCTPolicy(0.01),
-          //() => new ModifiedUCTPolicy(0.05),
-          //() => new ModifiedUCTPolicy(0.1),
-          //() => new ModifiedUCTPolicy(0.5),
-          //() => new ModifiedUCTPolicy(1),
-          //() => new ModifiedUCTPolicy(2),
-          //() => new ModifiedUCTPolicy( 5),
-          //() => new ModifiedUCTPolicy( 10),
-          //() => new UCB1Policy(),
-          //() => new UCB1TunedPolicy(),
-          //() => new UCBNormalPolicy(),
-          //() => new BoltzmannExplorationPolicy(1),
-          //() => new BoltzmannExplorationPolicy(10),
-          //() => new BoltzmannExplorationPolicy(20),
-          //() => new BoltzmannExplorationPolicy(100),
-          //() => new BoltzmannExplorationPolicy(200),
-          //() => new BoltzmannExplorationPolicy(500),
-          // () => new ChernoffIntervalEstimationPolicy( 0.01),
-          // () => new ChernoffIntervalEstimationPolicy( 0.05),
-          // () => new ChernoffIntervalEstimationPolicy( 0.1),
-          // () => new ChernoffIntervalEstimationPolicy( 0.2),
-          //() => new ThresholdAscentPolicy(5, 0.01),
-          //() => new ThresholdAscentPolicy(5, 0.05),
-          //() => new ThresholdAscentPolicy(5, 0.1),
-          //() => new ThresholdAscentPolicy(5, 0.2),
-          //() => new ThresholdAscentPolicy(10, 0.01),
-          //() => new ThresholdAscentPolicy(10, 0.05),
-          //() => new ThresholdAscentPolicy(10, 0.1),
-          //() => new ThresholdAscentPolicy(10, 0.2),
-          //() => new ThresholdAscentPolicy(50, 0.01),
-          //() => new ThresholdAscentPolicy(50, 0.05),
-          //() => new ThresholdAscentPolicy(50, 0.1),
-          //() => new ThresholdAscentPolicy(50, 0.2),
-          //() => new ThresholdAscentPolicy(100, 0.01),
-          () => new ThresholdAscentPolicy(100, 0.05),
-          //() => new ThresholdAscentPolicy(100, 0.1),
-          //() => new ThresholdAscentPolicy(100, 0.2),
-          //() => new ThresholdAscentPolicy(500, 0.01),
-          //() => new ThresholdAscentPolicy(500, 0.05),
-          //() => new ThresholdAscentPolicy(500, 0.1),
-          //() => new ThresholdAscentPolicy(500, 0.2),
-          //() => new ThresholdAscentPolicy(5000, 0.01),
-          //() => new ThresholdAscentPolicy(10000, 0.01),
-         };
-
-       var instanceFactories = new Func<Random, Tuple<IProblem, int>>[]
-       {
-         //(rand) => Tuple.Create((IProblem)new SantaFeAntProblem(), 17),
-         //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:0, correctReward:1, decoyReward:0, phrasesAsSets:false ), 15),
-         //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:0, correctReward:1, decoyReward:0, phrasesAsSets:true ), 15),
-         //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:200, correctReward:1, decoyReward:0.5, phrasesAsSets:false), 15),
-         //(rand) => Tuple.Create((IProblem)new FindPhrasesProblem(rand, 10, numPhrases:5, phraseLen:3, numOptimalPhrases:5, numDecoyPhrases:200, correctReward:1, decoyReward:0.5, phrasesAsSets:true), 15),
-         (rand) => Tuple.Create((IProblem)new SymbolicRegressionPoly10Problem(), 23)
-       };
-
-       foreach (var instanceFactory in instanceFactories) {
-         foreach (var useCanonical in new bool[] { true /*, false */ }) {
-           foreach (var randomTries in new int[] { 1 /*, 1, 10 /*, /* 5, 100 /*, 500, 1000 */}) {
-             foreach (var policyFactory in policyFactories) {
-               var myRandomTries = randomTries;
-               var localRand = new Random(localRandSeed);
-               var options = new ParallelOptions();
-               options.MaxDegreeOfParallelism = 1;
-               Parallel.For(0, reps, options, (i) => {
-                 Random myLocalRand;
-                 lock (localRand)
-                   myLocalRand = new Random(localRand.Next());
-
-                 int iterations = 0;
-                 var globalStatistics = new SentenceSetStatistics();
-
-                 // var problem = new SymbolicRegressionPoly10Problem();
-                 // var problem = new SantaFeAntProblem();
-                 //var problem = new PalindromeProblem();
-                 //var problem = new HardPalindromeProblem();
-                 //var problem = new RoyalPairProblem();
-                 //var problem = new EvenParityProblem();
-                 // var alg = new MctsSampler(problem.Item1, problem.Item2, myLocalRand, myRandomTries, policy());
-                 var instance = instanceFactory(myLocalRand);
-                 var problem = instance.Item1;
-                 var maxLen = instance.Item2;
-                 var alg = new SequentialSearch(problem, maxLen, myLocalRand, myRandomTries,
-                   new GenericGrammarPolicy(problem, policyFactory(), useCanonical));
-                 // var alg = new SequentialSearch(problem, maxLen, myLocalRand,
-                 //   myRandomTries,
-                 //   new GenericFunctionApproximationGrammarPolicy(problem,
-                 //     useCanonical));
-                 //var alg = new ExhaustiveBreadthFirstSearch(problem, 25);
-                 //var alg = new AlternativesContextSampler(problem, 25);
-
-                 alg.SolutionEvaluated += (sentence, quality) => {
-                   iterations++;
-                   globalStatistics.AddSentence(sentence, quality);
-                   if (iterations % 1000 == 0) {
-                     Console.WriteLine("{0,3} {1,5} \"{2,25}\" {3} {4} {5}", i, myRandomTries, policyFactory(), useCanonical, problem.ToString(), globalStatistics);
-                   }
-                 };
-                 alg.FoundNewBestSolution += (sentence, quality) => {
-                   //Console.WriteLine("{0,5} {1,25} {2} {3}",
-                   //  myRandomTries, policyFactory(), useCanonical,
-                   //  globalStatistics);
-                 };
-
-                 alg.Run(maxIterations);
-               });
-             }
-           }
-         }
-       }
-     }
  
      private static void RunDemo() {
-       // TODO: cleanup after EuroCAST
-       // TODO: why does GaussianThompsonSampling work so well with MCTS for the artificial ant problem?
-       // TODO: research thompson sampling for max bandit?
-       // TODO: verify TA implementation using example from the original paper
-       // TODO: implement thompson sampling for gaussian mixture models
-       // TODO: simultaneous modeling of transformed target variables (y, 1/y, log(y), exp(y), sqrt(y), ...)
-       // TODO: compare, for complete-random sampling, generating the shortest possible sentences vs. simply choosing alternatives at random
-       // TODO: reward discounting (for reward distributions that change over time); create a dedicated unit test for this
-       // TODO: constant optimization
  
  
-       int maxIterations = 1000000;
+       int maxIterations = 100000;
        int iterations = 0;
-       var sw = new Stopwatch();
  
        var globalStatistics = new SentenceSetStatistics();
        var random = new Random();
  
-
-       //var problem = new RoyalSequenceProblem(random, 10, 30, 2, 1, 0);
-       // var phraseLen = 3;
-       // var numPhrases = 5;
-       // var problem = new RoyalPhraseSequenceProblem(random, 10, numPhrases, phraseLen: phraseLen, numCorrectPhrases: 1, correctReward: 1, incorrectReward: 0.0, phrasesAsSets: false);
-
-       //var phraseLen = 3;
-       //var numPhrases = 5;
-       //var problem = new FindPhrasesProblem(random, 10, numPhrases, phraseLen, numOptimalPhrases: numPhrases, numDecoyPhrases: 0, correctReward: 1.0, decoyReward: 0, phrasesAsSets: false);
-
-       // good results for symb-reg
-       // prev results: e.g. 10 randomtries and EpsGreedyPolicy(0.2, (aInfo)=>aInfo.MaxReward)
-       // 2015 01 19: grid test with canonical states:
-       // - EpsGreedyPolicy(0.20,max)
-       // - GenericThompsonSamplingPolicy("")
-       // - UCTPolicy(0.10) (5 of 5 runs, 35000 iters avg.), 10 successful runs of 10 with rand-tries 0, at 40000 iters 9/10, at 30000 iters 1/10
-       // 2015 01 22: symb-reg: grid test on find-phrases problem showed good results for UCB1TunedPolicy and SequentialSearch with canonical states
-       // - symb-reg: consistent results with UCB1Tuned. finds optimal solution in ~50k iters (new GenericGrammarPolicy(problem, new UCB1TunedPolicy(), true));
-       // 2015 01 23: grid test with canonical states:
-       // - UCTPolicy(0.10) and UCBNormalPolicy: 10/10 optimal solutions within at most 50k iters; slightly worse: generic-thompson with variable sigma and BoltzmannExploration(100)
-
-
-       // good results for artificial ant:
-       // prev results:
-       // - var alg = new MctsSampler(problem, 17, random, 1, (rand, numActions) => new ThresholdAscentPolicy(numActions, 500, 0.01));
-       // - GaussianModelWithUnknownVariance (and Q = 0.99 quantile) also works well for Ant
-       // 2015 01 19: grid test with canonical states (non-canonical slightly worse)
-       // - ant: Threshold Ascent (best 100, 0.01; all variants relatively good)
-       // - ant: policies where the variance has a large weight compared to the mean? (Gaussian(compatible), Gaussian with fixed variance, UCT with large c, all TA variants)
-       // - ant: UCB1Tuned with canonical states also works very well for the artificial ant! consistent solutions in less than 10k iters
-
-       //var problem = new SymbolicRegressionPoly10Problem();
-       //var problem = new SantaFeAntProblem();
-       var problem = new SymbolicRegressionProblem(random, "Breiman");
-       //var problem = new PalindromeProblem();
-       //var problem = new HardPalindromeProblem();
+       var problem = new SymbolicRegressionPoly10Problem();
+       //var problem = new SantaFeAntProblem();
        //var problem = new RoyalPairProblem();
        //var problem = new EvenParityProblem();
-       // symbreg length = 11 q = 0.824522210419616
-       //var alg = new MctsSampler(problem, 23, random, 0, new BoltzmannExplorationPolicy(100));
-       //var alg = new MctsSampler(problem, 23, random, 0, new EpsGreedyPolicy(0.1));
-       //var alg = new SequentialSearch(problem, 23, random, 0,
-       //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.QLearningGrammarPolicy(problem, new BoltzmannExplorationPolicy(10),
-       //    1, 1, true));
-       //var alg = new SequentialSearch(problem, 23, random, 0,
-       //  new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericContextualGrammarPolicy(problem, new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)), true));
-       var alg = new SequentialSearch(problem, 30, random, 0,
-         new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericFunctionApproximationGrammarPolicy(problem, true));
-       //var alg = new MctsQLearningSampler(problem, sentenceLen, random, 0, null);
-       //var alg = new MctsQLearningSampler(problem, 30, random, 0, new EpsGreedyPolicy(0.2));
-       //var alg = new MctsContextualSampler(problem, 23, random, 0); // must visit each canonical solution only once
-       //var alg = new TemporalDifferenceTreeSearchSampler(problem, 30, random, 1);
-       //var alg = new ExhaustiveBreadthFirstSearch(problem, 7);
-       //var alg = new AlternativesContextSampler(problem, random, 17, 4, (rand, numActions) => new RandomPolicy(rand, numActions));
-       //var alg = new ExhaustiveDepthFirstSearch(problem, 17);
-       // var alg = new AlternativesSampler(problem, 17);
-       // var alg = new RandomSearch(problem, random, 17);
-       //var alg = new ExhaustiveRandomFirstSearch(problem, random, 17);
+       var alg = new SequentialSearch(problem, 23, random, 0,
+        new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy()));
+
  
        alg.FoundNewBestSolution += (sentence, quality) => {
          //Console.WriteLine("{0}", globalStatistics);
-         //Console.ReadLine();
        };
+
        alg.SolutionEvaluated += (sentence, quality) => {
          iterations++;
          globalStatistics.AddSentence(sentence, quality);
  
-         //if (iterations % 100 == 0) {
-         //  if (iterations % 10000 == 0) Console.Clear();
-         //  Console.SetCursorPosition(0, 0);
-         //  alg.PrintStats();
-         //}
+         // comment this if you don't want to see solver statistics
+         if (iterations % 100 == 0) {
+           if (iterations % 10000 == 0) Console.Clear();
+           Console.SetCursorPosition(0, 0);
+           alg.PrintStats();
+         }
  
-         //Console.WriteLine(sentence);
-
-         if (iterations % 100 == 0) {
-           Console.WriteLine("{0}", globalStatistics);
-         }
+         // uncomment this if you want to collect statistics of the generated sentences
+         // if (iterations % 1000 == 0) {
+         //   Console.WriteLine("{0}", globalStatistics);
+         // }
        };
  
-
+       var sw = new Stopwatch();
        sw.Start();
-
        alg.Run(maxIterations);
-
        sw.Stop();
  
…
          (double)sw.ElapsedMilliseconds * 1000 / maxIterations);
      }
-
-     public static void RunGpDemo() {
-       int iterations = 0;
-       const int seed = 31415;
-       const int maxIterations = 100000;
-
-       //var prob = new SymbolicRegressionProblem(new Random(31415), "Tower");
-       var prob = new SymbolicRegressionPoly10Problem();
-       var sgp = new OffspringSelectionGP(prob, new Random(seed), true);
-       RunGP(sgp, prob, 200000, 500, 0.15, 50);
-     }
-
-
-     private static void RunFunApproxTest() {
-       const int nReps = 30;
-       const int seed = 31415;
-       //const int maxIters = 50000;
-       var rand = new Random();
-       var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[]
-       {
-         () => Tuple.Create(100000, 23,  (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem()),
-         //() => Tuple.Create(100000, 17, (ISymbolicExpressionTreeProblem)new SantaFeAntProblem()),
-         //() => Tuple.Create(50000, 32,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         //() => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         //() => Tuple.Create(50000, 64,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         //() => Tuple.Create(50000, 128, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         //() => Tuple.Create(50000, 128,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         //() => Tuple.Create(50000, 256, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         //() => Tuple.Create(50000, 256,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         //() => new RoyalPairProblem(),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, true),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, false),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 50, 1, 0.8, false),
-       };
-
-       // skip experiments that are already done
-       for (int i = 0; i < nReps; i++) {
-         foreach (var problemFactory in problemFactories) {
-           {
-             var solverSeed = rand.Next();
-             var tupel = problemFactory();
-             var maxIters = tupel.Item1;
-             var maxSize = tupel.Item2;
-             var prob = tupel.Item3;
-
-             var alg = new SequentialSearch(prob, maxSize, new Random(solverSeed), 0,
-           new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericFunctionApproximationGrammarPolicy(prob, true));
-
-             int iterations = 0;
-             double bestQuality = double.NegativeInfinity;
-             var globalStatistics = new SentenceSetStatistics();
-             var algName = alg.GetType().Name;
-             var probName = prob.GetType().Name;
-             alg.SolutionEvaluated += (sentence, quality) => {
-               bestQuality = Math.Max(bestQuality, quality);
-               iterations++;
-               globalStatistics.AddSentence(sentence, quality);
-               //if (iterations % 100 == 0) {
-               //  Console.Clear();
-               //  Console.SetCursorPosition(0, 0);
-               //  alg.PrintStats();
-               //}
-               //Console.WriteLine("{0:N5} {1}", quality, sentence);
-               if (iterations % 1000 == 0) {
-                 Console.WriteLine("\"{0,25}\" {1} \"{2,25}\" {3}", algName, maxSize, probName, globalStatistics);
-                 if (bestQuality.IsAlmost(1.0)) {
-                   alg.StopRequested = true;
-                 }
-               }
-             };
-
-             alg.Run(maxIters);
-
-
-             while (iterations < maxIters) {
-               iterations++;
-               globalStatistics.AddSentence("BEST", bestQuality);
-               if (iterations % 1000 == 0) {
-                 Console.WriteLine("\"{0,25}\" {1} \"{2,25}\" {3}", algName, maxSize, probName, globalStatistics);
-                 if (bestQuality.IsAlmost(1.0)) {
-                   alg.StopRequested = true;
-                 }
-               }
-             }
-           }
-         }
-       }
-     }
-
-     private static void RunGpGridTest() {
-       const int nReps = 20;
-       const int seed = 31415;
-       //const int maxIters = 50000;
-       var rand = new Random(seed);
-       var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[]
-       {
-         () => Tuple.Create(50000, 32, (ISymbolicExpressionTreeProblem)new PermutationProblem()),
-         () => Tuple.Create(50000, 32, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         () => Tuple.Create(50000, 32,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         () => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         () => Tuple.Create(50000, 64,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         () => Tuple.Create(50000, 128, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         () => Tuple.Create(50000, 128,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         () => Tuple.Create(50000, 256, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
-         () => Tuple.Create(50000, 256,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
-         //() => new RoyalPairProblem(),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, true),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 0, 1, 0, false),
-         //() => new FindPhrasesProblem(rand, 20, 5, 3, 5, 50, 1, 0.8, false),
-       };
-
-       foreach (var popSize in new int[] { 100 /*, 250, 500, 1000, 2500, 5000, 10000 */ }) {
-         foreach (var mutationRate in new double[] { /* 0.05, /* 0.10, */ 0.15, /* 0.25, 0.3 */}) {
-           // skip experiments that are already done
-           foreach (var problemFactory in problemFactories) {
-             for (int i = 0; i < nReps; i++) {
-               {
-                 var solverSeed = rand.Next();
-                 var tupel = problemFactory();
-                 var maxIters = tupel.Item1;
-                 var maxSize = tupel.Item2;
-                 var prob = tupel.Item3;
-                 var sgp = new StandardGP(prob, new Random(solverSeed));
-                 RunGP(sgp, prob, maxIters, popSize, mutationRate, maxSize);
-               }
-               //{
-               //  var prob = problemFactory();
-               //  var osgp = new OffspringSelectionGP(prob, new Random(solverSeed));
-               //  RunGP(osgp, prob, maxIters, popSize, mutationRate, maxSize);
-               //}
-             }
-           }
-         }
-
-       }
-     }
-
-     private static void RunGP(IGPSolver gp, ISymbolicExpressionTreeProblem prob, int maxIters, int popSize, double mutationRate, int maxSize) {
-       int iterations = 0;
-       var globalStatistics = new SentenceSetStatistics(prob.BestKnownQuality(maxSize));
-       var gpName = gp.GetType().Name;
-       var probName = prob.GetType().Name;
-       gp.SolutionEvaluated += (sentence, quality) => {
-         iterations++;
-         globalStatistics.AddSentence(sentence, quality);
-
-         if (iterations % 100 == 0) {
-           Console.WriteLine("\"{0,25}\" {1} {2:N2} {3} \"{4,25}\" {5}", gpName, popSize, mutationRate, maxSize, probName, globalStatistics);
-         }
-       };
-
-       gp.PopulationSize = popSize;
-       gp.MutationRate = mutationRate;
-       gp.MaxSolutionSize = maxSize + 2;
-       gp.MaxSolutionDepth = maxSize + 2;
-
-       gp.Run(maxIters);
-     }
    }
  }
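The Main.csproj hunk above drops several ProjectReference entries, and the commit message says the HeuristicLab.dlls were included so that the branch builds on its own. The replacement references are not part of the hunk shown here, so the following is only an illustrative sketch of how such a swap usually looks in MSBuild; the assembly name, version suffix, and HintPath are assumptions, not taken from the changeset:

  <!-- before: project-to-project reference (GUID as in the hunk above) -->
  <ProjectReference Include="..\HeuristicLab.Common\HeuristicLab.Common.csproj">
    <Project>{3A2FBBCB-F9DF-4970-87F3-F13337D941AD}</Project>
    <Name>HeuristicLab.Common</Name>
  </ProjectReference>

  <!-- after (hypothetical): reference a prebuilt dll checked into the branch -->
  <Reference Include="HeuristicLab.Common-3.3">
    <HintPath>..\HeuristicLab.dlls\HeuristicLab.Common-3.3.dll</HintPath>
    <Private>True</Private>
  </Reference>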