
Timestamp: 02/10/15 02:05:31 (9 years ago)
Author: gkronber
Message: #2283: eurocast experiments

File: 1 edited
  • branches/HeuristicLab.Problems.GrammaticalOptimization/Main/Program.cs

Legend:
  (no prefix)  unmodified
  +            added
  -            removed

r11973 → r11974

@@ lines 30-34 @@
       //RunGridTest();
       //RunGpGridTest();
-      RunFunApproxTest();
+    RunFunApproxTest();
     }
     
@@ lines 36-110 @@
       int maxIterations = 200000; // for poly-10 with 50000 evaluations no successful try with hl yet
       //var globalRandom = new Random(31415);
-      var localRandSeed = 31415;
+      var localRandSeed = new Random().Next();
       var reps = 20;

       var policyFactories = new Func<IBanditPolicy>[]
         {
-          () => new RandomPolicy(),
-          () => new ActiveLearningPolicy(),
-          () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
-          () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
-          () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
-          () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
-          //() => new GaussianThompsonSamplingPolicy(),
-          () => new GaussianThompsonSamplingPolicy(true),
-          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
-          () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
-          //() => new BernoulliThompsonSamplingPolicy(),
-          () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
-          () => new EpsGreedyPolicy(0.01),
-          () => new EpsGreedyPolicy(0.05),
-          () => new EpsGreedyPolicy(0.1),
-          () => new EpsGreedyPolicy(0.2),
-          () => new EpsGreedyPolicy(0.5),
-          () => new UCTPolicy(0.01),
-          () => new UCTPolicy(0.05),
-          () => new UCTPolicy(0.1),
-          () => new UCTPolicy(0.5),
-          () => new UCTPolicy(1),
-          () => new UCTPolicy(2),
-          () => new UCTPolicy( 5),
-          () => new UCTPolicy( 10),
-          () => new ModifiedUCTPolicy(0.01),
-          () => new ModifiedUCTPolicy(0.05),
-          () => new ModifiedUCTPolicy(0.1),
-          () => new ModifiedUCTPolicy(0.5),
-          () => new ModifiedUCTPolicy(1),
-          () => new ModifiedUCTPolicy(2),
-          () => new ModifiedUCTPolicy( 5),
-          () => new ModifiedUCTPolicy( 10),
-          () => new UCB1Policy(),
-          () => new UCB1TunedPolicy(),
-          () => new UCBNormalPolicy(),
-          () => new BoltzmannExplorationPolicy(1),
-          () => new BoltzmannExplorationPolicy(10),
-          () => new BoltzmannExplorationPolicy(20),
-          () => new BoltzmannExplorationPolicy(100),
-          () => new BoltzmannExplorationPolicy(200),
-          () => new BoltzmannExplorationPolicy(500),
+          //() => new RandomPolicy(),
+          // () => new ActiveLearningPolicy(),
+          //() => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
+          //() => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
+          //() => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
+          //() => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
+          ////() => new GaussianThompsonSamplingPolicy(),
+          //() => new GaussianThompsonSamplingPolicy(true),
+          //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
+          //() => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
+          ////() => new BernoulliThompsonSamplingPolicy(),
+          //() => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
+          //() => new EpsGreedyPolicy(0.01),
+          //() => new EpsGreedyPolicy(0.05),
+          //() => new EpsGreedyPolicy(0.1),
+          //() => new EpsGreedyPolicy(0.2),
+          //() => new EpsGreedyPolicy(0.5),
+          //() => new UCTPolicy(0.01),
+          //() => new UCTPolicy(0.05),
+          //() => new UCTPolicy(0.1),
+          //() => new UCTPolicy(0.5),
+          //() => new UCTPolicy(1),
+          //() => new UCTPolicy(2),
+          //() => new UCTPolicy( 5),
+          //() => new UCTPolicy( 10),
+          //() => new ModifiedUCTPolicy(0.01),
+          //() => new ModifiedUCTPolicy(0.05),
+          //() => new ModifiedUCTPolicy(0.1),
+          //() => new ModifiedUCTPolicy(0.5),
+          //() => new ModifiedUCTPolicy(1),
+          //() => new ModifiedUCTPolicy(2),
+          //() => new ModifiedUCTPolicy( 5),
+          //() => new ModifiedUCTPolicy( 10),
+          //() => new UCB1Policy(),
+          //() => new UCB1TunedPolicy(),
+          //() => new UCBNormalPolicy(),
+          //() => new BoltzmannExplorationPolicy(1),
+          //() => new BoltzmannExplorationPolicy(10),
+          //() => new BoltzmannExplorationPolicy(20),
+          //() => new BoltzmannExplorationPolicy(100),
+          //() => new BoltzmannExplorationPolicy(200),
+          //() => new BoltzmannExplorationPolicy(500),
           () => new ChernoffIntervalEstimationPolicy( 0.01),
           () => new ChernoffIntervalEstimationPolicy( 0.05),
           () => new ChernoffIntervalEstimationPolicy( 0.1),
           () => new ChernoffIntervalEstimationPolicy( 0.2),
-          () => new ThresholdAscentPolicy(5, 0.01),
-          () => new ThresholdAscentPolicy(5, 0.05),
-          () => new ThresholdAscentPolicy(5, 0.1),
-          () => new ThresholdAscentPolicy(5, 0.2),
-          () => new ThresholdAscentPolicy(10, 0.01),
-          () => new ThresholdAscentPolicy(10, 0.05),
-          () => new ThresholdAscentPolicy(10, 0.1),
-          () => new ThresholdAscentPolicy(10, 0.2),
-          () => new ThresholdAscentPolicy(50, 0.01),
-          () => new ThresholdAscentPolicy(50, 0.05),
-          () => new ThresholdAscentPolicy(50, 0.1),
-          () => new ThresholdAscentPolicy(50, 0.2),
-          () => new ThresholdAscentPolicy(100, 0.01),
-          () => new ThresholdAscentPolicy(100, 0.05),
-          () => new ThresholdAscentPolicy(100, 0.1),
-          () => new ThresholdAscentPolicy(100, 0.2),
-          () => new ThresholdAscentPolicy(500, 0.01),
-          () => new ThresholdAscentPolicy(500, 0.05),
-          () => new ThresholdAscentPolicy(500, 0.1),
-          () => new ThresholdAscentPolicy(500, 0.2),
+          //() => new ThresholdAscentPolicy(5, 0.01),
+          //() => new ThresholdAscentPolicy(5, 0.05),
+          //() => new ThresholdAscentPolicy(5, 0.1),
+          //() => new ThresholdAscentPolicy(5, 0.2),
+          //() => new ThresholdAscentPolicy(10, 0.01),
+          //() => new ThresholdAscentPolicy(10, 0.05),
+          //() => new ThresholdAscentPolicy(10, 0.1),
+          //() => new ThresholdAscentPolicy(10, 0.2),
+          //() => new ThresholdAscentPolicy(50, 0.01),
+          //() => new ThresholdAscentPolicy(50, 0.05),
+          //() => new ThresholdAscentPolicy(50, 0.1),
+          //() => new ThresholdAscentPolicy(50, 0.2),
+          //() => new ThresholdAscentPolicy(100, 0.01),
+          //() => new ThresholdAscentPolicy(100, 0.05),
+          //() => new ThresholdAscentPolicy(100, 0.1),
+          //() => new ThresholdAscentPolicy(100, 0.2),
+          //() => new ThresholdAscentPolicy(500, 0.01),
+          //() => new ThresholdAscentPolicy(500, 0.05),
+          //() => new ThresholdAscentPolicy(500, 0.1),
+          //() => new ThresholdAscentPolicy(500, 0.2),
           //() => new ThresholdAscentPolicy(5000, 0.01),
           //() => new ThresholdAscentPolicy(10000, 0.01),
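A note on the pattern in the hunk above: the experiment keeps an array of Func<IBanditPolicy> factories rather than policy instances, presumably so that each repetition can start from a freshly constructed policy with no carried-over statistics, and r11974 also replaces the fixed seed 31415 with new Random().Next(), i.e. a different seed on every program run. The following is a minimal, self-contained sketch of that factory-plus-seed pattern; the IBanditPolicy interface shown here, its SelectAction member, and the two Demo* policies are simplified stand-ins invented for this sketch, not the actual HeuristicLab.Problems.GrammaticalOptimization types.

    using System;

    // Simplified stand-ins, invented for this sketch (not the HeuristicLab types).
    public interface IBanditPolicy {
      int SelectAction(Random rand, int numActions);
    }

    public class DemoRandomPolicy : IBanditPolicy {
      public int SelectAction(Random rand, int numActions) {
        return rand.Next(numActions);                 // uniform arm choice
      }
    }

    public class DemoEpsGreedyPolicy : IBanditPolicy {
      private readonly double eps;
      public DemoEpsGreedyPolicy(double eps) { this.eps = eps; }
      public int SelectAction(Random rand, int numActions) {
        // explore with probability eps, otherwise exploit (here: always arm 0)
        return rand.NextDouble() < eps ? rand.Next(numActions) : 0;
      }
    }

    public static class PolicyFactoryDemo {
      public static void Main() {
        // factories, not instances: every repetition gets a fresh policy
        var policyFactories = new Func<IBanditPolicy>[] {
          () => new DemoRandomPolicy(),
          () => new DemoEpsGreedyPolicy(0.2),
        };

        // r11974: fixed seed 31415 replaced by a seed drawn once per program run
        var localRandSeed = new Random().Next();
        var reps = 3;

        foreach (var createPolicy in policyFactories) {
          var localRand = new Random(localRandSeed);  // same seed for every policy
          for (int i = 0; i < reps; i++) {
            var policy = createPolicy();              // fresh instance per repetition
            var arm = policy.SelectAction(localRand, 10);
            Console.WriteLine($"{policy.GetType().Name}, rep {i}: arm {arm}");
          }
        }
      }
    }

Keeping factories instead of instances also makes it easy to comment individual policies in or out, which is exactly what this changeset does: everything except the four ChernoffIntervalEstimationPolicy entries is disabled.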
     
@@ lines 128-132 @@
             var localRand = new Random(localRandSeed);
             var options = new ParallelOptions();
-            options.MaxDegreeOfParallelism = 4;
+            options.MaxDegreeOfParallelism = 1;
             Parallel.For(0, reps, options, (i) => {
               Random myLocalRand;
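The hunk above drops ParallelOptions.MaxDegreeOfParallelism from 4 to 1, which effectively serializes the Parallel.For over the repetitions. The per-iteration Random myLocalRand in the loop body matters because System.Random instances are not thread-safe and should not be shared across parallel iterations. A minimal sketch of that structure, with a hypothetical per-iteration seeding scheme (localRandSeed + i) and dummy work in the loop body:

    using System;
    using System.Threading.Tasks;

    public static class ParallelRepsDemo {
      public static void Main() {
        var localRandSeed = 31415;
        var reps = 20;

        var options = new ParallelOptions();
        // 1 serializes the repetitions (as in r11974); 4 was the previous setting.
        options.MaxDegreeOfParallelism = 1;

        Parallel.For(0, reps, options, (i) => {
          // one Random per iteration, seeded deterministically from the base seed
          var myLocalRand = new Random(localRandSeed + i);
          double sum = 0;
          for (int k = 0; k < 1000; k++) sum += myLocalRand.NextDouble();
          Console.WriteLine($"rep {i}: mean = {sum / 1000:F3}");
        });
      }
    }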
     
@@ lines 314-319 @@
       var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[]
       {
-        () => Tuple.Create(100000, 23,  (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem()),
-        //() => Tuple.Create(100000, 17, (ISymbolicExpressionTreeProblem)new SantaFeAntProblem()),
+        //() => Tuple.Create(100000, 23,  (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem()),
+        () => Tuple.Create(100000, 17, (ISymbolicExpressionTreeProblem)new SantaFeAntProblem()),
         //() => Tuple.Create(50000, 32,(ISymbolicExpressionTreeProblem)new RoyalSymbolProblem()),
         //() => Tuple.Create(50000, 64, (ISymbolicExpressionTreeProblem)new RoyalPairProblem()),
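The problem list above follows the same factory idea: each entry yields a Tuple<int, int, ISymbolicExpressionTreeProblem>, pairing a problem instance with two integer settings (presumably an evaluation budget and a size limit; their exact meaning is an assumption here). A minimal sketch of consuming such an array, with a stand-in problem type invented for the demo:

    using System;

    // Stand-in for the HeuristicLab problem interface (assumption).
    public interface ISymbolicExpressionTreeProblem {
      string Name { get; }
    }

    public class DemoAntProblem : ISymbolicExpressionTreeProblem {
      public string Name => "SantaFeAnt (demo stand-in)";
    }

    public static class ProblemFactoryDemo {
      public static void Main() {
        // mirrors Tuple.Create(100000, 17, ...) from r11974; the meaning of the
        // two integers (budget, size limit) is assumed, not confirmed by the diff
        var problemFactories = new Func<Tuple<int, int, ISymbolicExpressionTreeProblem>>[] {
          () => Tuple.Create(100000, 17, (ISymbolicExpressionTreeProblem)new DemoAntProblem()),
        };

        foreach (var createProblem in problemFactories) {
          var t = createProblem();
          Console.WriteLine($"{t.Item3.Name}: budget={t.Item1}, size limit={t.Item2}");
        }
      }
    }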