Free cookie consent management tool by TermsFeed Policy Generator

Changeset 12290


Ignore:
Timestamp:
04/07/15 14:31:06 (10 years ago)
Author:
gkronber
Message:

#2283 created a new branch to separate development from aballeit

Location:
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr
Files:
22 edited
1 copied

Legend:

Unmodified
Added
Removed
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/ActionInfos/DefaultPolicyActionInfo.cs

    r11849 r12290  
    1212    public int Tries { get; private set; }
    1313    public double MaxReward { get; private set; }
     14    private double avgValue = 0.0;
    1415    public double Value {
    1516      get {
    16         return Tries > 0 ? SumReward / Tries : 0.0;
     17        return Tries > 0 ? avgValue : double.PositiveInfinity;
    1718      }
    1819    }
     
    2526      SumReward += reward;
    2627      MaxReward = Math.Max(MaxReward, reward);
     28      var delta = reward - avgValue;
     29      //var alpha = 0.01;
     30      var alpha = Math.Max(1.0/Tries, 0.01);
     31      avgValue = avgValue + alpha * delta;
    2732    }
    2833
     
    3136      Tries = 0;
    3237      MaxReward = 0.0;
     38      avgValue = 0.0;
    3339    }
    3440
     
    3642      return string.Format("{0:F3} {1:F3} {2}", Value, MaxReward, Tries);
    3743    }
    38 
    39     public static Func<DefaultPolicyActionInfo, double> AverageReward {
    40       get {
    41         return (aInfo) =>
    42           aInfo.Tries == 0 ?
    43           double.PositiveInfinity :
    44           aInfo.SumReward / (double)aInfo.Tries;
    45       }
    46     }
    4744  }
    4845}
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/ActionInfos/MeanAndVariancePolicyActionInfo.cs

    r11849 r12290  
    2626      estimator.Reset();
    2727    }
     28
     29    public override string ToString() {
     30      return string.Format("{0:N3} {1,3}", AvgReward, Tries);
     31    }
    2832  }
    2933}
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/Policies/BoltzmannExplorationPolicy.cs

    r11806 r12290  
    1111  public class BoltzmannExplorationPolicy : IBanditPolicy {
    1212    private readonly double beta;
    13     private readonly Func<DefaultPolicyActionInfo, double> valueFunction;
    1413
    15     public BoltzmannExplorationPolicy(double beta) : this(beta, DefaultPolicyActionInfo.AverageReward) { }
    16 
    17     public BoltzmannExplorationPolicy(double beta, Func<DefaultPolicyActionInfo, double> valueFunction) {
     14    public BoltzmannExplorationPolicy(double beta)  {
    1815      if (beta < 0) throw new ArgumentException();
    1916      this.beta = beta;
    20       this.valueFunction = valueFunction;
    2117    }
    2218    public int SelectAction(Random random, IEnumerable<IBanditPolicyActionInfo> actionInfos) {
     
    3733
    3834      var w = from aInfo in myActionInfos
    39               select Math.Exp(beta * valueFunction(aInfo));
     35              select Math.Exp(beta * aInfo.Value);
    4036
    4137      var bestAction = Enumerable.Range(0, myActionInfos.Count()).SampleProportional(random, w);
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/Policies/EpsGreedyPolicy.cs

    r11806 r12290  
    1111    private readonly double eps;
    1212    private readonly RandomPolicy randomPolicy;
    13     private readonly Func<DefaultPolicyActionInfo, double> valueFunction;
    1413    private readonly string desc;
    1514
    1615
    17     public EpsGreedyPolicy(double eps) : this(eps, DefaultPolicyActionInfo.AverageReward, string.Empty) { }
     16    public EpsGreedyPolicy(double eps) : this(eps, string.Empty) { }
    1817
    19     public EpsGreedyPolicy(double eps, Func<DefaultPolicyActionInfo, double> valueFunction, string desc) {
     18    public EpsGreedyPolicy(double eps, string desc) {
    2019      this.eps = eps;
    2120      this.randomPolicy = new RandomPolicy();
    22       this.valueFunction = valueFunction;
    2321      this.desc = desc;
    2422    }
     
    3634          aIdx++;
    3735
    38           var q = valueFunction(aInfo);
     36          var q = aInfo.Value;
    3937
    4038          if (q > bestQ) {
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/HeuristicLab.Algorithms.GrammaticalOptimization.csproj

    r11981 r12290  
    3939  <ItemGroup>
    4040    <Compile Include="ISequentialDecisionPolicy.cs" />
     41    <Compile Include="SequentialDecisionPolicies\GenericPolicy.cs" />
    4142    <Compile Include="SequentialDecisionPolicies\GenericFunctionApproximationGrammarPolicy.cs" />
    4243    <Compile Include="SequentialDecisionPolicies\GenericGrammarPolicy.cs" />
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/SequentialDecisionPolicies/GenericGrammarPolicy.cs

    r11806 r12290  
    103103    // the canonical states for the value function (banditInfos) and the done set must be distinguished
    104104    // sequences of different length could have the same canonical representation and can have the same value (banditInfo)
    105     // however, if the canonical representation of a state is shorter than we must not mark the canonical state as done when all possible derivations from the initial state have been explored
     105    // however, if the canonical representation of a state is shorter then we must not mark the canonical state as done when all possible derivations from the initial state have been explored
    106106    // eg. in the ant problem the canonical representation for ...lllA is ...rA
    107107    // even though all possible derivations (of limited length) of lllA have been visited we must not mark the state rA as done
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/Solvers/SequentialSearch.cs

    r11977 r12290  
    4343    private readonly int randomTries;
    4444    private readonly IGrammarPolicy behaviourPolicy;
    45     private readonly IGrammarPolicy greedyPolicy;
    4645    private TreeNode rootNode;
    4746
     
    5857      this.randomTries = randomTries;
    5958      this.behaviourPolicy = behaviourPolicy;
    60       this.greedyPolicy = new GenericGrammarPolicy(problem, new EpsGreedyPolicy(0.0), false);
    6159      this.stateChain = new List<string>();
    6260    }
     
    171169    private void DistributeReward(double reward) {
    172170      behaviourPolicy.UpdateReward(stateChain, reward);
    173       //greedyPolicy.UpdateReward(stateChain, reward);
    174171    }
    175172
     
    178175      StopRequested = false;
    179176      behaviourPolicy.Reset();
    180       greedyPolicy.Reset();
    181177      maxSearchDepth = 0;
    182178      bestQuality = 0.0;
     
    208204        var maxValue = values.Max();
    209205        if (maxValue == 0) maxValue = 1.0;
     206        if (double.IsPositiveInfinity(maxValue)) maxValue = double.MaxValue;
    210207
    211208        // write phrases
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.MonteCarloTreeSearch

    • Property svn:ignore set to
      bin
      obj
      *.user
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Common/ExpressionExtender.cs

    r12024 r12290  
    140140    private Expr ExpandFactors(IEnumerable<Factor> factors) {
    141141      // if (invFactors.Count > 0) throw new NotImplementedException();
    142       Debug.Assert(!factors.First().IsInverse); // the first factor is never an inverted factor
     142      //Debug.Assert(!factors.First().IsInverse); // the first factor is never an inverted factor
    143143
    144144      // each factor could be a list of terms (expression)
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Common/OnlineMeanAndVarianceEstimator.cs

    r11849 r12290  
    1919
    2020      var delta = reward - Avg;
    21       Avg += delta / N;
     21      //Avg += delta / N;
     22      // double alpha = 0.01;
     23      double alpha = 1.0 / N;
     24      Avg = Avg + alpha * (delta);
    2225      sampleM2 += delta * (reward - Avg);
    2326    }
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization.SymbReg/SymbolicRegressionProblem.cs

    r12099 r12290  
    33using System.Collections.Generic;
    44using System.Diagnostics;
     5using System.Globalization;
     6using System.IO;
    57using System.Linq;
    68using System.Security;
     
    6971    private readonly bool useConstantOpt;
    7072    public string Name { get; private set; }
    71 
    72     public SymbolicRegressionProblem(Random random, string partOfName, bool useConstantOpt = true) {
     73    private Random random;
     74    private double lambda;
     75
     76
     77    // lambda should be tuned using CV
     78    public SymbolicRegressionProblem(Random random, string partOfName, double lambda = 1.0, bool useConstantOpt = true) {
    7379      var instanceProviders = new RegressionInstanceProvider[]
    7480      {new RegressionRealWorldInstanceProvider(),
     
    8086    };
    8187      var instanceProvider = instanceProviders.FirstOrDefault(prov => prov.GetDataDescriptors().Any(dd => dd.Name.Contains(partOfName)));
    82       if (instanceProvider == null) throw new ArgumentException("instance not found");
     88      IRegressionProblemData problemData = null;
     89      if (instanceProvider != null) {
     90        var dds = instanceProvider.GetDataDescriptors();
     91        problemData = instanceProvider.LoadData(dds.Single(ds => ds.Name.Contains(partOfName)));
     92
     93      } else if (File.Exists(partOfName)) {
     94        // check if it is a file
     95        var prov = new RegressionCSVInstanceProvider();
     96        problemData = prov.ImportData(partOfName);
     97        problemData.TrainingPartition.Start = 0;
     98        problemData.TrainingPartition.End = problemData.Dataset.Rows;
     99        // no test partition
     100        problemData.TestPartition.Start = problemData.Dataset.Rows;
     101        problemData.TestPartition.End = problemData.Dataset.Rows;
     102      } else {
     103        throw new ArgumentException("instance not found");
     104      }
    83105
    84106      this.useConstantOpt = useConstantOpt;
    85107
    86       var dds = instanceProvider.GetDataDescriptors();
    87       var problemData = instanceProvider.LoadData(dds.Single(ds => ds.Name.Contains(partOfName)));
    88       this.Name = problemData.Name;
     108      this.Name = problemData.Name + string.Format("lambda={0:N2}", lambda);
    89109
    90110      this.N = problemData.TrainingIndices.Count();
     
    94114      this.y = problemData.Dataset.GetDoubleValues(problemData.TargetVariable, problemData.TrainingIndices).ToArray();
    95115
     116      var varEst = new OnlineMeanAndVarianceCalculator();
     117
     118      var means = new double[d];
     119      var stdDevs = new double[d];
     120
    96121      int i = 0;
     122      foreach (var inputVariable in problemData.AllowedInputVariables) {
     123        varEst.Reset();
     124        problemData.Dataset.GetDoubleValues(inputVariable).ToList().ForEach(varEst.Add);
     125        if (varEst.VarianceErrorState != OnlineCalculatorError.None) throw new ArgumentException();
     126        means[i] = varEst.Mean;
     127        stdDevs[i] = Math.Sqrt(varEst.Variance);
     128        i++;
     129      }
     130
     131      i = 0;
    97132      foreach (var r in problemData.TrainingIndices) {
    98133        int j = 0;
    99134        foreach (var inputVariable in problemData.AllowedInputVariables) {
    100           x[i, j++] = problemData.Dataset.GetDoubleValue(inputVariable, r);
     135          x[i, j] = (problemData.Dataset.GetDoubleValue(inputVariable, r) - means[j]) / stdDevs[j];
     136          j++;
    101137        }
    102138        i++;
    103139      }
     140
     141      this.random = random;
     142      this.lambda = lambda;
     143
    104144      // initialize ERC values
    105145      erc = Enumerable.Range(0, 10).Select(_ => Rand.RandNormal(random) * 10).ToArray();
     
    132172        return OptimizeConstantsAndEvaluate(sentence);
    133173      else {
    134 
    135174        Debug.Assert(SimpleEvaluate(sentence) == SimpleEvaluate(extender.CanonicalRepresentation(sentence)));
    136175        return SimpleEvaluate(sentence);
     
    154193
    155194    public IEnumerable<Feature> GetFeatures(string phrase) {
    156       // throw new NotImplementedException();
    157       phrase = CanonicalRepresentation(phrase);
    158       return phrase.Split('+').Distinct().Select(t => new Feature(t, 1.0));
     195      throw new NotImplementedException();
     196      //phrase = CanonicalRepresentation(phrase);
     197      //return phrase.Split('+').Distinct().Select(t => new Feature(t, 1.0));
    159198      // return new Feature[] { new Feature(phrase, 1.0) };
    160199    }
     
    176215      if (!constants.Any()) return SimpleEvaluate(sentence);
    177216
    178       AutoDiff.IParametricCompiledTerm compiledFunc = func.Compile(constants, variables); // variate constants leave variables fixed to data
    179 
    180       double[] c = constants.Select(_ => 1.0).ToArray(); // start with ones
    181 
     217      // L2 regularization
     218      // not possible with lsfit, would need to change to minlm below
     219      // func = TermBuilder.Sum(func, lambda * TermBuilder.Sum(constants.Select(con => con * con)));
     220
     221      AutoDiff.IParametricCompiledTerm compiledFunc = func.Compile(constants, variables); // variate constants, leave variables fixed to data
     222
     223      // 10 restarts with random starting points
     224      double[] bestStart = null;
     225      double bestError = double.PositiveInfinity;
     226      int info;
    182227      alglib.lsfitstate state;
    183228      alglib.lsfitreport rep;
    184       int info;
    185 
    186 
    187       int k = c.Length;
    188 
    189229      alglib.ndimensional_pfunc function_cx_1_func = CreatePFunc(compiledFunc);
    190230      alglib.ndimensional_pgrad function_cx_1_grad = CreatePGrad(compiledFunc);
    191 
    192       const int maxIterations = 10;
    193       try {
    194         alglib.lsfitcreatefg(x, y, c, n, m, k, false, out state);
    195         alglib.lsfitsetcond(state, 0.0, 0.0, maxIterations);
    196         //alglib.lsfitsetgradientcheck(state, 0.001);
    197         alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null);
    198         alglib.lsfitresults(state, out info, out c, out rep);
    199       } catch (ArithmeticException) {
    200         return 0.0;
    201       } catch (alglib.alglibexception) {
    202         return 0.0;
    203       }
    204 
    205       //info == -7  => constant optimization failed due to wrong gradient
    206       if (info == -7) throw new ArgumentException();
     231      for (int t = 0; t < 10; t++) {
     232        double[] cStart = constants.Select(_ => Rand.RandNormal(random) * 10).ToArray();
     233        double[] cEnd;
     234        // start with normally distributed (N(0, 10)) weights
     235
     236
     237        int k = cStart.Length;
     238
     239
     240        const int maxIterations = 10;
     241        try {
     242          alglib.lsfitcreatefg(x, y, cStart, n, m, k, false, out state);
     243          alglib.lsfitsetcond(state, 0.0, 0.0, maxIterations);
     244          //alglib.lsfitsetgradientcheck(state, 0.001);
     245          alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null);
     246          alglib.lsfitresults(state, out info, out cEnd, out rep);
     247          if (info != -7 && rep.rmserror < bestError) {
     248            bestStart = cStart;
     249            bestError = rep.rmserror;
     250          }
     251        } catch (ArithmeticException) {
     252          return 0.0;
     253        } catch (alglib.alglibexception) {
     254          return 0.0;
     255        }
     256      }
     257
     258      // 100 iteration steps from the best starting point
    207259      {
    208         var rowData = new double[d];
    209         return HeuristicLab.Common.Extensions.RSq(y, Enumerable.Range(0, N).Select(i => {
    210           for (int j = 0; j < d; j++) rowData[j] = x[i, j];
    211           return compiledFunc.Evaluate(c, rowData);
    212         }));
     260        double[] c = bestStart;
     261
     262        int k = c.Length;
     263
     264        const int maxIterations = 100;
     265        try {
     266          alglib.lsfitcreatefg(x, y, c, n, m, k, false, out state);
     267          alglib.lsfitsetcond(state, 0.0, 0.0, maxIterations);
     268          //alglib.lsfitsetgradientcheck(state, 0.001);
     269          alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null);
     270          alglib.lsfitresults(state, out info, out c, out rep);
     271        } catch (ArithmeticException) {
     272          return 0.0;
     273        } catch (alglib.alglibexception) {
     274          return 0.0;
     275        }
     276        //info == -7  => constant optimization failed due to wrong gradient
     277        if (info == -7) throw new ArgumentException();
     278        {
     279          var rowData = new double[d];
     280          return HeuristicLab.Common.Extensions.RSq(y, Enumerable.Range(0, N).Select(i => {
     281            for (int j = 0; j < d; j++) rowData[j] = x[i, j];
     282            return compiledFunc.Evaluate(c, rowData);
     283          }));
     284        }
    213285      }
    214286    }
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/HeuristicLab.Problems.GrammaticalOptimization.csproj

    r11981 r12290  
    5050  </ItemGroup>
    5151  <ItemGroup>
     52    <Compile Include="PartialExpressionInterpreter.cs" />
    5253    <Compile Include="ExpressionInterpreter.cs" />
    5354    <Compile Include="Feature.cs" />
     
    5556    <Compile Include="Interfaces\IProblem.cs" />
    5657    <Compile Include="Interfaces\ISymbolicExpressionTreeProblem.cs" />
     58    <Compile Include="Problems\PrimePolynomialProblem.cs" />
    5759    <Compile Include="Problems\PermutationProblem.cs" />
    5860    <Compile Include="Problems\EvenParityProblem.cs" />
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/EvenParityProblem.cs

    r12099 r12290  
    6868    }
    6969
    70     public IEnumerable<Feature> GetFeatures(string phrase) {
    71       throw new NotImplementedException();
     70    public IEnumerable<Feature> GetFeatures(string phrase)
     71    {
     72      return new[] {new Feature(phrase, 1.0)};
    7273    }
    7374
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/FindPhrasesProblem.cs

    r12099 r12290  
    159159    }
    160160
    161     public IEnumerable<Feature> GetFeatures(string phrase) {
    162       throw new NotImplementedException();
     161    public IEnumerable<Feature> GetFeatures(string phrase)
     162    {
     163      return new Feature[] {new Feature(phrase, 1.0),};
    163164    }
    164165
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/RoyalPairProblem.cs

    r12099 r12290  
    1010  // counts the number of times a pair of symbols occurs in a sentence
    1111  public class RoyalPairProblem : ISymbolicExpressionTreeProblem {
    12     private const string grammarString = @"
    13 G(S):
    14 S -> a | aS | b | bS
    15 ";
    16 
    17     private const string hlGrammarString = @"
    18 G(S):
    19 S -> a | b | SS
    20 ";
    2112
    2213    private readonly IGrammar grammar;
     14    private readonly int numTerminals;
    2315    public string Name { get { return "RoyalPair"; } }
    24    
    25     public RoyalPairProblem() {
    26       this.grammar = new Grammar(grammarString);
    27       this.TreeBasedGPGrammar = new Grammar(hlGrammarString);
    28       // TODO: allow configuration of the number of symbols
     16
     17    public RoyalPairProblem(int numTerminals = 2) {
     18      this.numTerminals = numTerminals;
     19
     20      var sentenceSymbol = 'S';
     21      var terminalSymbols = Enumerable.Range(0, numTerminals).Select(off => (char)((byte)'a' + off)).ToArray();
     22      var nonTerminalSymbols = new char[] { sentenceSymbol };
     23
     24      {
     25        // create grammar
     26        // S -> a..z | aS .. zS
     27        var rules = terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t.ToString()))
     28          .Concat(terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t + sentenceSymbol.ToString())));
     29
     30        this.grammar = new Grammar(sentenceSymbol, terminalSymbols, nonTerminalSymbols, rules);
     31      }
     32      {
     33        // create grammar for tree-based GP
     34        // S -> a..z | SS
     35        var rules = terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t.ToString()))
     36          .Concat(new Tuple<char, string>[] { Tuple.Create(sentenceSymbol, sentenceSymbol.ToString() + sentenceSymbol) });
     37
     38        this.TreeBasedGPGrammar = new Grammar(sentenceSymbol, terminalSymbols, nonTerminalSymbols, rules);
     39      }
     40
     41
    2942    }
    3043
     
    4962
    5063    public IEnumerable<Feature> GetFeatures(string phrase) {
    51       throw new NotImplementedException();
     64      if (phrase.Length <= 1)
     65        yield return new Feature("$$", 1.0);
     66      else if (phrase.Length == 2)
     67        yield return new Feature(phrase, 1.0);
     68      else if (phrase.EndsWith("S")) // second to last symbol
     69        yield return new Feature(phrase.Substring(phrase.Length - 3, 2), 1.0);
     70      else // last symbol
     71        yield return new Feature(phrase.Substring(phrase.Length - 2, 2), 1.0);
     72
    5273    }
     74
    5375    public IGrammar TreeBasedGPGrammar { get; private set; }
    5476    public string ConvertTreeToSentence(ISymbolicExpressionTree tree) {
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/RoyalPhraseSequenceProblem.cs

    r12099 r12290  
    33using System.Diagnostics;
    44using System.Linq;
     5using System.Runtime.InteropServices;
    56using System.Text;
    67using System.Text.RegularExpressions;
     
    148149    }
    149150
    150     public IEnumerable<Feature> GetFeatures(string phrase) {
    151       throw new NotImplementedException();
     151    public IEnumerable<Feature> GetFeatures(string phrase)
     152    {
     153      return new Feature[] {new Feature(phrase, 1.0)};
    152154    }
    153155
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/SantaFeAntProblem.cs

    r12099 r12290  
    138138
    139139    public IEnumerable<Feature> GetFeatures(string phrase) {
    140       phrase = CanonicalRepresentation(phrase);
    141       var isTerminal = grammar.IsTerminal(phrase);
    142 
    143       yield return new Feature(isTerminal + ToString(), 1.0);
    144      
    145       yield return new Feature("$" + (phrase.Length > 0 ? phrase[0] : ' '), 1.0);
    146       if (!isTerminal) {
    147         for (int i = 4; i < phrase.Length; i++) {
    148           if (!grammar.IsTerminal(phrase[i])) {
    149             yield return new Feature(phrase[i - 4].ToString() + phrase[i - 3].ToString() + phrase[i - 2] + phrase[i - 1], 1.0);
    150             break;
    151           }
    152         }
    153       }
    154    
    155140      yield return new Feature(phrase, 1.0);
     141      //var ant = new Ant(false);
     142      //int p = 0;
     143      //Run(ant, phrase.Replace('A', '.'), ref p, true);
     144      //yield return new Feature(ant.PosX + "x" + ant.PosY + "-" + ant.Heading, 1.0);
    156145    }
    157146
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/SymbolicRegressionPoly10Problem.cs

    r12099 r12290  
    1919    //    V -> a .. j
    2020    //    ";
     21    //private const string grammarString = @"
     22    //G(E):
     23    //E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E
     24    //";
    2125    private const string grammarString = @"
    2226    G(E):
    23     E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E
     27    E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E  
    2428    ";
    2529
     
    148152    }
    149153
     154    private double[] varIds = new double[] { };
     155
    150156    // splits the phrase into terms and creates (sparse) term-occurrance features
    151157    public IEnumerable<Feature> GetFeatures(string phrase) {
    152       var canonicalTerms = new HashSet<string>();
    153       foreach (string t in phrase.Split('+')) {
    154         canonicalTerms.Add(CanonicalTerm(t));
    155       }
    156       return canonicalTerms.Select(entry => new Feature(entry, 1.0))
    157         .Concat(new Feature[] { new Feature(CanonicalRepresentation(phrase), 1.0) });
     158      // var canonicalTerms = new HashSet<string>();
     159      // foreach (string t in phrase.Split('+')) {
     160      //   canonicalTerms.Add(CanonicalTerm(t));
     161      // }
     162      // return canonicalTerms.Select(entry => new Feature(entry, 1.0))
     163      //   .Concat(new Feature[] { new Feature(CanonicalRepresentation(phrase), 1.0) });
     164
     165      var partialInterpreter = new PartialExpressionInterpreter();
     166      var vars = new double[] { 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, };
     167      var s = partialInterpreter.Interpret(phrase, vars);
     168      //if (s.Any())
     169      //  return new Feature[] { new Feature(s.Pop().ToString(), 1.0), };
     170      //else
     171      //  return new Feature[] { new Feature("$", 1.0), };
     172      return new Feature[] { new Feature(string.Join(",", s), 1.0) };
    158173    }
    159174
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Main/Main.csproj

    r12098 r12290  
    5656      <Name>HeuristicLab.Algorithms.MonteCarloTreeSearch</Name>
    5757    </ProjectReference>
     58    <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization.SymbReg\HeuristicLab.Problems.GrammaticalOptimization.SymbReg.csproj">
     59      <Project>{17A7A380-86CE-482D-8D22-CBD70CC97F0D}</Project>
     60      <Name>HeuristicLab.Problems.GrammaticalOptimization.SymbReg</Name>
     61    </ProjectReference>
    5862    <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization\HeuristicLab.Problems.GrammaticalOptimization.csproj">
    5963      <Project>{cb9dccf6-667e-4a13-b82d-dbd6b45a045e}</Project>
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Main/Program.cs

    r12098 r12290  
    33using System.Globalization;
    44using HeuristicLab.Algorithms.Bandits.BanditPolicies;
     5using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
    56using HeuristicLab.Algorithms.GrammaticalOptimization;
    67using HeuristicLab.Algorithms.MonteCarloTreeSearch;
     
    1819// TODO: reward discounting (für veränderliche reward distributions über zeit). speziellen unit-test dafür erstellen
    1920// TODO: constant optimization
     21using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
    2022
    2123
    22 namespace Main
    23 {
    24     class Program
    25     {
    26         static void Main(string[] args)
    27         {
    28             CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
     24namespace Main {
     25  class Program {
     26    static void Main(string[] args) {
     27      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
    2928
    30             RunDemo();
    31         }
     29      RunDemo();
     30    }
    3231
    3332
    34         private static void RunDemo()
    35         {
     33    private static void RunDemo() {
     34
     35      for (int i = 0; i < 100; i++) {
     36        int maxIterations = 2000000;
     37        int iterations = 0;
     38
     39        var globalStatistics = new SentenceSetStatistics();
     40        var random = new Random();
     41
     42        var problem = new SymbolicRegressionPoly10Problem();
     43        //var problem = new SantaFeAntProblem();             
     44        //var problem = new RoyalPairProblem(25);
     45        //var problem = new FindPhrasesProblem(random, 10, 5, 3, 5, 5, 1.0, 0.9, true);
     46        //var problem = new PrimePolynomialProblem();
     47        //var problem = new SymbolicRegressionProblem(random,
     48        //  //@"C:\reps\HeuristicLab\branches\HeuristicLab.Problems.GrammaticalOptimization\HeuristicLab.Problems.GrammaticalOptimization.SymbReg\nht-train.csv",
     49        //  @"C:\reps\fhooe-new\research\Datasets\Benchmark\kommenda-1.csv",
     50        //  1.0,
     51        //  true);
     52        // //var problem = new PrimePolynomialProblem();
     53        // var alg = new SequentialSearch(problem, 25, random, 0,
     54        //   new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy()));
     55        var alg = new SequentialSearch(problem, 25, random, 0,
     56          new GenericPolicy(problem, new HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy(0.1)));
     57        //var alg = new MonteCarloTreeSearch(problem, 23, random, new UCB1Policy(), new RandomSimulation(problem, random, 30));
    3658
    3759
    38             int maxIterations = 100000;
    39             int iterations = 0;
     60        alg.FoundNewBestSolution += (sentence, quality) => {
     61          //Console.WriteLine("{0}", globalStatistics);
     62        };
    4063
    41             var globalStatistics = new SentenceSetStatistics();
    42             var random = new Random();
     64        alg.SolutionEvaluated += (sentence, quality) => {
     65          iterations++;
     66          globalStatistics.AddSentence(sentence, quality);
    4367
    44             //var problem = new SymbolicRegressionPoly10Problem();
    45             //var problem = new SantaFeAntProblem();             
    46             var problem = new RoyalPairProblem();
    47             //var problem = new EvenParityProblem();
    48             //var alg = new SequentialSearch(problem, 23, random, 0,
    49             // new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy()));
    50             var alg = new MonteCarloTreeSearch(problem, 23, random, new UCB1Policy(), new RandomSimulation(problem, random, 23));
     68          // comment this if you don't want to see solver statistics
     69          if (iterations % 100 == 0) {
     70            if (iterations % 1000 == 0) Console.Clear();
     71            Console.SetCursorPosition(0, 0);
     72            alg.PrintStats();
     73          }
    5174
     75          // uncomment this if you want to collect statistics of the generated sentences
     76          //if (iterations % 100 == 0) {
     77          //  Console.WriteLine("{0}", globalStatistics);
     78          //}
     79        };
    5280
    53             alg.FoundNewBestSolution += (sentence, quality) =>
    54             {
    55                 //Console.WriteLine("{0}", globalStatistics);
    56             };
     81        var sw = new Stopwatch();
     82        sw.Start();
     83        alg.Run(maxIterations);
     84        sw.Stop();
    5785
    58             alg.SolutionEvaluated += (sentence, quality) =>
    59             {
    60                 iterations++;
    61                 globalStatistics.AddSentence(sentence, quality);
     86        Console.WriteLine(globalStatistics);
    6287
    63                 // comment this if you don't want to see solver statistics
    64                 if (iterations % 100 == 0)
    65                 {
    66                     if (iterations % 10000 == 0) Console.Clear();
    67                     Console.SetCursorPosition(0, 0);
    68                     alg.PrintStats();
    69                 }
    70 
    71                 // uncomment this if you want to collect statistics of the generated sentences
    72                 // if (iterations % 1000 == 0) {
    73                 //   Console.WriteLine("{0}", globalStatistics);
    74                 // }
    75             };
    76 
    77             var sw = new Stopwatch();
    78             sw.Start();
    79             alg.Run(maxIterations);
    80             sw.Stop();
    81 
    82             Console.Clear();
    83             alg.PrintStats();
    84             Console.WriteLine(globalStatistics);
    85             Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol",
    86               sw.Elapsed.TotalSeconds,
    87               maxIterations / (double)sw.Elapsed.TotalSeconds,
    88               (double)sw.ElapsedMilliseconds * 1000 / maxIterations);
    89         }
     88        Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol",
     89          sw.Elapsed.TotalSeconds,
     90          maxIterations / (double)sw.Elapsed.TotalSeconds,
     91          (double)sw.ElapsedMilliseconds * 1000 / maxIterations);
     92      }
    9093    }
     94  }
    9195}
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Test/RunDemo.cs

    r12014 r12290  
    2626         () => new RandomPolicy(),
    2727          () => new ActiveLearningPolicy(), 
    28          () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
    29          () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
    30          () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
    31          () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
     28         // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
     29         // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
     30         // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
     31         // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
    3232         //() => new GaussianThompsonSamplingPolicy(),
    3333         () => new GaussianThompsonSamplingPolicy(true),
  • branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Test/TestTunedSettings.cs

    r12099 r12290  
    4242         () => new RandomPolicy(),
    4343         () => new ActiveLearningPolicy(), 
    44          () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
    45          () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
    46          () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
    47          () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
     44         // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
     45         // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
     46         // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
     47         // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
    4848         //() => new GaussianThompsonSamplingPolicy(),
    4949         () => new GaussianThompsonSamplingPolicy(true),
     
    152152         () => new RandomPolicy(),
    153153         () => new ActiveLearningPolicy(), 
    154          () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
    155          () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
    156          () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
    157          () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
     154         // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),
     155         // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),
     156         // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),
     157         // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),
    158158         //() => new GaussianThompsonSamplingPolicy(),
    159159         () => new GaussianThompsonSamplingPolicy(true),
     
    255255      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[]
    256256      {
    257         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Nguyen F7", true),
    258         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Keijzer 6", true),
    259         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Vladislavleva-4", true),
    260         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Spatial", true),
    261         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Friedman - II", true),
    262         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Tower", true),
     257        //(randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Nguyen F7", true),   very easy?!
     258        //(randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Keijzer 6", true),  very easy?!
     259        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Vladislavleva-4", 1.0, true),  // kommenda - const opt
     260        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Spatial", 1.0, true),
     261        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Friedman - II", 1.0, true),
     262        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Tower", 1.0, true),
    263263      };
    264264
     
    348348      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[]
    349349      {
    350         (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Poly-10", true ),
     350        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Poly-10", 1.0, true ),
    351351      };
    352352
Note: See TracChangeset for help on using the changeset viewer.