- Timestamp:
- 04/07/15 14:31:06 (10 years ago)
- Location:
- branches/HeuristicLab.Problems.GrammaticalOptimization-gkr
- Files:
-
- 22 edited
- 1 copied
Legend:
- Unmodified
- Added
- Removed
-
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/ActionInfos/DefaultPolicyActionInfo.cs
r11849 r12290 12 12 public int Tries { get; private set; } 13 13 public double MaxReward { get; private set; } 14 private double avgValue = 0.0; 14 15 public double Value { 15 16 get { 16 return Tries > 0 ? SumReward / Tries : 0.0;17 return Tries > 0 ? avgValue : double.PositiveInfinity; 17 18 } 18 19 } … … 25 26 SumReward += reward; 26 27 MaxReward = Math.Max(MaxReward, reward); 28 var delta = reward - avgValue; 29 //var alpha = 0.01; 30 var alpha = Math.Max(1.0/Tries, 0.01); 31 avgValue = avgValue + alpha * delta; 27 32 } 28 33 … … 31 36 Tries = 0; 32 37 MaxReward = 0.0; 38 avgValue = 0.0; 33 39 } 34 40 … … 36 42 return string.Format("{0:F3} {1:F3} {2}", Value, MaxReward, Tries); 37 43 } 38 39 public static Func<DefaultPolicyActionInfo, double> AverageReward {40 get {41 return (aInfo) =>42 aInfo.Tries == 0 ?43 double.PositiveInfinity :44 aInfo.SumReward / (double)aInfo.Tries;45 }46 }47 44 } 48 45 } -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/ActionInfos/MeanAndVariancePolicyActionInfo.cs
r11849 r12290 26 26 estimator.Reset(); 27 27 } 28 29 public override string ToString() { 30 return string.Format("{0:N3} {1,3}", AvgReward, Tries); 31 } 28 32 } 29 33 } -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/Policies/BoltzmannExplorationPolicy.cs
r11806 r12290 11 11 public class BoltzmannExplorationPolicy : IBanditPolicy { 12 12 private readonly double beta; 13 private readonly Func<DefaultPolicyActionInfo, double> valueFunction;14 13 15 public BoltzmannExplorationPolicy(double beta) : this(beta, DefaultPolicyActionInfo.AverageReward) { } 16 17 public BoltzmannExplorationPolicy(double beta, Func<DefaultPolicyActionInfo, double> valueFunction) { 14 public BoltzmannExplorationPolicy(double beta) { 18 15 if (beta < 0) throw new ArgumentException(); 19 16 this.beta = beta; 20 this.valueFunction = valueFunction;21 17 } 22 18 public int SelectAction(Random random, IEnumerable<IBanditPolicyActionInfo> actionInfos) { … … 37 33 38 34 var w = from aInfo in myActionInfos 39 select Math.Exp(beta * valueFunction(aInfo));35 select Math.Exp(beta * aInfo.Value); 40 36 41 37 var bestAction = Enumerable.Range(0, myActionInfos.Count()).SampleProportional(random, w); -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.Bandits/Policies/EpsGreedyPolicy.cs
r11806 r12290 11 11 private readonly double eps; 12 12 private readonly RandomPolicy randomPolicy; 13 private readonly Func<DefaultPolicyActionInfo, double> valueFunction;14 13 private readonly string desc; 15 14 16 15 17 public EpsGreedyPolicy(double eps) : this(eps, DefaultPolicyActionInfo.AverageReward,string.Empty) { }16 public EpsGreedyPolicy(double eps) : this(eps, string.Empty) { } 18 17 19 public EpsGreedyPolicy(double eps, Func<DefaultPolicyActionInfo, double> valueFunction,string desc) {18 public EpsGreedyPolicy(double eps, string desc) { 20 19 this.eps = eps; 21 20 this.randomPolicy = new RandomPolicy(); 22 this.valueFunction = valueFunction;23 21 this.desc = desc; 24 22 } … … 36 34 aIdx++; 37 35 38 var q = valueFunction(aInfo);36 var q = aInfo.Value; 39 37 40 38 if (q > bestQ) { -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/HeuristicLab.Algorithms.GrammaticalOptimization.csproj
r11981 r12290 39 39 <ItemGroup> 40 40 <Compile Include="ISequentialDecisionPolicy.cs" /> 41 <Compile Include="SequentialDecisionPolicies\GenericPolicy.cs" /> 41 42 <Compile Include="SequentialDecisionPolicies\GenericFunctionApproximationGrammarPolicy.cs" /> 42 43 <Compile Include="SequentialDecisionPolicies\GenericGrammarPolicy.cs" /> -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/SequentialDecisionPolicies/GenericGrammarPolicy.cs
r11806 r12290 103 103 // the canonical states for the value function (banditInfos) and the done set must be distinguished 104 104 // sequences of different length could have the same canonical representation and can have the same value (banditInfo) 105 // however, if the canonical representation of a state is shorter th an we must not mark the canonical state as done when all possible derivations from the initial state have been explored105 // however, if the canonical representation of a state is shorter then we must not mark the canonical state as done when all possible derivations from the initial state have been explored 106 106 // eg. in the ant problem the canonical representation for ...lllA is ...rA 107 107 // even though all possible derivations (of limited length) of lllA have been visited we must not mark the state rA as done -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.GrammaticalOptimization/Solvers/SequentialSearch.cs
r11977 r12290 43 43 private readonly int randomTries; 44 44 private readonly IGrammarPolicy behaviourPolicy; 45 private readonly IGrammarPolicy greedyPolicy;46 45 private TreeNode rootNode; 47 46 … … 58 57 this.randomTries = randomTries; 59 58 this.behaviourPolicy = behaviourPolicy; 60 this.greedyPolicy = new GenericGrammarPolicy(problem, new EpsGreedyPolicy(0.0), false);61 59 this.stateChain = new List<string>(); 62 60 } … … 171 169 private void DistributeReward(double reward) { 172 170 behaviourPolicy.UpdateReward(stateChain, reward); 173 //greedyPolicy.UpdateReward(stateChain, reward);174 171 } 175 172 … … 178 175 StopRequested = false; 179 176 behaviourPolicy.Reset(); 180 greedyPolicy.Reset();181 177 maxSearchDepth = 0; 182 178 bestQuality = 0.0; … … 208 204 var maxValue = values.Max(); 209 205 if (maxValue == 0) maxValue = 1.0; 206 if (double.IsPositiveInfinity(maxValue)) maxValue = double.MaxValue; 210 207 211 208 // write phrases -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Algorithms.MonteCarloTreeSearch
-
Property
svn:ignore
set to
bin
obj
*.user
-
Property
svn:ignore
set to
-
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Common/ExpressionExtender.cs
r12024 r12290 140 140 private Expr ExpandFactors(IEnumerable<Factor> factors) { 141 141 // if (invFactors.Count > 0) throw new NotImplementedException(); 142 Debug.Assert(!factors.First().IsInverse); // the first factor is never an inverted factor142 //Debug.Assert(!factors.First().IsInverse); // the first factor is never an inverted factor 143 143 144 144 // each factor could be a list of terms (expression) -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Common/OnlineMeanAndVarianceEstimator.cs
r11849 r12290 19 19 20 20 var delta = reward - Avg; 21 Avg += delta / N; 21 //Avg += delta / N; 22 // double alpha = 0.01; 23 double alpha = 1.0 / N; 24 Avg = Avg + alpha * (delta); 22 25 sampleM2 += delta * (reward - Avg); 23 26 } -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization.SymbReg/SymbolicRegressionProblem.cs
r12099 r12290 3 3 using System.Collections.Generic; 4 4 using System.Diagnostics; 5 using System.Globalization; 6 using System.IO; 5 7 using System.Linq; 6 8 using System.Security; … … 69 71 private readonly bool useConstantOpt; 70 72 public string Name { get; private set; } 71 72 public SymbolicRegressionProblem(Random random, string partOfName, bool useConstantOpt = true) { 73 private Random random; 74 private double lambda; 75 76 77 // lambda should be tuned using CV 78 public SymbolicRegressionProblem(Random random, string partOfName, double lambda = 1.0, bool useConstantOpt = true) { 73 79 var instanceProviders = new RegressionInstanceProvider[] 74 80 {new RegressionRealWorldInstanceProvider(), … … 80 86 }; 81 87 var instanceProvider = instanceProviders.FirstOrDefault(prov => prov.GetDataDescriptors().Any(dd => dd.Name.Contains(partOfName))); 82 if (instanceProvider == null) throw new ArgumentException("instance not found"); 88 IRegressionProblemData problemData = null; 89 if (instanceProvider != null) { 90 var dds = instanceProvider.GetDataDescriptors(); 91 problemData = instanceProvider.LoadData(dds.Single(ds => ds.Name.Contains(partOfName))); 92 93 } else if (File.Exists(partOfName)) { 94 // check if it is a file 95 var prov = new RegressionCSVInstanceProvider(); 96 problemData = prov.ImportData(partOfName); 97 problemData.TrainingPartition.Start = 0; 98 problemData.TrainingPartition.End = problemData.Dataset.Rows; 99 // no test partition 100 problemData.TestPartition.Start = problemData.Dataset.Rows; 101 problemData.TestPartition.End = problemData.Dataset.Rows; 102 } else { 103 throw new ArgumentException("instance not found"); 104 } 83 105 84 106 this.useConstantOpt = useConstantOpt; 85 107 86 var dds = instanceProvider.GetDataDescriptors(); 87 var problemData = instanceProvider.LoadData(dds.Single(ds => ds.Name.Contains(partOfName))); 88 this.Name = problemData.Name; 108 this.Name = problemData.Name + string.Format("lambda={0:N2}", lambda); 89 109 90 110 
this.N = problemData.TrainingIndices.Count(); … … 94 114 this.y = problemData.Dataset.GetDoubleValues(problemData.TargetVariable, problemData.TrainingIndices).ToArray(); 95 115 116 var varEst = new OnlineMeanAndVarianceCalculator(); 117 118 var means = new double[d]; 119 var stdDevs = new double[d]; 120 96 121 int i = 0; 122 foreach (var inputVariable in problemData.AllowedInputVariables) { 123 varEst.Reset(); 124 problemData.Dataset.GetDoubleValues(inputVariable).ToList().ForEach(varEst.Add); 125 if (varEst.VarianceErrorState != OnlineCalculatorError.None) throw new ArgumentException(); 126 means[i] = varEst.Mean; 127 stdDevs[i] = Math.Sqrt(varEst.Variance); 128 i++; 129 } 130 131 i = 0; 97 132 foreach (var r in problemData.TrainingIndices) { 98 133 int j = 0; 99 134 foreach (var inputVariable in problemData.AllowedInputVariables) { 100 x[i, j++] = problemData.Dataset.GetDoubleValue(inputVariable, r); 135 x[i, j] = (problemData.Dataset.GetDoubleValue(inputVariable, r) - means[j]) / stdDevs[j]; 136 j++; 101 137 } 102 138 i++; 103 139 } 140 141 this.random = random; 142 this.lambda = lambda; 143 104 144 // initialize ERC values 105 145 erc = Enumerable.Range(0, 10).Select(_ => Rand.RandNormal(random) * 10).ToArray(); … … 132 172 return OptimizeConstantsAndEvaluate(sentence); 133 173 else { 134 135 174 Debug.Assert(SimpleEvaluate(sentence) == SimpleEvaluate(extender.CanonicalRepresentation(sentence))); 136 175 return SimpleEvaluate(sentence); … … 154 193 155 194 public IEnumerable<Feature> GetFeatures(string phrase) { 156 //throw new NotImplementedException();157 phrase = CanonicalRepresentation(phrase);158 return phrase.Split('+').Distinct().Select(t => new Feature(t, 1.0));195 throw new NotImplementedException(); 196 //phrase = CanonicalRepresentation(phrase); 197 //return phrase.Split('+').Distinct().Select(t => new Feature(t, 1.0)); 159 198 // return new Feature[] { new Feature(phrase, 1.0) }; 160 199 } … … 176 215 if (!constants.Any()) return 
SimpleEvaluate(sentence); 177 216 178 AutoDiff.IParametricCompiledTerm compiledFunc = func.Compile(constants, variables); // variate constants leave variables fixed to data 179 180 double[] c = constants.Select(_ => 1.0).ToArray(); // start with ones 181 217 // L2 regularization 218 // not possible with lsfit, would need to change to minlm below 219 // func = TermBuilder.Sum(func, lambda * TermBuilder.Sum(constants.Select(con => con * con))); 220 221 AutoDiff.IParametricCompiledTerm compiledFunc = func.Compile(constants, variables); // variate constants, leave variables fixed to data 222 223 // 10 restarts with random starting points 224 double[] bestStart = null; 225 double bestError = double.PositiveInfinity; 226 int info; 182 227 alglib.lsfitstate state; 183 228 alglib.lsfitreport rep; 184 int info;185 186 187 int k = c.Length;188 189 229 alglib.ndimensional_pfunc function_cx_1_func = CreatePFunc(compiledFunc); 190 230 alglib.ndimensional_pgrad function_cx_1_grad = CreatePGrad(compiledFunc); 191 192 const int maxIterations = 10; 193 try { 194 alglib.lsfitcreatefg(x, y, c, n, m, k, false, out state); 195 alglib.lsfitsetcond(state, 0.0, 0.0, maxIterations); 196 //alglib.lsfitsetgradientcheck(state, 0.001); 197 alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null); 198 alglib.lsfitresults(state, out info, out c, out rep); 199 } catch (ArithmeticException) { 200 return 0.0; 201 } catch (alglib.alglibexception) { 202 return 0.0; 203 } 204 205 //info == -7 => constant optimization failed due to wrong gradient 206 if (info == -7) throw new ArgumentException(); 231 for (int t = 0; t < 10; t++) { 232 double[] cStart = constants.Select(_ => Rand.RandNormal(random) * 10).ToArray(); 233 double[] cEnd; 234 // start with normally distributed (N(0, 10)) weights 235 236 237 int k = cStart.Length; 238 239 240 const int maxIterations = 10; 241 try { 242 alglib.lsfitcreatefg(x, y, cStart, n, m, k, false, out state); 243 alglib.lsfitsetcond(state, 0.0, 0.0, 
maxIterations); 244 //alglib.lsfitsetgradientcheck(state, 0.001); 245 alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null); 246 alglib.lsfitresults(state, out info, out cEnd, out rep); 247 if (info != -7 && rep.rmserror < bestError) { 248 bestStart = cStart; 249 bestError = rep.rmserror; 250 } 251 } catch (ArithmeticException) { 252 return 0.0; 253 } catch (alglib.alglibexception) { 254 return 0.0; 255 } 256 } 257 258 // 100 iteration steps from the best starting point 207 259 { 208 var rowData = new double[d]; 209 return HeuristicLab.Common.Extensions.RSq(y, Enumerable.Range(0, N).Select(i => { 210 for (int j = 0; j < d; j++) rowData[j] = x[i, j]; 211 return compiledFunc.Evaluate(c, rowData); 212 })); 260 double[] c = bestStart; 261 262 int k = c.Length; 263 264 const int maxIterations = 100; 265 try { 266 alglib.lsfitcreatefg(x, y, c, n, m, k, false, out state); 267 alglib.lsfitsetcond(state, 0.0, 0.0, maxIterations); 268 //alglib.lsfitsetgradientcheck(state, 0.001); 269 alglib.lsfitfit(state, function_cx_1_func, function_cx_1_grad, null, null); 270 alglib.lsfitresults(state, out info, out c, out rep); 271 } catch (ArithmeticException) { 272 return 0.0; 273 } catch (alglib.alglibexception) { 274 return 0.0; 275 } 276 //info == -7 => constant optimization failed due to wrong gradient 277 if (info == -7) throw new ArgumentException(); 278 { 279 var rowData = new double[d]; 280 return HeuristicLab.Common.Extensions.RSq(y, Enumerable.Range(0, N).Select(i => { 281 for (int j = 0; j < d; j++) rowData[j] = x[i, j]; 282 return compiledFunc.Evaluate(c, rowData); 283 })); 284 } 213 285 } 214 286 } -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/HeuristicLab.Problems.GrammaticalOptimization.csproj
r11981 r12290 50 50 </ItemGroup> 51 51 <ItemGroup> 52 <Compile Include="PartialExpressionInterpreter.cs" /> 52 53 <Compile Include="ExpressionInterpreter.cs" /> 53 54 <Compile Include="Feature.cs" /> … … 55 56 <Compile Include="Interfaces\IProblem.cs" /> 56 57 <Compile Include="Interfaces\ISymbolicExpressionTreeProblem.cs" /> 58 <Compile Include="Problems\PrimePolynomialProblem.cs" /> 57 59 <Compile Include="Problems\PermutationProblem.cs" /> 58 60 <Compile Include="Problems\EvenParityProblem.cs" /> -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/EvenParityProblem.cs
r12099 r12290 68 68 } 69 69 70 public IEnumerable<Feature> GetFeatures(string phrase) { 71 throw new NotImplementedException(); 70 public IEnumerable<Feature> GetFeatures(string phrase) 71 { 72 return new[] {new Feature(phrase, 1.0)}; 72 73 } 73 74 -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/FindPhrasesProblem.cs
r12099 r12290 159 159 } 160 160 161 public IEnumerable<Feature> GetFeatures(string phrase) { 162 throw new NotImplementedException(); 161 public IEnumerable<Feature> GetFeatures(string phrase) 162 { 163 return new Feature[] {new Feature(phrase, 1.0),}; 163 164 } 164 165 -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/RoyalPairProblem.cs
r12099 r12290 10 10 // counts the number of times a pair of symbols occurs in a sentence 11 11 public class RoyalPairProblem : ISymbolicExpressionTreeProblem { 12 private const string grammarString = @"13 G(S):14 S -> a | aS | b | bS15 ";16 17 private const string hlGrammarString = @"18 G(S):19 S -> a | b | SS20 ";21 12 22 13 private readonly IGrammar grammar; 14 private readonly int numTerminals; 23 15 public string Name { get { return "RoyalPair"; } } 24 25 public RoyalPairProblem() { 26 this.grammar = new Grammar(grammarString); 27 this.TreeBasedGPGrammar = new Grammar(hlGrammarString); 28 // TODO: allow configuration of the number of symbols 16 17 public RoyalPairProblem(int numTerminals = 2) { 18 this.numTerminals = numTerminals; 19 20 var sentenceSymbol = 'S'; 21 var terminalSymbols = Enumerable.Range(0, numTerminals).Select(off => (char)((byte)'a' + off)).ToArray(); 22 var nonTerminalSymbols = new char[] { sentenceSymbol }; 23 24 { 25 // create grammar 26 // S -> a..z | aS .. zS 27 var rules = terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t.ToString())) 28 .Concat(terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t + sentenceSymbol.ToString()))); 29 30 this.grammar = new Grammar(sentenceSymbol, terminalSymbols, nonTerminalSymbols, rules); 31 } 32 { 33 // create grammar for tree-based GP 34 // S -> a..z | SS 35 var rules = terminalSymbols.Select(t => Tuple.Create(sentenceSymbol, t.ToString())) 36 .Concat(new Tuple<char, string>[] { Tuple.Create(sentenceSymbol, sentenceSymbol.ToString() + sentenceSymbol) }); 37 38 this.TreeBasedGPGrammar = new Grammar(sentenceSymbol, terminalSymbols, nonTerminalSymbols, rules); 39 } 40 41 29 42 } 30 43 … … 49 62 50 63 public IEnumerable<Feature> GetFeatures(string phrase) { 51 throw new NotImplementedException(); 64 if (phrase.Length <= 1) 65 yield return new Feature("$$", 1.0); 66 else if (phrase.Length == 2) 67 yield return new Feature(phrase, 1.0); 68 else if (phrase.EndsWith("S")) // second to last symbol 
69 yield return new Feature(phrase.Substring(phrase.Length - 3, 2), 1.0); 70 else // last symbol 71 yield return new Feature(phrase.Substring(phrase.Length - 2, 2), 1.0); 72 52 73 } 74 53 75 public IGrammar TreeBasedGPGrammar { get; private set; } 54 76 public string ConvertTreeToSentence(ISymbolicExpressionTree tree) { -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/RoyalPhraseSequenceProblem.cs
r12099 r12290 3 3 using System.Diagnostics; 4 4 using System.Linq; 5 using System.Runtime.InteropServices; 5 6 using System.Text; 6 7 using System.Text.RegularExpressions; … … 148 149 } 149 150 150 public IEnumerable<Feature> GetFeatures(string phrase) { 151 throw new NotImplementedException(); 151 public IEnumerable<Feature> GetFeatures(string phrase) 152 { 153 return new Feature[] {new Feature(phrase, 1.0)}; 152 154 } 153 155 -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/SantaFeAntProblem.cs
r12099 r12290 138 138 139 139 public IEnumerable<Feature> GetFeatures(string phrase) { 140 phrase = CanonicalRepresentation(phrase);141 var isTerminal = grammar.IsTerminal(phrase);142 143 yield return new Feature(isTerminal + ToString(), 1.0);144 145 yield return new Feature("$" + (phrase.Length > 0 ? phrase[0] : ' '), 1.0);146 if (!isTerminal) {147 for (int i = 4; i < phrase.Length; i++) {148 if (!grammar.IsTerminal(phrase[i])) {149 yield return new Feature(phrase[i - 4].ToString() + phrase[i - 3].ToString() + phrase[i - 2] + phrase[i - 1], 1.0);150 break;151 }152 }153 }154 155 140 yield return new Feature(phrase, 1.0); 141 //var ant = new Ant(false); 142 //int p = 0; 143 //Run(ant, phrase.Replace('A', '.'), ref p, true); 144 //yield return new Feature(ant.PosX + "x" + ant.PosY + "-" + ant.Heading, 1.0); 156 145 } 157 146 -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/HeuristicLab.Problems.GrammaticalOptimization/Problems/SymbolicRegressionPoly10Problem.cs
r12099 r12290 19 19 // V -> a .. j 20 20 // "; 21 //private const string grammarString = @" 22 //G(E): 23 //E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E 24 //"; 21 25 private const string grammarString = @" 22 26 G(E): 23 E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E 27 E -> a | b | c | d | e | f | g | h | i | j | a+E | b+E | c+E | d+E | e+E | f+E | g+E | h+E | i+E | j+E | a*E | b*E | c*E | d*E | e*E | f*E | g*E | h*E | i*E | j*E 24 28 "; 25 29 … … 148 152 } 149 153 154 private double[] varIds = new double[] { }; 155 150 156 // splits the phrase into terms and creates (sparse) term-occurrance features 151 157 public IEnumerable<Feature> GetFeatures(string phrase) { 152 var canonicalTerms = new HashSet<string>(); 153 foreach (string t in phrase.Split('+')) { 154 canonicalTerms.Add(CanonicalTerm(t)); 155 } 156 return canonicalTerms.Select(entry => new Feature(entry, 1.0)) 157 .Concat(new Feature[] { new Feature(CanonicalRepresentation(phrase), 1.0) }); 158 // var canonicalTerms = new HashSet<string>(); 159 // foreach (string t in phrase.Split('+')) { 160 // canonicalTerms.Add(CanonicalTerm(t)); 161 // } 162 // return canonicalTerms.Select(entry => new Feature(entry, 1.0)) 163 // .Concat(new Feature[] { new Feature(CanonicalRepresentation(phrase), 1.0) }); 164 165 var partialInterpreter = new PartialExpressionInterpreter(); 166 var vars = new double[] { 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, }; 167 var s = partialInterpreter.Interpret(phrase, vars); 168 //if (s.Any()) 169 // return new Feature[] { new Feature(s.Pop().ToString(), 1.0), }; 170 //else 171 // return new Feature[] { new Feature("$", 1.0), }; 172 return new Feature[] { new Feature(string.Join(",", s), 1.0) }; 158 173 } 159 174 -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Main/Main.csproj
r12098 r12290 56 56 <Name>HeuristicLab.Algorithms.MonteCarloTreeSearch</Name> 57 57 </ProjectReference> 58 <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization.SymbReg\HeuristicLab.Problems.GrammaticalOptimization.SymbReg.csproj"> 59 <Project>{17A7A380-86CE-482D-8D22-CBD70CC97F0D}</Project> 60 <Name>HeuristicLab.Problems.GrammaticalOptimization.SymbReg</Name> 61 </ProjectReference> 58 62 <ProjectReference Include="..\HeuristicLab.Problems.GrammaticalOptimization\HeuristicLab.Problems.GrammaticalOptimization.csproj"> 59 63 <Project>{cb9dccf6-667e-4a13-b82d-dbd6b45a045e}</Project> -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Main/Program.cs
r12098 r12290 3 3 using System.Globalization; 4 4 using HeuristicLab.Algorithms.Bandits.BanditPolicies; 5 using HeuristicLab.Algorithms.Bandits.GrammarPolicies; 5 6 using HeuristicLab.Algorithms.GrammaticalOptimization; 6 7 using HeuristicLab.Algorithms.MonteCarloTreeSearch; … … 18 19 // TODO: reward discounting (für veränderliche reward distributions über zeit). speziellen unit-test dafür erstellen 19 20 // TODO: constant optimization 21 using HeuristicLab.Problems.GrammaticalOptimization.SymbReg; 20 22 21 23 22 namespace Main 23 { 24 namespace Main { 25 class Program { 24 class Program 25 { 26 static void Main(string[] args) { 26 static void Main(string[] args) 27 CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture; 27 { 28 CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture; 29 28 30 31 29 RunDemo(); 30 } 32 31 33 32 34 private static void RunDemo() 35 { 33 private static void RunDemo() { 34 35 for (int i = 0; i < 100; i++) { 36 int maxIterations = 2000000; 37 int iterations = 0; 38 39 var globalStatistics = new SentenceSetStatistics(); 40 var random = new Random(); 41 42 var problem = new SymbolicRegressionPoly10Problem(); 43 //var problem = new SantaFeAntProblem(); 44 //var problem = new RoyalPairProblem(25); 45 //var problem = new FindPhrasesProblem(random, 10, 5, 3, 5, 5, 1.0, 0.9, true); 46 //var problem = new PrimePolynomialProblem(); 47 //var problem = new SymbolicRegressionProblem(random, 48 // //@"C:\reps\HeuristicLab\branches\HeuristicLab.Problems.GrammaticalOptimization\HeuristicLab.Problems.GrammaticalOptimization.SymbReg\nht-train.csv", 49 // @"C:\reps\fhooe-new\research\Datasets\Benchmark\kommenda-1.csv", 50 // 1.0, 51 // true); 52 // //var problem = new PrimePolynomialProblem(); 53 // var alg = new SequentialSearch(problem, 25, random, 0, 54 // new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy())); 55 var alg = new SequentialSearch(problem, 25, random, 0,
56 new GenericPolicy(problem, new HeuristicLab.Algorithms.Bandits.BanditPolicies.EpsGreedyPolicy(0.1))); 57 //var alg = new MonteCarloTreeSearch(problem, 23, random, new UCB1Policy(), new RandomSimulation(problem, random, 30)); 36 58 37 59 38 int maxIterations = 100000; 39 int iterations = 0; 60 alg.FoundNewBestSolution += (sentence, quality) => { 61 //Console.WriteLine("{0}", globalStatistics); 62 }; 40 63 41 var globalStatistics = new SentenceSetStatistics(); 42 var random = new Random(); 64 alg.SolutionEvaluated += (sentence, quality) => { 65 iterations++; 66 globalStatistics.AddSentence(sentence, quality); 43 67 44 //var problem = new SymbolicRegressionPoly10Problem(); 45 //var problem = new SantaFeAntProblem(); 46 var problem = new RoyalPairProblem(); 47 //var problem = new EvenParityProblem(); 48 //var alg = new SequentialSearch(problem, 23, random, 0, 49 // new HeuristicLab.Algorithms.Bandits.GrammarPolicies.GenericGrammarPolicy(problem, new UCB1TunedPolicy())); 50 var alg = new MonteCarloTreeSearch(problem, 23, random, new UCB1Policy(), new RandomSimulation(problem, random, 23)); 68 // comment this if you don't want to see solver statistics 69 if (iterations % 100 == 0) { 70 if (iterations % 1000 == 0) Console.Clear(); 71 Console.SetCursorPosition(0, 0); 72 alg.PrintStats(); 73 } 51 74 75 // uncomment this if you want to collect statistics of the generated sentences 76 //if (iterations % 100 == 0) { 77 // Console.WriteLine("{0}", globalStatistics); 78 //} 79 }; 52 80 53 alg.FoundNewBestSolution += (sentence, quality) =>54 {55 //Console.WriteLine("{0}", globalStatistics);56 };81 var sw = new Stopwatch(); 82 sw.Start(); 83 alg.Run(maxIterations); 84 sw.Stop(); 57 85 58 alg.SolutionEvaluated += (sentence, quality) => 59 { 60 iterations++; 61 globalStatistics.AddSentence(sentence, quality); 86 Console.WriteLine(globalStatistics); 62 87 63 // comment this if you don't want to see solver statistics 64 if (iterations % 100 == 0) 65 { 66 if (iterations % 10000 == 
0) Console.Clear(); 67 Console.SetCursorPosition(0, 0); 68 alg.PrintStats(); 69 } 70 71 // uncomment this if you want to collect statistics of the generated sentences 72 // if (iterations % 1000 == 0) { 73 // Console.WriteLine("{0}", globalStatistics); 74 // } 75 }; 76 77 var sw = new Stopwatch(); 78 sw.Start(); 79 alg.Run(maxIterations); 80 sw.Stop(); 81 82 Console.Clear(); 83 alg.PrintStats(); 84 Console.WriteLine(globalStatistics); 85 Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol", 86 sw.Elapsed.TotalSeconds, 87 maxIterations / (double)sw.Elapsed.TotalSeconds, 88 (double)sw.ElapsedMilliseconds * 1000 / maxIterations); 89 } 88 Console.WriteLine("{0:F2} sec {1,10:F1} sols/sec {2,10:F1} ns/sol", 89 sw.Elapsed.TotalSeconds, 90 maxIterations / (double)sw.Elapsed.TotalSeconds, 91 (double)sw.ElapsedMilliseconds * 1000 / maxIterations); 92 } 90 93 } 94 } 91 95 } -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Test/RunDemo.cs
r12014 r12290 26 26 () => new RandomPolicy(), 27 27 () => new ActiveLearningPolicy(), 28 () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),29 () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),30 () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),31 () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),28 // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"), 29 // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"), 30 // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"), 31 // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"), 32 32 //() => new GaussianThompsonSamplingPolicy(), 33 33 () => new GaussianThompsonSamplingPolicy(true), -
branches/HeuristicLab.Problems.GrammaticalOptimization-gkr/Test/TestTunedSettings.cs
r12099 r12290 42 42 () => new RandomPolicy(), 43 43 () => new ActiveLearningPolicy(), 44 () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),45 () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),46 () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),47 () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),44 // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"), 45 // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"), 46 // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"), 47 // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"), 48 48 //() => new GaussianThompsonSamplingPolicy(), 49 49 () => new GaussianThompsonSamplingPolicy(true), … … 152 152 () => new RandomPolicy(), 153 153 () => new ActiveLearningPolicy(), 154 () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"),155 () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"),156 () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"),157 () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"),154 // () => new EpsGreedyPolicy(0.01, (aInfo)=> aInfo.MaxReward, "max"), 155 // () => new EpsGreedyPolicy(0.05, (aInfo)=> aInfo.MaxReward, "max"), 156 // () => new EpsGreedyPolicy(0.1, (aInfo)=> aInfo.MaxReward, "max"), 157 // () => new EpsGreedyPolicy(0.2, (aInfo)=> aInfo.MaxReward, "max"), 158 158 //() => new GaussianThompsonSamplingPolicy(), 159 159 () => new GaussianThompsonSamplingPolicy(true), … … 255 255 var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] 256 256 { 257 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Nguyen F7", true),258 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Keijzer 6", true),259 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Vladislavleva-4", true),260 (randSeed) => 
(ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Spatial", true),261 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Friedman - II", true),262 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Tower", true),257 //(randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Nguyen F7", true), very easy?! 258 //(randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Keijzer 6", true), very easy?! 259 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Vladislavleva-4", 1.0, true), // kommenda - const opt 260 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Spatial", 1.0, true), 261 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Friedman - II", 1.0, true), 262 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Tower", 1.0, true), 263 263 }; 264 264 … … 348 348 var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] 349 349 { 350 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Poly-10", true ),350 (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Poly-10", 1.0, true ), 351 351 }; 352 352
Note: See TracChangeset
for help on using the changeset viewer.