using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Algorithms.Bandits;
using HeuristicLab.Algorithms.Bandits.BanditPolicies;
using HeuristicLab.Algorithms.Bandits.GrammarPolicies;
using HeuristicLab.Algorithms.Bandits.Models;
using HeuristicLab.Algorithms.GrammaticalOptimization;
using HeuristicLab.Common;
using HeuristicLab.Problems.GrammaticalOptimization.SymbReg;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using RandomPolicy = HeuristicLab.Algorithms.Bandits.BanditPolicies.RandomPolicy;

namespace HeuristicLab.Problems.GrammaticalOptimization.Test {
  /// <summary>
  /// Long-running benchmark experiments (run via the MSTest runner) that compare bandit
  /// policies on grammatical-optimization problems. The Assert statements are intentionally
  /// commented out: these methods are used to produce console statistics, not to gate builds.
  /// NOTE(review): the generic type arguments in this file had been stripped (e.g. a bare
  /// "Func[]"), which cannot compile; they are restored here. The bandit policy interface is
  /// assumed to be IBanditPolicy from HeuristicLab.Algorithms.Bandits — confirm against the project.
  /// </summary>
  [TestClass]
  public class TestTunedSettings {
    // Fixed master seed so every run solves the _same_ problem instances.
    private const int randSeed = 31415;

    /// <summary>One experiment configuration: a problem instance, a size limit and a solver seed.</summary>
    internal class Configuration {
      public IProblem Problem;
      public int MaxSize;
      public int RandSeed;

      public override string ToString() {
        return string.Format("{0} {1} {2}", RandSeed, Problem, MaxSize);
      }
    }

    /// <summary>
    /// The full grid of bandit policies (and their tuning parameters) compared in the
    /// "all policies" experiments. Factories are used (instead of instances) because each
    /// configuration needs a fresh, stateless policy object.
    /// </summary>
    private static Func<IBanditPolicy>[] CreateFullPolicySet() {
      return new Func<IBanditPolicy>[] {
        () => new RandomPolicy(),
        () => new ActiveLearningPolicy(),
        () => new EpsGreedyPolicy(0.01, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.05, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.1, (aInfo) => aInfo.MaxReward, "max"),
        () => new EpsGreedyPolicy(0.2, (aInfo) => aInfo.MaxReward, "max"),
        //() => new GaussianThompsonSamplingPolicy(),
        () => new GaussianThompsonSamplingPolicy(true),
        () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1)),
        () => new GenericThompsonSamplingPolicy(new GaussianModel(0.5, 10, 1, 1)),
        //() => new BernoulliThompsonSamplingPolicy(),
        () => new GenericThompsonSamplingPolicy(new BernoulliModel(1, 1)),
        () => new EpsGreedyPolicy(0.01),
        () => new EpsGreedyPolicy(0.05),
        () => new EpsGreedyPolicy(0.1),
        () => new EpsGreedyPolicy(0.2),
        () => new EpsGreedyPolicy(0.5),
        () => new UCTPolicy(0.01),
        () => new UCTPolicy(0.05),
        () => new UCTPolicy(0.1),
        () => new UCTPolicy(0.5),
        () => new UCTPolicy(1),
        () => new UCTPolicy(2),
        () => new UCTPolicy(5),
        () => new UCTPolicy(10),
        () => new ModifiedUCTPolicy(0.01),
        () => new ModifiedUCTPolicy(0.05),
        () => new ModifiedUCTPolicy(0.1),
        () => new ModifiedUCTPolicy(0.5),
        () => new ModifiedUCTPolicy(1),
        () => new ModifiedUCTPolicy(2),
        () => new ModifiedUCTPolicy(5),
        () => new ModifiedUCTPolicy(10),
        () => new UCB1Policy(),
        () => new UCB1TunedPolicy(),
        () => new UCBNormalPolicy(),
        () => new BoltzmannExplorationPolicy(1),
        () => new BoltzmannExplorationPolicy(10),
        () => new BoltzmannExplorationPolicy(20),
        () => new BoltzmannExplorationPolicy(100),
        () => new BoltzmannExplorationPolicy(200),
        () => new BoltzmannExplorationPolicy(500),
        () => new ChernoffIntervalEstimationPolicy(0.01),
        () => new ChernoffIntervalEstimationPolicy(0.05),
        () => new ChernoffIntervalEstimationPolicy(0.1),
        () => new ChernoffIntervalEstimationPolicy(0.2),
        () => new ThresholdAscentPolicy(5, 0.01),
        () => new ThresholdAscentPolicy(5, 0.05),
        () => new ThresholdAscentPolicy(5, 0.1),
        () => new ThresholdAscentPolicy(5, 0.2),
        () => new ThresholdAscentPolicy(10, 0.01),
        () => new ThresholdAscentPolicy(10, 0.05),
        () => new ThresholdAscentPolicy(10, 0.1),
        () => new ThresholdAscentPolicy(10, 0.2),
        () => new ThresholdAscentPolicy(50, 0.01),
        () => new ThresholdAscentPolicy(50, 0.05),
        () => new ThresholdAscentPolicy(50, 0.1),
        () => new ThresholdAscentPolicy(50, 0.2),
        () => new ThresholdAscentPolicy(100, 0.01),
        () => new ThresholdAscentPolicy(100, 0.05),
        () => new ThresholdAscentPolicy(100, 0.1),
        () => new ThresholdAscentPolicy(100, 0.2),
        () => new ThresholdAscentPolicy(500, 0.01),
        () => new ThresholdAscentPolicy(500, 0.05),
        () => new ThresholdAscentPolicy(500, 0.1),
        () => new ThresholdAscentPolicy(500, 0.2),
        () => new ThresholdAscentPolicy(5000, 0.01),
        () => new ThresholdAscentPolicy(10000, 0.01),
      };
    }

    [TestMethod]
    [Timeout(1000 * 60 * 60 * 12)] // 12 hours
    public void TestAllPoliciesArtificialAnt() {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] {
        (randSeed) => (ISymbolicExpressionTreeProblem)new SantaFeAntProblem(),
      };
      var maxSizes = new int[] { 17 }; // necessary size for ant program
      RunPolicyExperiments(instanceFactories, CreateFullPolicySet(),
        maxSizes, nReps: 20, maxIterations: 100000,
        problemNameSelector: (prob) => prob.GetType().Name);
    }

    [TestMethod]
    [Timeout(1000 * 60 * 60 * 30)] // 30 hours
    public void TestAllPoliciesPoly10() {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] {
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem(),
      };
      var maxSizes = new int[] { 23 }; // necessary size symb reg poly 10
      RunPolicyExperiments(instanceFactories, CreateFullPolicySet(),
        maxSizes, nReps: 20, maxIterations: 100000,
        problemNameSelector: (prob) => prob.GetType().Name);
    }

    [TestMethod]
    [Timeout(1000 * 60 * 60 * 30)] // 30 hours
    public void TestAllSymbolicRegression() {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] {
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Nguyen F7", true),
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Keijzer 6", true),
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Vladislavleva-4", true),
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Spatial", true),
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Friedman - II", true),
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Tower", true),
      };
      // only the policies that performed best in the broader experiments
      var policyFactories = new Func<IBanditPolicy>[] {
        () => new UCTPolicy(0.05),
        () => new UCTPolicy(0.1),
        () => new ModifiedUCTPolicy(0.01),
        () => new ModifiedUCTPolicy(0.05),
        () => new UCB1Policy(),
        () => new UCB1TunedPolicy(),
      };
      var maxSizes = new int[] { 20 }; // default limit for all problems
      // these instances report the data-set name via Problem.Name (not the type name)
      RunPolicyExperiments(instanceFactories, policyFactories,
        maxSizes, nReps: 20, maxIterations: 10000,
        problemNameSelector: (prob) => prob.Name);
    }

    [TestMethod]
    [Timeout(1000 * 60 * 60 * 12)] // 12 hours
    // this configuration worked especially well in the experiments
    public void TestPoly10WithOutConstantOpt() {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] {
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionPoly10Problem(),
      };
      RunFunctionApproximationExperiments(instanceFactories,
        new int[] { 23 }, nReps: 20, maxIterations: 100000);
    }

    [TestMethod]
    [Timeout(1000 * 60 * 60 * 12)] // 12 hours
    // this configuration worked especially well in the experiments
    public void TestPoly10WithConstantOpt() {
      CultureInfo.DefaultThreadCurrentCulture = CultureInfo.InvariantCulture;
      var instanceFactories = new Func<int, ISymbolicExpressionTreeProblem>[] {
        (randSeed) => (ISymbolicExpressionTreeProblem)new SymbolicRegressionProblem(new Random(randSeed), "Poly-10", true),
      };
      RunFunctionApproximationExperiments(instanceFactories,
        new int[] { 23 }, nReps: 20, maxIterations: 100000);
    }

    /// <summary>
    /// Shared experiment loop: for each problem instance, runs every (configuration x policy)
    /// combination through a SequentialSearch with a GenericGrammarPolicy and accumulates
    /// per-instance statistics.
    /// NOTE(review): fractionSolved is incremented per solved (rep, policy) pair but only
    /// normalized by nReps, so with multiple policies it can exceed 1.0 — pre-existing behavior,
    /// kept unchanged because the Assert statements below are disabled anyway.
    /// </summary>
    private void RunPolicyExperiments(
      Func<int, ISymbolicExpressionTreeProblem>[] instanceFactories,
      Func<IBanditPolicy>[] policyFactories,
      int[] maxSizes, int nReps, int maxIterations,
      Func<IProblem, string> problemNameSelector) {
      foreach (var instanceFactory in instanceFactories) {
        var sumBestQ = 0.0;
        var sumItersToBest = 0;
        double fractionSolved = 0.0;
        foreach (var conf in GenerateConfigurations(instanceFactory, nReps, maxSizes)) {
          foreach (var policy in policyFactories) {
            var prob = conf.Problem;
            var maxLen = conf.MaxSize;
            var rand = new Random(conf.RandSeed);
            var solver = new SequentialSearch(prob, maxLen, rand, 0,
              new GenericGrammarPolicy(prob, policy(), true));
            var problemName = problemNameSelector(prob);
            var policyName = policy().ToString();
            double bestQ;
            int itersToBest;
            RunSolver(solver, problemName, policyName, 1.0, maxIterations, maxLen, out bestQ, out itersToBest);
            sumBestQ += bestQ;
            sumItersToBest += itersToBest;
            if (bestQ.IsAlmost(1.0)) fractionSolved += 1.0 / nReps;
          }
        }
        // Assert.AreEqual(0.85, fractionSolved, 1E-6);
        // Assert.AreEqual(0.99438202247191, sumBestQ / nReps, 1E-6);
        // Assert.AreEqual(5461.7, sumItersToBest / (double)nReps, 1E-6);
      }
    }

    /// <summary>
    /// Shared experiment loop for the function-approximation grammar policy (no bandit policy
    /// grid); otherwise identical bookkeeping to RunPolicyExperiments.
    /// </summary>
    private void RunFunctionApproximationExperiments(
      Func<int, ISymbolicExpressionTreeProblem>[] instanceFactories,
      int[] maxSizes, int nReps, int maxIterations) {
      foreach (var instanceFactory in instanceFactories) {
        var sumBestQ = 0.0;
        var sumItersToBest = 0;
        double fractionSolved = 0.0;
        foreach (var conf in GenerateConfigurations(instanceFactory, nReps, maxSizes)) {
          var prob = conf.Problem;
          var maxLen = conf.MaxSize;
          var rand = new Random(conf.RandSeed);
          var solver = new SequentialSearch(prob, maxLen, rand, 0,
            new GenericFunctionApproximationGrammarPolicy(prob, true));
          var problemName = prob.GetType().Name;
          double bestQ;
          int itersToBest;
          RunSolver(solver, problemName, string.Empty, 1.0, maxIterations, maxLen, out bestQ, out itersToBest);
          sumBestQ += bestQ;
          sumItersToBest += itersToBest;
          if (bestQ.IsAlmost(1.0)) fractionSolved += 1.0 / nReps;
        }
        // Assert.AreEqual(0.85, fractionSolved, 1E-6);
        // Assert.AreEqual(0.99438202247191, sumBestQ / nReps, 1E-6);
        // Assert.AreEqual(5461.7, sumItersToBest / (double)nReps, 1E-6);
      }
    }

    /// <summary>
    /// Yields nReps x |maxSizes| configurations. The problem seed is fixed so every solver
    /// attacks the _same_ problem instance; the solver seed varies per repetition.
    /// </summary>
    private IEnumerable<Configuration> GenerateConfigurations(
      Func<int, ISymbolicExpressionTreeProblem> problemFactory,
      int nReps,
      IEnumerable<int> maxSizes) {
      var seedRand = new Random(randSeed);
      // the problem seed is the same for all configurations
      // this guarantees that we solve the _same_ problem each time
      // with different solvers and multiple repetitions
      var problemSeed = randSeed;
      for (int i = 0; i < nReps; i++) {
        // in each repetition use the same random seed for all solver configurations
        // do nReps with different seeds for each configuration
        var solverSeed = seedRand.Next();
        foreach (var maxSize in maxSizes) {
          yield return new Configuration {
            MaxSize = maxSize,
            Problem = problemFactory(problemSeed),
            RandSeed = solverSeed
          };
        }
      }
    }

    /// <summary>
    /// Runs the solver for maxIters evaluations, tracking the best quality seen and the
    /// iteration at which it was found; prints progress every 1000 evaluations.
    /// </summary>
    /// <param name="bestQ">best quality observed (double.NegativeInfinity if nothing was evaluated)</param>
    /// <param name="itersToBest">1-based iteration of the best solution (-1 if nothing was evaluated)</param>
    private static void RunSolver(ISolver solver, string problemName, string policyName,
      double bestKnownQuality, int maxIters, int maxSize,
      out double bestQ, out int itersToBest) {
      int iterations = 0;
      var globalStatistics = new SentenceSetStatistics(bestKnownQuality);
      var solverName = solver.GetType().Name;
      double bestQuality = double.NegativeInfinity;
      int iterationsToBest = -1;
      solver.SolutionEvaluated += (sentence, quality) => {
        iterations++;
        globalStatistics.AddSentence(sentence, quality);
        if (quality > bestQuality) {
          bestQuality = quality;
          iterationsToBest = iterations;
        }
        if (iterations % 1000 == 0) {
          Console.WriteLine("\"{0,25}\" \"{1,25}\" {2} \"{3,25}\" {4}",
            solverName, policyName, maxSize, problemName, globalStatistics);
        }
      };
      solver.Run(maxIters);
      bestQ = bestQuality;
      itersToBest = iterationsToBest;
    }
  }
}