using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Runtime.ExceptionServices;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading;
using System.Threading.Tasks;
using HeuristicLab.Common;
using HeuristicLab.Problems.GrammaticalOptimization;

namespace HeuristicLab.Algorithms.Bandits.GrammarPolicies {
  /// <summary>
  /// Grammar policy that estimates state values with a linear function approximation over
  /// problem-supplied features (value(state) = sum over features of weight * feature.Value)
  /// and selects follow states epsilon-greedily. Fully explored states are tracked in a
  /// "done" set (keyed by canonical phrase, optionally length-tagged) so they are never
  /// selected again.
  /// </summary>
  public sealed class GenericFunctionApproximationGrammarPolicy : IGrammarPolicy {
    // feature id -> learned weight of the linear value function
    // (note: generic type arguments reconstructed; they were stripped in the checked-in text)
    private Dictionary<string, double> featureWeigths;
    // feature id -> number of updates; currently only read (the write is commented out in
    // UpdateWeights), kept for experimenting with adaptive learning rates
    private Dictionary<string, int> featureTries;
    // canonical phrases (possibly suffixed with the original phrase length, see MarkAsDone)
    // that have been fully explored
    private HashSet<string> done;
    private readonly bool useCanonicalPhrases;
    private readonly IProblem problem;

    public GenericFunctionApproximationGrammarPolicy(IProblem problem, bool useCanonicalPhrases = false) {
      this.useCanonicalPhrases = useCanonicalPhrases;
      this.problem = problem;
      this.featureWeigths = new Dictionary<string, double>();
      this.featureTries = new Dictionary<string, int>();
      this.done = new HashSet<string>();
    }

    private double[] activeAfterStates; // reused scratch buffer, don't allocate each call
    private int[] actionIndexMap;       // reused scratch buffer, don't allocate each call

    /// <summary>
    /// Epsilon-greedy selection among the not-yet-done after states.
    /// </summary>
    /// <param name="random">Source of randomness for exploration and tie-breaking.</param>
    /// <param name="curState">The current state; marked as done when no selectable after state remains.</param>
    /// <param name="afterStates">Candidate follow states (enumerated once).</param>
    /// <param name="selectedStateIdx">Index into <paramref name="afterStates"/> of the chosen state, or -1 on failure.</param>
    /// <returns>False when every after state is already done (the current state is then disabled too).</returns>
    public bool TrySelect(Random random, string curState, IEnumerable<string> afterStates, out int selectedStateIdx) {
      // materialize once: the original enumerated the sequence up to five times
      // (All + three Count() calls + foreach), which is wasteful and unsafe for lazy sequences
      var afterStateList = afterStates as IList<string> ?? afterStates.ToList();

      // fail if all states are done (corresponding state infos are disabled)
      if (afterStateList.All(Done)) {
        // all follow states have already been visited => also disable the current state
        // (if we can be sure that it has been fully explored)
        MarkAsDone(curState);
        selectedStateIdx = -1;
        return false;
      }

      // determine active actions (not done yet) and create an array to map the selected
      // index back to original actions
      if (activeAfterStates == null || activeAfterStates.Length < afterStateList.Count) {
        activeAfterStates = new double[afterStateList.Count];
        actionIndexMap = new int[afterStateList.Count];
      }
      var maxIdx = 0;
      for (int originalIdx = 0; originalIdx < afterStateList.Count; originalIdx++) {
        var afterState = afterStateList[originalIdx];
        if (!Done(afterState)) {
          actionIndexMap[maxIdx] = originalIdx;
          activeAfterStates[maxIdx] = GetValue(afterState);
          maxIdx++;
        }
      }

      // TODO: the exploration policy (epsilon) should be a parameter of the function approximation policy
      if (random.NextDouble() < 0.2) {
        // explore: uniform random among the active (not done) actions
        selectedStateIdx = actionIndexMap[random.Next(maxIdx)];
      } else {
        // exploit: collect all actions whose value ties the maximum and pick one uniformly
        var bestQ = double.NegativeInfinity;
        var bestIdxs = new List<int>();
        for (int i = 0; i < maxIdx; i++) {
          if (activeAfterStates[i] > bestQ) {
            bestIdxs.Clear();
            bestIdxs.Add(i);
            bestQ = activeAfterStates[i];
          } else if (activeAfterStates[i].IsAlmost(bestQ)) {
            bestIdxs.Add(i);
          }
        }
        selectedStateIdx = actionIndexMap[bestIdxs[random.Next(bestIdxs.Count)]];
      }
      return true;
    }

    /// <summary>
    /// Applies a weight update for every state on the trajectory towards the observed reward
    /// and marks terminal states as done.
    /// </summary>
    public void UpdateReward(IEnumerable<string> stateTrajectory, double reward) {
      foreach (var state in stateTrajectory) {
        UpdateWeights(state, reward);
        // only the last state can be terminal
        if (problem.Grammar.IsTerminal(state)) {
          MarkAsDone(state);
        }
      }
    }

    // weights ordered by value; for debugging / inspection only
    private IEnumerable<KeyValuePair<string, double>> Values {
      get { return featureWeigths.OrderByDescending(p => p.Value); }
    }

    public void Reset() {
      featureWeigths.Clear();
      featureTries.Clear(); // was previously not cleared and leaked counts across runs
      done.Clear();
    }

    // per-state try counts are not tracked by this policy
    public int GetTries(string state) {
      return 0;
    }

    public int GetFeatureTries(string featureId) {
      int t;
      if (featureTries.TryGetValue(featureId, out t)) {
        return t;
      } else return 0;
    }

    /// <summary>Linear value estimate: sum of the weighted feature values of the state.</summary>
    public double GetValue(string state) {
      return problem.GetFeatures(state).Sum(feature => GetWeight(feature));
    }

    // contribution of a single feature; unseen features contribute zero
    private double GetWeight(Feature feature) {
      double w;
      if (featureWeigths.TryGetValue(feature.Id, out w)) return w * feature.Value;
      else return 0.0;
    }

    // stochastic-gradient step towards the observed reward for all features of the state
    private void UpdateWeights(string state, double reward) {
      double delta = reward - GetValue(state);
      foreach (var feature in problem.GetFeatures(state)) {
        //featureTries[feature.Id] = GetFeatureTries(feature.Id) + 1;
        //Debug.Assert(GetFeatureTries(feature.Id) >= 1);
        //double alpha = 1.0 / GetFeatureTries(feature.Id);
        //alpha = Math.Max(alpha, 0.001);
        // simple setting of constant alpha = 0.01 works very well for poly-10
        // (100% success rate for 20 runs within 40000 evaluations)
        var alpha = 0.01;
        double w;
        if (!featureWeigths.TryGetValue(feature.Id, out w)) {
          featureWeigths[feature.Id] = alpha * delta * feature.Value;
        } else {
          featureWeigths[feature.Id] += alpha * delta * feature.Value;
        }
      }
    }

    // the canonical states for the value function (banditInfos) and the done set must be distinguished:
    // sequences of different length could have the same canonical representation and can share a value,
    // however, if the canonical representation of a state is shorter than the original, we must not mark
    // the canonical state as done when all possible derivations from the initial state have been explored.
    // e.g. in the ant problem the canonical representation for ...lllA is ...rA;
    // even though all possible derivations (of limited length) of lllA have been visited we must not mark rA as done
    private void MarkAsDone(string state) {
      var s = CanonicalState(state);
      // when the lengths of the canonical string and the original string are the same we also disable the actions;
      // always disable terminals
      Debug.Assert(s.Length <= state.Length);
      if (s.Length == state.Length || problem.Grammar.IsTerminal(state)) {
        Debug.Assert(!done.Contains(s));
        done.Add(s);
      } else {
        // for non-terminals where the canonical string is shorter than the original string we can only
        // disable the canonical representation for all states in the same level
        Debug.Assert(!done.Contains(s + state.Length));
        // encode the original length of the state; states in the same level of the tree are treated as equivalent
        done.Add(s + state.Length);
      }
    }

    // symmetric to MarkAsDone
    private bool Done(string state) {
      var s = CanonicalState(state);
      if (s.Length == state.Length || problem.Grammar.IsTerminal(state)) {
        return done.Contains(s);
      } else {
        // it is not necessary to visit states if the canonical representation has already been fully explored
        if (done.Contains(s)) return true;
        if (done.Contains(s + state.Length)) return true;
        // also treat the state as done when any shorter level of the canonical phrase was exhausted
        for (int i = 1; i < state.Length; i++) {
          if (done.Contains(s + i)) return true;
        }
        return false;
      }
    }

    private string CanonicalState(string state) {
      if (useCanonicalPhrases) {
        return problem.CanonicalRepresentation(state);
      } else return state;
    }
  }
}