source: branches/PerformanceComparison/HeuristicLab.Algorithms.MemPR/3.3/Binary/BinaryMemPR.cs @ 14666

Last change on this file since 14666 was 14563, checked in by abeham, 8 years ago

#2701:

  • Tagged unbiased models with property
  • Changed default configuration
  • Added solution distance to breeding, relinking and delinking performance models
  • Changed sampling model to base prediction on average distance in genotype space
  • Changed target for hillclimber and relinking to relative (quality improvement)
  • Changed breeding to count cache hits per crossover
File size: 10.0 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using HeuristicLab.Algorithms.MemPR.Interfaces;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Data;
using HeuristicLab.Encodings.BinaryVectorEncoding;
using HeuristicLab.Optimization;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
using HeuristicLab.PluginInfrastructure;
using HeuristicLab.Random;

namespace HeuristicLab.Algorithms.MemPR.Binary {
  [Item("MemPR (binary)", "MemPR implementation for binary vectors.")]
  [StorableClass]
  [Creatable(CreatableAttribute.Categories.PopulationBasedAlgorithms, Priority = 999)]
  public class BinaryMemPR : MemPRAlgorithm<ISingleObjectiveHeuristicOptimizationProblem, BinaryVector, BinaryMemPRPopulationContext, BinaryMemPRSolutionContext> {
    [StorableConstructor]
    protected BinaryMemPR(bool deserializing) : base(deserializing) { }
    protected BinaryMemPR(BinaryMemPR original, Cloner cloner) : base(original, cloner) { }
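    // Registers all available solution model trainers and local searches as valid parameter
    // values; if an unbiased model trainer is available, it is selected as the default.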
    public BinaryMemPR() {
      foreach (var trainer in ApplicationManager.Manager.GetInstances<ISolutionModelTrainer<BinaryMemPRPopulationContext>>())
        SolutionModelTrainerParameter.ValidValues.Add(trainer);

      if (SolutionModelTrainerParameter.ValidValues.Count > 0) {
        var unbiased = SolutionModelTrainerParameter.ValidValues.FirstOrDefault(x => !x.Bias);
        if (unbiased != null) SolutionModelTrainerParameter.Value = unbiased;
      }

      foreach (var localSearch in ApplicationManager.Manager.GetInstances<ILocalSearch<BinaryMemPRSolutionContext>>()) {
        LocalSearchParameter.ValidValues.Add(localSearch);
      }
    }

    public override IDeepCloneable Clone(Cloner cloner) {
      return new BinaryMemPR(this, cloner);
    }

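    // Two binary vectors are considered equal iff they agree at every position.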
    protected override bool Eq(BinaryVector a, BinaryVector b) {
      var len = a.Length;
      for (var i = 0; i < len; i++)
        if (a[i] != b[i]) return false;
      return true;
    }

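    // Genotype distance: 1 - Hamming similarity, i.e. the fraction of differing bits.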
    protected override double Dist(ISingleObjectiveSolutionScope<BinaryVector> a, ISingleObjectiveSolutionScope<BinaryVector> b) {
      return 1.0 - HammingSimilarityCalculator.CalculateSimilarity(a.Solution, b.Solution);
    }

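    // Derives a subspace from a set of solutions: a position belongs to the subspace if the
    // solutions disagree on it (or, with inverse = true, if all solutions agree on it).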
    protected override ISolutionSubspace<BinaryVector> CalculateSubspace(IEnumerable<BinaryVector> solutions, bool inverse = false) {
      var pop = solutions.ToList();
      var N = pop[0].Length;
      var subspace = new bool[N];
      for (var i = 0; i < N; i++) {
        var val = pop[0][i];
        if (inverse) subspace[i] = true;
        for (var p = 1; p < pop.Count; p++) {
          if (pop[p][i] != val) subspace[i] = !inverse;
        }
      }
      return new BinarySolutionSubspace(subspace);
    }

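    // Performs an adaptive, simulated-annealing-like walk over single bit flips. Only
    // positions contained in the given subspace (if any) are flipped; the best solution
    // encountered during the walk is adopted at the end.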
    protected override void AdaptiveWalk(ISingleObjectiveSolutionScope<BinaryVector> scope, int maxEvals, CancellationToken token, ISolutionSubspace<BinaryVector> subspace = null) {
      var evaluations = 0;
      var subset = subspace != null ? ((BinarySolutionSubspace)subspace).Subspace : null;
      if (double.IsNaN(scope.Fitness)) {
        Context.Evaluate(scope, token);
        evaluations++;
      }
      SingleObjectiveSolutionScope<BinaryVector> bestOfTheWalk = null;
      var currentScope = (SingleObjectiveSolutionScope<BinaryVector>)scope.Clone();
      var current = currentScope.Solution;
      var N = current.Length;

      var subN = subset != null ? subset.Count(x => x) : N;
      if (subN == 0) return;
      var order = Enumerable.Range(0, N).Where(x => subset == null || subset[x]).Shuffle(Context.Random).ToArray();

      var bound = Context.Maximization ? Context.Population.Max(x => x.Fitness) : Context.Population.Min(x => x.Fitness);
      var range = Math.Abs(bound - Context.LocalOptimaLevel);
      if (range.IsAlmost(0)) range = Math.Abs(bound * 0.05);
      if (range.IsAlmost(0)) { // because bound = localoptimalevel = 0
        Context.IncrementEvaluatedSolutions(evaluations);
        return;
      }

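      // Annealing schedule: the start temperature accepts a move that is worse by the full
      // range with probability 1/maxEvals, the end temperature with probability 0.1/maxEvals;
      // annealFactor decays temp geometrically from the former to the latter over maxEvals steps.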
      var temp = -range / Math.Log(1.0 / maxEvals);
      var endtemp = -range / Math.Log(0.1 / maxEvals);
      var annealFactor = Math.Pow(endtemp / temp, 1.0 / maxEvals);
      for (var iter = 0; iter < int.MaxValue; iter++) {
        var moved = false;

        for (var i = 0; i < subN; i++) {
          var idx = order[i];
          var before = currentScope.Fitness;
          current[idx] = !current[idx];
          Context.Evaluate(currentScope, token);
          evaluations++;
          var after = currentScope.Fitness;

          if (Context.IsBetter(after, before) && (bestOfTheWalk == null || Context.IsBetter(after, bestOfTheWalk.Fitness))) {
            bestOfTheWalk = (SingleObjectiveSolutionScope<BinaryVector>)currentScope.Clone();
            if (Context.IsBetter(bestOfTheWalk, scope)) {
              moved = false;
              break;
            }
          }
          var diff = Context.Maximization ? after - before : before - after;
          if (diff > 0) moved = true;
          else {
            var prob = Math.Exp(diff / temp);
            if (Context.Random.NextDouble() >= prob) {
              // the move is not good enough -> undo the move
              current[idx] = !current[idx];
              currentScope.Fitness = before;
            }
          }
          temp *= annealFactor;
          if (evaluations >= maxEvals) break;
        }
        if (!moved) break;
        if (evaluations >= maxEvals) break;
      }

      Context.IncrementEvaluatedSolutions(evaluations);
      scope.Adopt(bestOfTheWalk ?? currentScope);
    }

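    // Breeds an offspring from two parents by repeatedly applying 1-point, 2-point and uniform
    // crossover. Children that were already produced before (cache) are not re-evaluated; cache
    // hits are counted per crossover and a crossover is abandoned after too many duplicates.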
    protected override ISingleObjectiveSolutionScope<BinaryVector> Breed(ISingleObjectiveSolutionScope<BinaryVector> p1, ISingleObjectiveSolutionScope<BinaryVector> p2, CancellationToken token) {
      var evaluations = 0;
      var N = p1.Solution.Length;

      var probe = Context.ToScope((BinaryVector)p1.Solution.Clone());

      var cache = new HashSet<BinaryVector>(new BinaryVectorEqualityComparer());
      cache.Add(p1.Solution);
      cache.Add(p2.Solution);

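      // Keys 0, 1 and 2 identify 1-point, 2-point and uniform crossover; the value counts how
      // often the respective crossover produced an already cached (duplicate) child.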
      var cacheHits = new Dictionary<int, int>() { { 0, 0 }, { 1, 0 }, { 2, 0 } };
      ISingleObjectiveSolutionScope<BinaryVector> offspring = null;

      while (evaluations < N) {
        BinaryVector c = null;
        var xochoice = cacheHits.SampleRandom(Context.Random).Key;
        switch (xochoice) {
          case 0: c = NPointCrossover.Apply(Context.Random, p1.Solution, p2.Solution, new IntValue(1)); break;
          case 1: c = NPointCrossover.Apply(Context.Random, p1.Solution, p2.Solution, new IntValue(2)); break;
          case 2: c = UniformCrossover.Apply(Context.Random, p1.Solution, p2.Solution); break;
        }
        if (cache.Contains(c)) {
          cacheHits[xochoice]++;
          if (cacheHits[xochoice] > 10) {
            cacheHits.Remove(xochoice);
            if (cacheHits.Count == 0) break;
          }
          continue;
        }
        probe.Solution = c;
        Context.Evaluate(probe, token);
        evaluations++;
        cache.Add(c);
        if (offspring == null || Context.IsBetter(probe, offspring)) {
          // clone the probe, because it is reused for the next child and would otherwise
          // overwrite the stored offspring
          offspring = (ISingleObjectiveSolutionScope<BinaryVector>)probe.Clone();
          if (Context.IsBetter(offspring, p1) && Context.IsBetter(offspring, p2))
            break;
        }
      }
      Context.IncrementEvaluatedSolutions(evaluations);
      return offspring ?? probe;
    }

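    // Path relinking (or delinking): starting from a, repeatedly flips positions in which the
    // current solution still differs from b (or, when delinking, still agrees with b), taking
    // the first improving flip or otherwise the least-degrading one, and returns the best
    // intermediate solution encountered on the path.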
    protected override ISingleObjectiveSolutionScope<BinaryVector> Link(ISingleObjectiveSolutionScope<BinaryVector> a, ISingleObjectiveSolutionScope<BinaryVector> b, CancellationToken token, bool delink = false) {
      var evaluations = 0;
      var childScope = (ISingleObjectiveSolutionScope<BinaryVector>)a.Clone();
      var child = childScope.Solution;
      ISingleObjectiveSolutionScope<BinaryVector> best = null;
      var cF = a.Fitness;
      var bF = double.NaN;
      var order = Enumerable.Range(0, child.Length)
        .Where(x => !delink && child[x] != b.Solution[x] || delink && child[x] == b.Solution[x])
        .Shuffle(Context.Random).ToList();
      if (order.Count == 0) return childScope;

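      // In every pass evaluate the remaining candidate flips, commit the chosen one and remove
      // it from the order, until the path to (or away from) b is exhausted.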
      while (true) {
        var bestS = double.NaN;
        var bestI = -1;
        for (var i = 0; i < order.Count; i++) {
          var idx = order[i];
          child[idx] = !child[idx]; // move
          Context.Evaluate(childScope, token);
          evaluations++;
          var s = childScope.Fitness;
          childScope.Fitness = cF;
          child[idx] = !child[idx]; // undo move
          if (Context.IsBetter(s, cF)) {
            bestS = s;
            bestI = i;
            break; // first-improvement
          }
          if (Context.IsBetter(s, bestS)) {
            // least-degrading
            bestS = s;
            bestI = i;
          }
        }
        child[order[bestI]] = !child[order[bestI]];
        order.RemoveAt(bestI);
        cF = bestS;
        childScope.Fitness = cF;
        if (Context.IsBetter(cF, bF)) {
          bF = cF;
          best = (ISingleObjectiveSolutionScope<BinaryVector>)childScope.Clone();
        }
        if (order.Count == 0) break;
      }
      Context.IncrementEvaluatedSolutions(evaluations);
      return best ?? childScope;
    }
  }
}