- Timestamp: 01/05/17 17:50:03 (8 years ago)
- File: 1 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
branches/MemPRAlgorithm/HeuristicLab.Algorithms.MemPR/3.3/Binary/BinaryMemPR.cs (r14544 → r14550)
  }

- protected override bool Eq(ISingleObjectiveSolutionScope<BinaryVector> a, ISingleObjectiveSolutionScope<BinaryVector> b) {
-   var len = a.Solution.Length;
-   var acode = a.Solution;
-   var bcode = b.Solution;
+ protected override bool Eq(BinaryVector a, BinaryVector b) {
+   var len = a.Length;
    for (var i = 0; i < len; i++)
-     if (acode[i] != bcode[i]) return false;
+     if (a[i] != b[i]) return false;
    return true;
  }
…
  var order = Enumerable.Range(0, N).Where(x => subset == null || subset[x]).Shuffle(Context.Random).ToArray();

- var max = Context.Population.Max(x => x.Fitness);
- var min = Context.Population.Min(x => x.Fitness);
- var range = (max - min);
- if (range.IsAlmost(0)) range = Math.Abs(max * 0.05);
- //else range += range * 0.05;
- if (range.IsAlmost(0)) { // because min = max = 0
+ var bound = Problem.Maximization ? Context.Population.Max(x => x.Fitness) : Context.Population.Min(x => x.Fitness);
+ var range = Math.Abs(bound - Context.LocalOptimaLevel);
+ if (range.IsAlmost(0)) range = Math.Abs(bound * 0.05);
+ if (range.IsAlmost(0)) { // because bound = localoptimalevel = 0
    Context.IncrementEvaluatedSolutions(evaluations);
    return;
  }

- //var upperBound = Problem.Maximization ? min - range : max + range;
- //var lowerBound = Problem.Maximization ? max : min;
- var temp = 1.0;
+ var temp = -range / Math.Log(1.0 / maxEvals);
+ var endtemp = -range / Math.Log(0.1 / maxEvals);
+ var annealFactor = Math.Pow(endtemp / temp, 1.0 / maxEvals);
  for (var iter = 0; iter < int.MaxValue; iter++) {
    var moved = false;
…
    if (Context.IsBetter(after, before) && (bestOfTheWalk == null || Context.IsBetter(after, bestOfTheWalk.Fitness))) {
      bestOfTheWalk = (SingleObjectiveSolutionScope<BinaryVector>)currentScope.Clone();
+     if (Context.IsBetter(bestOfTheWalk, scope)) {
+       moved = false;
+       break;
+     }
    }
    var diff = Problem.Maximization ? after - before : before - after;
    if (diff > 0) moved = true;
    else {
-     var prob = Math.Exp(temp * diff / range);
+     var prob = Math.Exp(diff / temp);
      if (Context.Random.NextDouble() >= prob) {
        // the move is not good enough -> undo the move
…
      }
    }
-   temp += 100.0 / maxEvals;
+   temp *= annealFactor;
    if (evaluations >= maxEvals) break;
  }
…
    : Math.Min(p1.Fitness, p2.Fitness);

- var offspring = ToScope(null);
+ var cache = new HashSet<BinaryVector>(new BinaryVectorEqualityComparer());
+ cache.Add(p1.Solution);
+ cache.Add(p2.Solution);
+
+ ISingleObjectiveSolutionScope<BinaryVector> offspring = null;
  var probe = ToScope(new BinaryVector(N));
- var order = Enumerable.Range(0, N - 1).Shuffle(Context.Random).ToList();
- for (var x = 0; x < N - 1; x++) {
-   var b = order[x];
-   if (p1.Solution[b] == p2.Solution[b]) continue;
+ // first try all possible combinations of 1-point crossover
+ /*var order = Enumerable.Range(1, N - 2).Where(x => p1.Solution[x] != p2.Solution[x]).Shuffle(Context.Random).ToList();
+ foreach (var b in order) {
    for (var i = 0; i <= b; i++)
      probe.Solution[i] = p1.Solution[i];
…
      probe.Solution[i] = p2.Solution[i];

+   // only add to cache, because all solutions must be unique
+   if (cache.Contains(probe.Solution)) continue;
+   cache.Add(probe.Solution);
    Evaluate(probe, token);
    evaluations++;
-   if (Context.IsBetter(probe, offspring)) {
+   if (offspring == null || Context.IsBetter(probe, offspring)) {
+     // immediately return upon finding a better offspring than better parent
      if (Context.IsBetter(probe.Fitness, better)) {
        Context.IncrementEvaluatedSolutions(evaluations);
…
      offspring = (ISingleObjectiveSolutionScope<BinaryVector>)probe.Clone();
    }
- }
-
- while (evaluations < Context.LocalSearchEvaluations) {
+ }*/
+
+ var cacheHits = 0;
+ // if we got some evaluations left, try uniform crossover
+ while (evaluations < Math.Min(Context.LocalSearchEvaluations, N)) {
    probe.Solution = UniformCrossover.Apply(Context.Random, p1.Solution, p2.Solution);
+   if (cache.Contains(probe.Solution)) {
+     cacheHits++;
+     if (cacheHits > 10) break; // variability of uniform crossover is too low -> parents are too similar
+     continue;
+   } else cache.Add(probe.Solution);
    Evaluate(probe, token);
    evaluations++;
-   if (Context.IsBetter(probe, offspring)) {
+   if (offspring == null || Context.IsBetter(probe, offspring)) {
+     // immediately return upon finding a better offspring than better parent
      if (Context.IsBetter(probe.Fitness, better)) {
        Context.IncrementEvaluatedSolutions(evaluations);
…
  }
  Context.IncrementEvaluatedSolutions(evaluations);
- return offspring;
+ // return best offspring found
+ return offspring ?? probe;
  }