#region License Information
/* HeuristicLab
* Author: Kaifeng Yang
*
* Copyright (C) 2002-2019 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
*
* This file is part of HeuristicLab.
 *
* HeuristicLab is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* HeuristicLab is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with HeuristicLab. If not, see .
*/
// SMS-EMOA
/*
Implementation of a real-coded SMS-EMOA algorithm.
This implementation follows the description of: 'M. Emmerich, N. Beume, and B. Naujoks.
An EMO Algorithm Using the Hypervolume Measure as Selection Criterion.EMO 2005.'
*/
#endregion
using HEAL.Attic;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Data;
using HeuristicLab.Random;
using System.Linq;
using System;
using CancellationToken = System.Threading.CancellationToken;
using HeuristicLab.Analysis;
using HeuristicLab.Problems.TestFunctions.MultiObjective;
using HeuristicLab.Optimization;
using System.Collections.Generic;
/* This algorithm is SMS-EMOA implementation on HL. The main structure and interfaces with HL are copied from MOEA/D on HL, which was written by Dr. Bogdan Burlacu. The S-metric selection operator was adapted from Kaifeng's MATLAB toolbox in SMS-EMOA. The computational complexity of HVC is AT LEAST $O (n^2 \log n)$ in 2-D and 3-D cases. HVC should definitely be reduced to $\Theta (n \times \log n)$.
*
* This algorithm assumes:
* 1. minimization problems. For maximization problems, it is better to add "-" symbol.
*
* This algorithm works on:
* 1. continuous, discrete, mixed-integer MOO problems. For different types of problems, the operators should be adjusted accordingly.
* 2. both multi-objective and many-objective problems. For many-objective problems, the bottleneck is the computational complexity of HV.
*
* This algorithm is the basic implementation of SMS-EMOA, proposed by Michael Emmerich et. al. Some potential improvements can be:
* 1. Dynamic reference point strategy
* 2. Normalized fitness value strategy ---- desirability function. See, Yali, Longmei, Kaifeng, Michael Emmerich CEC paper.
* 3. HVC calculation should definitely be improved, at least in the 2D and 3D cases.
* 4. multiple point strategy when $\lambda>1$
* 5. multiple reference points strategy, in ICNC 2016, Zhiwei Yang et. al.
* 6. HVC approximation by R2 for MANY OBJECTIVE cases, by Ishibushi 2019, IEEE TEC
* 7. Maybe: See maps
*
* Global parameters:
* 1. population
*
 * Many thanks to Bogdan Burlacu and Johannes Karder, especially Bogdan for his explanation, help, and support.
*/
namespace HeuristicLab.Algorithms.DynamicALPS {
// Format:
// The indexed name of the algorithm/item, Description of the algorithm/item in HL
[Item("DynamicALPS-MainLoop", "DynamicALPS-MainLoop implementation adapted from SMS-EMOA in HL.")]
// Call "HeuristicLab.Core"
// Define the category of this class.
[Creatable(CreatableAttribute.Categories.PopulationBasedAlgorithms, Priority = 125)]
// Call "HEAL.Attic"
// Define GUID for this Class
[StorableType("A7F33D16-3495-43E8-943C-8A919123F541")]
public class DynamicALPSAlgorithm : DynamicALPSAlgorithmBase {
  public DynamicALPSAlgorithm() { }

  protected DynamicALPSAlgorithm(DynamicALPSAlgorithm original, Cloner cloner) : base(original, cloner) { }

  public override IDeepCloneable Clone(Cloner cloner) {
    return new DynamicALPSAlgorithm(this, cloner);
  }

  [StorableConstructor]
  protected DynamicALPSAlgorithm(StorableConstructorFlag deserializing) : base(deserializing) { }

  /// <summary>
  /// Main loop of DynamicALPS: an age-layered population structure (ALPS) in which every
  /// active layer is evolved by SMS-EMOA steps (see base class) and individuals whose age
  /// exceeds the layer's age gap are promoted to the next layer.
  /// </summary>
  /// <param name="cancellationToken">Requests a stop; checked once per generation.</param>
  protected override void Run(CancellationToken cancellationToken) {
    // Re-initialize only on a fresh start; a paused run resumes with its existing state.
    if (previousExecutionState != ExecutionState.Paused) {
      InitializeAlgorithm(cancellationToken);
    }

    var populationSize = PopulationSize.Value;
    bool[] maximization = ((BoolArray)Problem.MaximizationParameter.ActualValue).CloneAsArray();
    var crossover = Crossover;
    var crossoverProbability = CrossoverProbability.Value;
    var mutator = Mutator;
    var mutationProbability = MutationProbability.Value;
    var evaluator = Problem.Evaluator;
    var analyzer = Analyzer;
    var rand = RandomParameter.Value;
    var maximumEvaluatedSolutions = MaximumEvaluatedSolutions.Value;

    int lambda = 1; // number of offspring generated per SMS-EMOA step
    int nLayerALPS = ALPSLayers.Value;
    int counterLayerALPS = 0;

    // Age gap of layer i is 10 * (i + 1). Sized from the actual layer count; the previous
    // fixed 10-element table threw IndexOutOfRangeException for more than ten layers.
    int[] ageGapArray = Enumerable.Range(1, nLayerALPS).Select(x => 10 * x).ToArray();
    int[] numberDiscard = new int[nLayerALPS]; // per layer: matured individuals collected so far

    activeLayer = new bool[nLayerALPS];
    int[][] ageMatrix = new int[nLayerALPS][]; // [layer][individual]: ages of the joint population

    // Token for inner operations (analyzers) that should not be cancelled immediately
    // together with the outer loop.
    var innerToken = new CancellationToken();

    layerPopulation = new IDynamicALPSSolution[nLayerALPS][];
    layerOffspringPopulation = new IDynamicALPSSolution[nLayerALPS][];
    layerJointPopulation = new IDynamicALPSSolution[nLayerALPS][];
    layerDiscardPopulation = new IDynamicALPSSolution[nLayerALPS][];

    layerPopulation[counterLayerALPS] = new IDynamicALPSSolution[populationSize];
    // IMPROVE: the offspring count may need to differ between layers.
    layerOffspringPopulation[counterLayerALPS] = new IDynamicALPSSolution[lambda];
    layerDiscardPopulation[counterLayerALPS] = new IDynamicALPSSolution[populationSize];
    population.CopyTo(layerPopulation[counterLayerALPS], 0);

    // Only the first layer starts active; all discard counters start at zero.
    for (int i = 0; i < nLayerALPS; i++) {
      activeLayer[i] = (i == 0);
      numberDiscard[i] = 0;
    }

    // Main loop: each pass evolves every active layer once.
    while (evaluatedSolutions < maximumEvaluatedSolutions && !cancellationToken.IsCancellationRequested) {
      for (int i = 0; i < nLayerALPS; i++) {
        if (!activeLayer[i]) { continue; } // inactive layers are skipped

        evaluatedSolutions = SMSEMOA(populationSize, lambda, i);
        if (evaluatedSolutions >= maximumEvaluatedSolutions) { break; } // evaluation budget exhausted

        // Ages of the current layer's joint (parent + offspring) population.
        ageMatrix[i] = layerJointPopulation[i].Select(x => x.Age).ToArray();

        // Promote the oldest individual when it exceeds this layer's age gap. The topmost
        // layer has no successor, so it keeps its individuals regardless of age (the
        // previously unguarded access to layer i + 1 threw for the last layer).
        if (i + 1 < nLayerALPS && ageMatrix[i].Max() > ageGapArray[i]) {
          // IndexOf picks the first individual with the maximal age; SMS-EMOA adds one
          // offspring per step, so age ties are not expected here.
          int discardedIndividualIndex = ageMatrix[i].ToList().IndexOf(ageMatrix[i].Max());
          // Stash the matured individual for the next layer ...
          layerDiscardPopulation[i][numberDiscard[i]] = layerJointPopulation[i][discardedIndividualIndex];
          // ... and remove it from the current layer.
          layerJointPopulation[i].Where((x, idx) => idx != discardedIndividualIndex).ToArray().CopyTo(layerPopulation[i], 0);
          numberDiscard[i] += 1;

          if (!activeLayer[i + 1]) {
            // The next layer only becomes active once a full population of matured
            // individuals has accumulated.
            if (numberDiscard[i] == populationSize) {
              InitializeLayer(i + 1, populationSize, lambda); // allocates the layer and sets its active flag
              layerDiscardPopulation[i].CopyTo(layerPopulation[i + 1], 0);
              numberDiscard[i] = 0;
            }
            // Otherwise: keep collecting matured individuals.
          } else {
            // Next layer is active: append the promoted individual to its joint
            // population and re-select there by age and hypervolume contribution.
            layerPopulation[i + 1].CopyTo(layerJointPopulation[i + 1], 0);
            layerJointPopulation[i].Where((x, idx) => idx == discardedIndividualIndex).ToArray().CopyTo(layerJointPopulation[i + 1], populationSize);
            SmetricSelection(lambda, i + 1); // AGE and HVC
            numberDiscard[i] = 0;
          }
        }
        // BUG (pre-existing): only the last processed layer ends up in 'population';
        // all active layers should be merged here.
        layerPopulation[i].CopyTo(population, 0);
      }

      // Run the global analyzer on the current population.
      var analyze = executionContext.CreateChildOperation(analyzer, globalScope);
      ExecuteOperation(executionContext, innerToken, analyze);

      // Update the Pareto-front approximation sets and publish results to the GUI.
      UpdateParetoFronts();
      Results.AddOrUpdateResult("IdealPoint", new DoubleArray(IdealPoint));
      Results.AddOrUpdateResult("NadirPoint", new DoubleArray(NadirPoint));
      Results.AddOrUpdateResult("Evaluated Solutions", new IntValue(evaluatedSolutions));

      var allLayerResults = new ResultCollection();
      Results.AddOrUpdateResult("LayerResults", allLayerResults);

      // Per-layer analyzers: currently a scatter plot of each active layer.
      for (int i = 0; i < activeLayer.Length; ++i) {
        if (!activeLayer[i]) continue;
        var scope = new Scope($"Layer {i}");
        var layer = layerPopulation[i];
        var layerResults = new ResultCollection();
        allLayerResults.Add(new Result(scope.Name, layerResults));
        var problem = (MultiObjectiveTestFunctionProblem)Problem;
        scope.SubScopes.AddRange(layer.Select(x => (IScope)x.Individual));
        scope.Variables.Add(new Variable("Results", layerResults));
        scope.Variables.Add(new Variable("TestFunction", problem.TestFunction));
        var scatterPlotAnalyzer = new ScatterPlotAnalyzer();
        scatterPlotAnalyzer.IndividualsParameter.ActualName = "RealVector";
        var scatterPlotAnalyzerContext = executionContext.CreateChildOperation(scatterPlotAnalyzer, scope);
        ExecuteOperation(executionContext, CancellationToken.None, scatterPlotAnalyzerContext);
        scope.SubScopes.Clear();
        scope.Variables.Clear();
      }

      // Expose the current population as sub-scopes of the global scope.
      globalScope.SubScopes.Replace(population.Select(x => (IScope)x.Individual));
    }
  }
}
}