
Timestamp: 08/29/19 13:53:26
Author: mkommend
Message: #2521: Integrated changes of #2943 into problem refactoring branch.

File: 1 edited

Legend: lines prefixed with "+" were added in r17225, lines prefixed with "-" were removed from r16807, unprefixed lines are unchanged context.
  • branches/2521_ProblemRefactoring/HeuristicLab.Algorithms.MOCMAEvolutionStrategy/3.3/MOCMAEvolutionStrategy.cs

r16807 → r17225

…
     }

+    public RealVectorEncoding Encoding {
+      get { return Problem.Encoding; }
+    }
+
     #region Storable fields
     [Storable]
…
     private double stepSizeDampeningFactor; //d
     [Storable]
-    private double targetSuccessProbability;// p^target_succ
-    [Storable]
-    private double evolutionPathLearningRate;//cc
-    [Storable]
-    private double covarianceMatrixLearningRate;//ccov
+    private double targetSuccessProbability; // p^target_succ
+    [Storable]
+    private double evolutionPathLearningRate; //cc
+    [Storable]
+    private double covarianceMatrixLearningRate; //ccov
     [Storable]
     private double covarianceMatrixUnlearningRate;
     [Storable]
     private double successThreshold; //ptresh
-
     #endregion

…
     }

-    public double StepSizeLearningRate { get { return stepSizeLearningRate; } }
-    public double StepSizeDampeningFactor { get { return stepSizeDampeningFactor; } }
-    public double TargetSuccessProbability { get { return targetSuccessProbability; } }
-    public double EvolutionPathLearningRate { get { return evolutionPathLearningRate; } }
-    public double CovarianceMatrixLearningRate { get { return covarianceMatrixLearningRate; } }
-    public double CovarianceMatrixUnlearningRate { get { return covarianceMatrixUnlearningRate; } }
-    public double SuccessThreshold { get { return successThreshold; } }
+    public double StepSizeLearningRate {
+      get { return stepSizeLearningRate; }
+    }
+    public double StepSizeDampeningFactor {
+      get { return stepSizeDampeningFactor; }
+    }
+    public double TargetSuccessProbability {
+      get { return targetSuccessProbability; }
+    }
+    public double EvolutionPathLearningRate {
+      get { return evolutionPathLearningRate; }
+    }
+    public double CovarianceMatrixLearningRate {
+      get { return covarianceMatrixLearningRate; }
+    }
+    public double CovarianceMatrixUnlearningRate {
+      get { return covarianceMatrixUnlearningRate; }
+    }
+    public double SuccessThreshold {
+      get { return successThreshold; }
+    }
     #endregion

…
       get { return ((DoubleValue)Results[DifferenceToBestKnownHypervolumeResultName].Value).Value; }
       set { ((DoubleValue)Results[DifferenceToBestKnownHypervolumeResultName].Value).Value = value; }
-
     }
     //Solutions
…
       Parameters.Add(new FixedValueParameter<BoolValue>(SetSeedRandomlyName, "True if the random seed should be set to a random value, otherwise false.", new BoolValue(true)));
       Parameters.Add(new FixedValueParameter<IntValue>(PopulationSizeName, "λ (lambda) - the size of the offspring population.", new IntValue(20)));
-      Parameters.Add(new ValueParameter<DoubleArray>(InitialSigmaName, "The initial sigma can be a single value or a value for each dimension. All values need to be > 0.", new DoubleArray(new[] { 0.5 })));
+      Parameters.Add(new ValueParameter<DoubleArray>(InitialSigmaName, "The initial sigma can be a single value or a value for each dimension. All values need to be > 0.", new DoubleArray(new[] {0.5})));
       Parameters.Add(new FixedValueParameter<IntValue>(MaximumGenerationsName, "The maximum number of generations which should be processed.", new IntValue(1000)));
       Parameters.Add(new FixedValueParameter<IntValue>(MaximumEvaluatedSolutionsName, "The maximum number of evaluated solutions that should be computed.", new IntValue(int.MaxValue)));
-      var set = new ItemSet<IIndicator> { new HypervolumeIndicator(), new CrowdingIndicator(), new MinimalDistanceIndicator() };
+      var set = new ItemSet<IIndicator> {new HypervolumeIndicator(), new CrowdingIndicator(), new MinimalDistanceIndicator()};
       Parameters.Add(new ConstrainedValueParameter<IIndicator>(IndicatorName, "The selection mechanism on non-dominated solutions", set, set.First()));
     }
…
     }

-    public override IDeepCloneable Clone(Cloner cloner) { return new MOCMAEvolutionStrategy(this, cloner); }
+    public override IDeepCloneable Clone(Cloner cloner) {
+      return new MOCMAEvolutionStrategy(this, cloner);
+    }
     #endregion

…
       solutions = new Individual[PopulationSize];
       for (var i = 0; i < PopulationSize; i++) {
-        var x = new RealVector(Problem.Encoding.Length); // Uniform distibution in all dimensions assumed.
-        var bounds = Problem.Encoding.Bounds;
-        for (var j = 0; j < Problem.Encoding.Length; j++) {
+        var x = new RealVector(Encoding.Length); // Uniform distibution in all dimensions assumed.
+        var bounds = Encoding.Bounds;
+        for (var j = 0; j < Encoding.Length; j++) {
           var dim = j % bounds.Rows;
           x[j] = random.NextDouble() * (bounds[dim, 1] - bounds[dim, 0]) + bounds[dim, 0];
…
     private void InitStrategy() {
       const int lambda = 1;
-      double n = Problem.Encoding.Length;
+      double n = Encoding.Length;
       targetSuccessProbability = 1.0 / (5.0 + Math.Sqrt(lambda) / 2.0);
       stepSizeDampeningFactor = 1.0 + n / (2.0 * lambda);
…
       Results.Add(new Result(ScatterPlotResultName, "A scatterplot displaying the evaluated solutions and (if available) the analytically optimal front", new ParetoFrontScatterPlot()));

-      var problem = Problem as MultiObjectiveTestFunctionProblem;
+      var problem = Problem;
       if (problem == null) return;
-      if (problem.BestKnownFront != null) {
-        ResultsBestKnownHypervolume = Hypervolume.Calculate(problem.BestKnownFront.ToJaggedArray(), problem.TestFunction.ReferencePoint(problem.Objectives), Problem.Maximization);
+      var bkf = problem.BestKnownFront == null ? null : problem.BestKnownFront.ToArray();
+      if (bkf != null && problem.ReferencePoint != null) {
+        ResultsBestKnownHypervolume = HypervolumeCalculator.CalculateHypervolume(bkf, problem.ReferencePoint, Problem.Maximization);
         ResultsDifferenceBestKnownHypervolume = ResultsBestKnownHypervolume;
       }
-      ResultsScatterPlot = new ParetoFrontScatterPlot(new double[0][], new double[0][], problem.BestKnownFront.ToJaggedArray(), Problem.Objectives, Problem.Encoding.Length);
+      ResultsScatterPlot = new ParetoFrontScatterPlot(new double[0][], new double[0][], bkf, Problem.Objectives, Problem.Encoding.Length);
     }
     #endregion
…
     }
     private RealVector ClosestFeasible(RealVector x) {
-      var bounds = Problem.Encoding.Bounds;
+      var bounds = Encoding.Bounds;
       var r = new RealVector(x.Length);
       for (var i = 0; i < x.Length; i++) {
…
     }
     private bool IsFeasable(RealVector offspring) {
-      var bounds = Problem.Encoding.Bounds;
+      var bounds = Encoding.Bounds;
       for (var i = 0; i < offspring.Length; i++) {
         var dim = i % bounds.Rows;
…
       //perform a nondominated sort to assign the rank to every element
       int[] ranks;
-      var fronts = DominationCalculator<Individual>.CalculateAllParetoFronts(parents.ToArray(), parents.Select(i => i.PenalizedFitness).ToArray(), Problem.Maximization, out ranks);
+      var fronts = DominationCalculator.CalculateAllParetoFronts(parents.ToArray(), parents.Select(i => i.PenalizedFitness).ToArray(), Problem.Maximization, out ranks);

       //deselect the highest rank fronts until we would end up with less or equal mu elements
…

     private void Analyze() {
-      ResultsScatterPlot = new ParetoFrontScatterPlot(solutions.Select(x => x.Fitness).ToArray(), solutions.Select(x => x.Mean.ToArray()).ToArray(), ResultsScatterPlot.ParetoFront, ResultsScatterPlot.Objectives, ResultsScatterPlot.ProblemSize);
+      var qualities = solutions.Select(x => x.Fitness).ToArray();
+
+      //to do check for side effects
+      ResultsScatterPlot = new ParetoFrontScatterPlot(qualities, solutions.Select(x => x.Mean.ToArray()).ToArray(), ResultsScatterPlot.ParetoFront, ResultsScatterPlot.Objectives, ResultsScatterPlot.ProblemSize);
       ResultsSolutions = solutions.Select(x => x.Mean.ToArray()).ToMatrix();

-      var problem = Problem as MultiObjectiveTestFunctionProblem;
+      var problem = Problem as MultiObjectiveProblem<RealVectorEncoding, RealVector>;
       if (problem == null) return;

-      var front = NonDominatedSelect.GetDominatingVectors(solutions.Select(x => x.Fitness), problem.ReferencePoint.CloneAsArray(), Problem.Maximization, true).ToArray();
-      if (front.Length == 0) return;
-      var bounds = problem.Bounds.CloneAsMatrix();
-      ResultsCrowding = Crowding.Calculate(front, bounds);
-      ResultsSpacing = Spacing.Calculate(front);
-      ResultsGenerationalDistance = problem.BestKnownFront != null ? GenerationalDistance.Calculate(front, problem.BestKnownFront.ToJaggedArray(), 1) : double.NaN;
-      ResultsInvertedGenerationalDistance = problem.BestKnownFront != null ? InvertedGenerationalDistance.Calculate(front, problem.BestKnownFront.ToJaggedArray(), 1) : double.NaN;
-      ResultsHypervolume = Hypervolume.Calculate(front, problem.ReferencePoint.CloneAsArray(), Problem.Maximization);
+
+      if (qualities.Length == 0) return;
+      ResultsCrowding = CrowdingCalculator.CalculateCrowding(qualities);
+      ResultsSpacing = Spacing.Calculate(qualities);
+
+      ResultsGenerationalDistance = problem.BestKnownFront != null ? GenerationalDistance.Calculate(qualities, problem.BestKnownFront, 1) : double.NaN;
+      ResultsInvertedGenerationalDistance = problem.BestKnownFront != null ? InvertedGenerationalDistance.Calculate(qualities, problem.BestKnownFront, 1) : double.NaN;
+      ResultsHypervolume = problem.ReferencePoint != null ? HypervolumeCalculator.CalculateHypervolume(qualities, problem.ReferencePoint, Problem.Maximization) : double.NaN;
       ResultsBestHypervolume = Math.Max(ResultsHypervolume, ResultsBestHypervolume);
       ResultsDifferenceBestKnownHypervolume = ResultsBestKnownHypervolume - ResultsBestHypervolume;
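
For orientation, the recurring pattern in the hunks above is a forwarding property: the algorithm gains an Encoding property that simply returns Problem.Encoding, and call sites such as Problem.Encoding.Length or Problem.Encoding.Bounds shrink to Encoding.Length or Encoding.Bounds. The sketch below is a minimal, self-contained illustration of only that pattern; EncodingStub, ProblemStub, MocmaSketch and the use of plain arrays in place of HeuristicLab's RealVector and DoubleMatrix are hypothetical stand-ins, not code from the branch.

using System;

// Hypothetical stand-ins for the HeuristicLab types touched in this changeset.
public class EncodingStub {
  public int Length { get; set; }        // role of RealVectorEncoding.Length
  public double[,] Bounds { get; set; }  // role of RealVectorEncoding.Bounds (rows = dimensions, columns = min/max)
}

public class ProblemStub {
  public EncodingStub Encoding { get; set; }  // role of Problem.Encoding
}

public class MocmaSketch {
  private readonly Random random = new Random();

  public ProblemStub Problem { get; set; }

  // The forwarding property introduced by the changeset: call sites read
  // Encoding.X instead of repeating Problem.Encoding.X.
  public EncodingStub Encoding {
    get { return Problem.Encoding; }
  }

  // Mirrors the initialization loop in the diff: sample each dimension
  // uniformly between its lower and upper bound.
  public double[] SampleUniformSolution() {
    var x = new double[Encoding.Length];
    var bounds = Encoding.Bounds;
    for (var j = 0; j < Encoding.Length; j++) {
      var dim = j % bounds.GetLength(0);  // bounds may define fewer rows than dimensions
      x[j] = random.NextDouble() * (bounds[dim, 1] - bounds[dim, 0]) + bounds[dim, 0];
    }
    return x;
  }
}

As a usage example, new MocmaSketch { Problem = new ProblemStub { Encoding = new EncodingStub { Length = 3, Bounds = new[,] { { -5.0, 5.0 } } } } }.SampleUniformSolution() returns a random vector inside the box constraints, with the single bounds row reused for every dimension.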