#region License Information
/* HeuristicLab
* Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
*
* This file is part of HeuristicLab.
*
* HeuristicLab is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* HeuristicLab is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
*/
#endregion
using HeuristicLab.Core;
using HeuristicLab.Data;
using HeuristicLab.Random;
using System;
using System.Linq;
namespace HeuristicLab.Analysis.SelfOrganizingMaps {
public static class RelationalSOM {
  /// <summary>
  /// This is the online algorithm described in
  /// Olteanu, M. and Villa-Vialaneix, N. 2015.
  /// On-line relational and multiple relational SOM.
  /// Neurocomputing 147, pp. 15-30.
  /// </summary>
  /// <param name="dissimilarities">The full NxN matrix containing all dissimilarities between N points.</param>
  /// <param name="random">The random number generator to use.</param>
  /// <param name="somSize">The length of a side of the SOM grid (there are somSize * somSize neurons).</param>
  /// <param name="iterations">The amount of iterations to perform in learning.</param>
  /// <param name="learningRate">The initial learning rate.</param>
  /// <param name="learningRadius">The initial learning radius.</param>
  /// <param name="jittering">If the final coordinates should be jittered slightly within the grid.</param>
  /// <returns>A matrix of coordinates having N rows and 2 columns.</returns>
  public static DoubleMatrix Map(DoubleMatrix dissimilarities, IRandom random = null, int somSize = 5, int iterations = 100, double learningRate = double.NaN, double learningRadius = 5.0, bool jittering = true) {
    if (random == null) random = new MersenneTwister();
    var K = somSize * somSize; // total number of neurons in the square grid
    var N = dissimilarities.Rows;
    if (double.IsNaN(learningRate)) learningRate = 1.0 / Math.Sqrt(2.0 * N);
    // The learning rate decays linearly over the iterations from
    // (fixed + var) down to the fixed fraction (10% of the initial rate).
    var fixedLearningRate = learningRate / 10.0;
    var varLearningRate = 9.0 * fixedLearningRate;
    // Map a linear neuron index to its (x, y) coordinate on the grid.
    Func<int, int> getX = (neuron) => neuron % somSize;
    Func<int, int> getY = (neuron) => neuron / somSize;
    // Gaussian neighborhood strength of neuron k around the best-matching
    // unit; sigma shrinks linearly with the iteration number (the small
    // additive constant avoids a zero radius in the final iteration).
    Func<int, int, int, int, double> neighborhood = (maxIter, iter, k, bmu) => {
      var sigma = learningRadius * ((maxIter - iter) / (double)maxIter) + 0.0001;
      var xK = getX(k);
      var yK = getY(k);
      var xW = getX(bmu);
      var yW = getY(bmu);
      var d = (xK - xW) * (xK - xW) + (yK - yW) * (yK - yW);
      return Math.Exp(-d / (2.0 * sigma * sigma));
    };
    // Each neuron is represented as a convex combination of the N points.
    var alphas = Enumerable.Range(0, K).Select(k => Enumerable.Range(0, N).Select(_ => random.NextDouble()).ToArray()).ToArray();
    // normalize s.t. sum(alphas[k]) = 1
    for (var k = 0; k < K; k++) {
      var sum = alphas[k].Sum();
      for (var i = 0; i < alphas[k].Length; i++) alphas[k][i] /= sum;
    }
    var oldAlphas = alphas.Select(x => (double[])x.Clone()).ToArray();
    for (var iter = 0; iter < iterations; iter++) {
      learningRate = varLearningRate * ((iterations - iter) / (double)iterations) + fixedLearningRate;
      // Present the points in a fresh random order each iteration.
      var pointShuffle = Enumerable.Range(0, N).Shuffle(random).ToArray();
      for (var p = 0; p < N; p++) {
        var i = pointShuffle[p];
        var bmu = GetBestMatchingUnit(dissimilarities, alphas, i);
        for (var k = 0; k < K; k++) {
          // The update factor is independent of j; hoist it out of the inner loop.
          var step = learningRate * neighborhood(iterations, iter, k, bmu);
          for (var j = 0; j < N; j++) {
            // Move neuron k's coefficients toward the indicator vector of point i.
            alphas[k][j] = oldAlphas[k][j] + step * ((i == j ? 1.0 : 0.0) - oldAlphas[k][j]);
          }
        }
      }
      for (var k = 0; k < K; k++) {
        for (var j = 0; j < N; j++) {
          oldAlphas[k][j] = alphas[k][j];
        }
      }
    }
    // Place each point at the grid position of its best-matching neuron,
    // optionally jittered so coinciding points remain distinguishable.
    var result = new DoubleMatrix(N, 2);
    for (var i = 0; i < N; i++) {
      var bmu = GetBestMatchingUnit(dissimilarities, alphas, i);
      if (!jittering) {
        result[i, 0] = getX(bmu);
        result[i, 1] = getY(bmu);
      } else {
        result[i, 0] = getX(bmu) + random.NextDouble() * 0.8;
        result[i, 1] = getY(bmu) + random.NextDouble() * 0.8;
      }
    }
    return result;
  }

  /// <summary>
  /// Returns the index of the neuron whose relational distance to point
  /// <paramref name="i"/> is minimal, i.e. the k minimizing
  /// (D * alpha_k)_i - 0.5 * alpha_k' * D * alpha_k.
  /// </summary>
  /// <param name="D">The NxN dissimilarity matrix.</param>
  /// <param name="alphas">The convex-combination coefficients of all K neurons.</param>
  /// <param name="i">The index of the point to match.</param>
  /// <returns>The index of the best-matching neuron.</returns>
  private static int GetBestMatchingUnit(DoubleMatrix D, double[][] alphas, int i) {
    var bmu = -1;
    var minV = double.MaxValue;
    for (var k = 0; k < alphas.Length; k++) {
      var Daki = 0.0;  // (D * alpha_k)_i
      var akDak = 0.0; // alpha_k' * D * alpha_k
      for (var r = 0; r < D.Rows; r++) {
        var Dakr = 0.0;
        for (var s = 0; s < D.Rows; s++) {
          Dakr += D[r, s] * alphas[k][s];
        }
        if (r == i) Daki = Dakr;
        akDak += alphas[k][r] * Dakr;
      }
      var v = Daki - 0.5 * akDak;
      if (v < minV) {
        bmu = k;
        minV = v;
      }
    }
    return bmu;
  }
}
}