
source: branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4/TSNE/TSNEStatic.cs @ 14788

Last change on this file since 14788 was 14788, checked in by gkronber, 7 years ago

#2700: refactoring

File size: 23.4 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */

// Code is based on an implementation from Laurens van der Maaten

/*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
*    notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
*    notice, this list of conditions and the following disclaimer in the
*    documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
*    must display the following acknowledgement:
*    This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
*    its contributors may be used to endorse or promote products derived from
*    this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using HeuristicLab.Collections;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
using HeuristicLab.Random;

namespace HeuristicLab.Algorithms.DataAnalysis {
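  // t-distributed stochastic neighbor embedding (t-SNE).
  // Embeds items of type T into a low-dimensional space (newDimensions, typically 2) such that items that are
  // similar with respect to the given IDistance<T> are mapped to nearby points.
  // theta = 0 selects the exact O(n^2) gradient, theta > 0 the Barnes-Hut approximation.
  // Based on the reference implementation by Laurens van der Maaten (see license header above).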
  [StorableClass]
  public class TSNE<T> {

    [StorableClass]
    public sealed class TSNEState : DeepCloneable {
      // initialized once
      public IDistance<T> distance;
      public IRandom random;
      public double perplexity;
      public bool exact;
      public int noDatapoints;
      public double finalMomentum;
      public int momSwitchIter;
      public int stopLyingIter;
      public double theta;
      public double eta;
      public int newDimensions;

      // for approximate version: sparse representation of similarity/distance matrix
      public double[] valP; // similarity/distance
      public int[] rowP; // row index
      public int[] colP; // col index

      // for exact version: dense representation of distance/similarity matrix
      public double[,] p;

      // mapped data
      public double[,] newData;

      public int iter;
      public double currentMomentum;

      // helper variables (updated in each iteration)
      public double[,] gains;
      public double[,] uY;
      public double[,] dY;

      private TSNEState(TSNEState original, Cloner cloner) : base(original, cloner) {
      }
      public override IDeepCloneable Clone(Cloner cloner) {
        return new TSNEState(this, cloner);
      }

      public TSNEState(T[] data, IDistance<T> distance, IRandom random, int newDimensions, double perplexity, double theta, int stopLyingIter, int momSwitchIter, double momentum, double finalMomentum, double eta) {
        this.distance = distance;
        this.random = random;
        this.newDimensions = newDimensions;
        this.perplexity = perplexity;
        this.theta = theta;
        this.stopLyingIter = stopLyingIter;
        this.momSwitchIter = momSwitchIter;
        this.currentMomentum = momentum;
        this.finalMomentum = finalMomentum;
        this.eta = eta;


        // initialize
        noDatapoints = data.Length;
        if (noDatapoints - 1 < 3 * perplexity) throw new ArgumentException("Perplexity too large for the number of data points!");

        exact = Math.Abs(theta) < double.Epsilon;
        newData = new double[noDatapoints, newDimensions];
        dY = new double[noDatapoints, newDimensions];
        uY = new double[noDatapoints, newDimensions];
        gains = new double[noDatapoints, newDimensions];
        for (var i = 0; i < noDatapoints; i++)
          for (var j = 0; j < newDimensions; j++)
            gains[i, j] = 1.0;

        p = null;
        rowP = null;
        colP = null;
        valP = null;

        // Calculate similarities
        if (exact) p = CalculateExactSimilarities(data, distance, perplexity);
        else CalculateApproximateSimilarities(data, distance, perplexity, out rowP, out colP, out valP);

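        // "Early exaggeration": multiplying all p-values by a constant factor (12) during the first iterations
        // lets clusters form tight, well-separated groups early in the optimization.
        // Iterate() undoes this scaling (divides by 12 again) once state.iter reaches stopLyingIter.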
        // Lie about the P-values
        if (exact) for (var i = 0; i < noDatapoints; i++) for (var j = 0; j < noDatapoints; j++) p[i, j] *= 12.0;
        else for (var i = 0; i < rowP[noDatapoints]; i++) valP[i] *= 12.0;

        // Initialize solution (randomly)
        var rand = new NormalDistributedRandom(random, 0, 1);
        for (var i = 0; i < noDatapoints; i++)
          for (var j = 0; j < newDimensions; j++)
            newData[i, j] = rand.NextDouble() * .0001;  // TODO const
      }

      public double EvaluateError() {
        return exact ? EvaluateErrorExact(p, newData, noDatapoints, newDimensions) : EvaluateErrorApproximate(rowP, colP, valP, newData, theta);
      }

      private static void CalculateApproximateSimilarities(T[] data, IDistance<T> distance, double perplexity, out int[] rowP, out int[] colP, out double[] valP) {
        // Compute asymmetric pairwise input similarities
        ComputeGaussianPerplexity(data, distance, out rowP, out colP, out valP, perplexity, (int)(3 * perplexity));        // TODO: why 3?
        // Symmetrize input similarities
        int[] sRowP, symColP;
        double[] sValP;
        SymmetrizeMatrix(rowP, colP, valP, out sRowP, out symColP, out sValP);
        rowP = sRowP;
        colP = symColP;
        valP = sValP;
        var sumP = .0;
        for (var i = 0; i < rowP[data.Length]; i++) sumP += valP[i];
        for (var i = 0; i < rowP[data.Length]; i++) valP[i] /= sumP;
      }
      private static double[,] CalculateExactSimilarities(T[] data, IDistance<T> distance, double perplexity) {
        // Compute similarities
        var p = new double[data.Length, data.Length];
        ComputeGaussianPerplexity(data, distance, p, perplexity);
        // Symmetrize input similarities
        for (var n = 0; n < data.Length; n++) {
          for (var m = n + 1; m < data.Length; m++) {
            p[n, m] += p[m, n];
            p[m, n] = p[n, m];
          }
        }
        var sumP = .0;
        for (var i = 0; i < data.Length; i++) for (var j = 0; j < data.Length; j++) sumP += p[i, j];
        for (var i = 0; i < data.Length; i++) for (var j = 0; j < data.Length; j++) p[i, j] /= sumP;
        return p;
      }

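      // Computes sparse input similarities: for each point i, the conditional probabilities P(j|i) over its
      // k nearest neighbors are Gaussian kernel values exp(-beta * d(i, j)), where beta = 1/(2*sigma_i^2) is
      // determined per point by a binary search such that the entropy of the row equals log(perplexity),
      // i.e. the effective number of neighbors matches the requested perplexity.
      // The result is stored in a CSR-like sparse format (rowP = row offsets, colP = column indices, valP = values).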
      private static void ComputeGaussianPerplexity(IReadOnlyList<T> x, IDistance<T> distance, out int[] rowP, out int[] colP, out double[] valP, double perplexity, int k) {
        if (perplexity > k) throw new ArgumentException("Perplexity should be lower than k!");

        int n = x.Count;
        // Allocate the memory we need
        rowP = new int[n + 1];
        colP = new int[n * k];
        valP = new double[n * k];
        var curP = new double[n - 1];
        rowP[0] = 0;
        for (var i = 0; i < n; i++) rowP[i + 1] = rowP[i] + k;

        var objX = new List<IndexedItem<T>>();
        for (var i = 0; i < n; i++) objX.Add(new IndexedItem<T>(i, x[i]));

        // Build ball tree on data set
        var tree = new VantagePointTree<IndexedItem<T>>(new IndexedItemDistance<T>(distance), objX);           // do we really want to re-create the tree on each call?

        // Loop over all points to find nearest neighbors
        for (var i = 0; i < n; i++) {
          IList<IndexedItem<T>> indices;
          IList<double> distances;

          // Find nearest neighbors
          tree.Search(objX[i], k + 1, out indices, out distances);

          // Initialize some variables for binary search
          var found = false;
          var beta = 1.0;
          var minBeta = double.MinValue;
          var maxBeta = double.MaxValue;
          const double tol = 1e-5;  // TODO: why 1e-5?

          // Iterate until we find a good perplexity
          var iter = 0; double sumP = 0;
          while (!found && iter < 200) {

            // Compute Gaussian kernel row
            for (var m = 0; m < k; m++) curP[m] = Math.Exp(-beta * distances[m + 1]);

            // Compute entropy of current row
            sumP = double.Epsilon;
            for (var m = 0; m < k; m++) sumP += curP[m];
            var h = .0;
            for (var m = 0; m < k; m++) h += beta * (distances[m + 1] * curP[m]);
            h = h / sumP + Math.Log(sumP);

            // Evaluate whether the entropy is within the tolerance level
            var hdiff = h - Math.Log(perplexity);
            if (hdiff < tol && -hdiff < tol) {
              found = true;
            } else {
              if (hdiff > 0) {
                minBeta = beta;
                if (maxBeta.IsAlmost(double.MaxValue) || maxBeta.IsAlmost(double.MinValue))
                  beta *= 2.0;
                else
                  beta = (beta + maxBeta) / 2.0;
              } else {
                maxBeta = beta;
                if (minBeta.IsAlmost(double.MinValue) || minBeta.IsAlmost(double.MaxValue))
                  beta /= 2.0;
                else
                  beta = (beta + minBeta) / 2.0;
              }
            }

            // Update iteration counter
            iter++;
          }

          // Row-normalize current row of P and store in matrix
          for (var m = 0; m < k; m++) curP[m] /= sumP;
          for (var m = 0; m < k; m++) {
            colP[rowP[i] + m] = indices[m + 1].Index;
            valP[rowP[i] + m] = curP[m];
          }
        }
      }
      private static void ComputeGaussianPerplexity(T[] x, IDistance<T> distance, double[,] p, double perplexity) {
        // Compute the distance matrix
        var dd = ComputeDistances(x, distance);

        int n = x.Length;
        // Compute the Gaussian kernel row by row
        for (var i = 0; i < n; i++) {
          // Initialize some variables
          var found = false;
          var beta = 1.0;
          var minBeta = -double.MaxValue;
          var maxBeta = double.MaxValue;
          const double tol = 1e-5;
          double sumP = 0;

          // Iterate until we find a good perplexity
          var iter = 0;
          while (!found && iter < 200) {       // TODO constant

            // Compute Gaussian kernel row
            for (var m = 0; m < n; m++) p[i, m] = Math.Exp(-beta * dd[i][m]);
            p[i, i] = double.Epsilon;

            // Compute entropy of current row
            sumP = double.Epsilon;
            for (var m = 0; m < n; m++) sumP += p[i, m];
            var h = 0.0;
            for (var m = 0; m < n; m++) h += beta * (dd[i][m] * p[i, m]);
            h = h / sumP + Math.Log(sumP);

            // Evaluate whether the entropy is within the tolerance level
            var hdiff = h - Math.Log(perplexity);
            if (hdiff < tol && -hdiff < tol) {
              found = true;
            } else {
              if (hdiff > 0) {
                minBeta = beta;
                if (maxBeta.IsAlmost(double.MaxValue) || maxBeta.IsAlmost(double.MinValue))
                  beta *= 2.0;
                else
                  beta = (beta + maxBeta) / 2.0;
              } else {
                maxBeta = beta;
                if (minBeta.IsAlmost(double.MinValue) || minBeta.IsAlmost(double.MaxValue))
                  beta /= 2.0;
                else
                  beta = (beta + minBeta) / 2.0;
              }
            }

            // Update iteration counter
            iter++;
          }

          // Row normalize P
          for (var m = 0; m < n; m++) p[i, m] /= sumP;
        }
      }

      private static double[][] ComputeDistances(T[] x, IDistance<T> distance) {
        return x.Select(m => x.Select(n => distance.Get(m, n)).ToArray()).ToArray();
      }



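      // Both error functions compute the Kullback-Leibler divergence C = sum_ij p_ij * log(p_ij / q_ij)
      // between the input similarities P and the embedding similarities Q, where Q uses the Student-t kernel
      // q_ij proportional to 1 / (1 + |y_i - y_j|^2).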
      private static double EvaluateErrorExact(double[,] p, double[,] y, int n, int d) {
        // Compute the squared Euclidean distance matrix
        var dd = new double[n, n];
        var q = new double[n, n];
        ComputeSquaredEuclideanDistance(y, n, d, dd);

        // Compute Q-matrix and normalization sum
        var sumQ = double.Epsilon;
        for (var n1 = 0; n1 < n; n1++) {
          for (var m = 0; m < n; m++) {
            if (n1 != m) {
              q[n1, m] = 1 / (1 + dd[n1, m]);
              sumQ += q[n1, m];
            } else q[n1, m] = double.Epsilon;
          }
        }
        for (var i = 0; i < n; i++) for (var j = 0; j < n; j++) q[i, j] /= sumQ;

        // Sum t-SNE error
        var c = .0;
        for (var i = 0; i < n; i++)
          for (var j = 0; j < n; j++) {
            c += p[i, j] * Math.Log((p[i, j] + float.Epsilon) / (q[i, j] + float.Epsilon));
          }
        return c;
      }
      private static double EvaluateErrorApproximate(IReadOnlyList<int> rowP, IReadOnlyList<int> colP, IReadOnlyList<double> valP, double[,] y, double theta) {
        // Get estimate of normalization term
        var n = y.GetLength(0);
        var d = y.GetLength(1);
        var tree = new SpacePartitioningTree(y);
        var buff = new double[d];
        double sumQ = 0.0;
        for (var i = 0; i < n; i++) tree.ComputeNonEdgeForces(i, theta, buff, ref sumQ);

        // Loop over all edges to compute t-SNE error
        var c = .0;
        for (var k = 0; k < n; k++) {
          for (var i = rowP[k]; i < rowP[k + 1]; i++) {
            var q = .0;
            for (var j = 0; j < d; j++) buff[j] = y[k, j];
            for (var j = 0; j < d; j++) buff[j] -= y[colP[i], j];
            for (var j = 0; j < d; j++) q += buff[j] * buff[j];
            q = 1.0 / (1.0 + q) / sumQ;
            c += valP[i] * Math.Log((valP[i] + float.Epsilon) / (q + float.Epsilon));
          }
        }
        return c;
      }
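      // Symmetrizes the sparse conditional similarities: p_ij = (P(j|i) + P(i|j)) / 2, working directly on the
      // CSR representation. The first pass counts the non-zero entries per row of the symmetric matrix, the
      // second pass fills it; the final division by 2 performs the averaging.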
      private static void SymmetrizeMatrix(IReadOnlyList<int> rowP, IReadOnlyList<int> colP, IReadOnlyList<double> valP, out int[] symRowP, out int[] symColP, out double[] symValP) {

        // Count number of elements and row counts of symmetric matrix
        var n = rowP.Count - 1;
        var rowCounts = new int[n];
        for (var j = 0; j < n; j++) {
          for (var i = rowP[j]; i < rowP[j + 1]; i++) {

            // Check whether element (col_P[i], n) is present
            var present = false;
            for (var m = rowP[colP[i]]; m < rowP[colP[i] + 1]; m++) {
              if (colP[m] == j) present = true;
            }
            if (present) rowCounts[j]++;
            else {
              rowCounts[j]++;
              rowCounts[colP[i]]++;
            }
          }
        }
        var noElem = 0;
        for (var i = 0; i < n; i++) noElem += rowCounts[i];

        // Allocate memory for symmetrized matrix
        symRowP = new int[n + 1];
        symColP = new int[noElem];
        symValP = new double[noElem];

        // Construct new row indices for symmetric matrix
        symRowP[0] = 0;
        for (var i = 0; i < n; i++) symRowP[i + 1] = symRowP[i] + rowCounts[i];

        // Fill the result matrix
        var offset = new int[n];
        for (var j = 0; j < n; j++) {
          for (var i = rowP[j]; i < rowP[j + 1]; i++) {                                  // considering element(n, colP[i])

            // Check whether element (col_P[i], n) is present
            var present = false;
            for (var m = rowP[colP[i]]; m < rowP[colP[i] + 1]; m++) {
              if (colP[m] != j) continue;
              present = true;
              if (j > colP[i]) continue; // make sure we do not add elements twice
              symColP[symRowP[j] + offset[j]] = colP[i];
              symColP[symRowP[colP[i]] + offset[colP[i]]] = j;
              symValP[symRowP[j] + offset[j]] = valP[i] + valP[m];
              symValP[symRowP[colP[i]] + offset[colP[i]]] = valP[i] + valP[m];
            }

            // If (colP[i], n) is not present, there is no addition involved
            if (!present) {
              symColP[symRowP[j] + offset[j]] = colP[i];
              symColP[symRowP[colP[i]] + offset[colP[i]]] = j;
              symValP[symRowP[j] + offset[j]] = valP[i];
              symValP[symRowP[colP[i]] + offset[colP[i]]] = valP[i];
            }

            // Update offsets
            if (present && (j > colP[i])) continue;
            offset[j]++;
            if (colP[i] != j) offset[colP[i]]++;
          }
        }

        // Divide the result by two
        for (var i = 0; i < noElem; i++) symValP[i] /= 2.0;
      }

    }

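    // Typical usage (sketch; assumes an IDistance<T> implementation, e.g. a Euclidean distance on double[],
    // and an IRandom such as HeuristicLab.Random.MersenneTwister are available):
    //   var state = TSNE<double[]>.CreateState(data, distanceFunction, new MersenneTwister(42));
    //   for (var i = 0; i < 1000; i++) TSNE<double[]>.Iterate(state);
    //   double[,] embedding = state.newData;   // noDatapoints x newDimensions coordinates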
    public static TSNEState CreateState(T[] data, IDistance<T> distance, IRandom random, int newDimensions = 2, double perplexity = 25, double theta = 0,
      int stopLyingIter = 250, int momSwitchIter = 250, double momentum = .5, double finalMomentum = .8, double eta = 200.0
      ) {
      return new TSNEState(data, distance, random, newDimensions, perplexity, theta, stopLyingIter, momSwitchIter, momentum, finalMomentum, eta);
    }


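    // Performs a single gradient-descent step on the given state and returns the current embedding:
    // computes the gradient (exact or Barnes-Hut, depending on theta), adapts the per-coordinate gains,
    // applies the momentum update uY, re-centers the solution, and handles the end of the early-exaggeration
    // phase (stopLyingIter) as well as the switch to the final momentum (momSwitchIter).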
    public static double[,] Iterate(TSNEState state) {
      if (state.exact)
        ComputeExactGradient(state.p, state.newData, state.noDatapoints, state.newDimensions, state.dY);
      else
        ComputeApproximateGradient(state.rowP, state.colP, state.valP, state.newData, state.noDatapoints, state.newDimensions, state.dY, state.theta);

      // Update gains
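      // Delta-bar-delta style gain adaptation (same constants as in the reference implementation):
      // if the gradient and the previous update point in opposite directions the gain grows additively (+0.2),
      // otherwise it shrinks multiplicatively (*0.8); gains are clipped from below at 0.01.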
      for (var i = 0; i < state.noDatapoints; i++) {
        for (var j = 0; j < state.newDimensions; j++) {
          state.gains[i, j] = Math.Sign(state.dY[i, j]) != Math.Sign(state.uY[i, j])
            ? state.gains[i, j] + .2
            : state.gains[i, j] * .8; // 20% up or 20% down // TODO: +0.2?!

          if (state.gains[i, j] < .01) state.gains[i, j] = .01;
        }
      }


      // Perform gradient update (with momentum and gains)
      for (var i = 0; i < state.noDatapoints; i++)
        for (var j = 0; j < state.newDimensions; j++)
          state.uY[i, j] = state.currentMomentum * state.uY[i, j] - state.eta * state.gains[i, j] * state.dY[i, j];

      for (var i = 0; i < state.noDatapoints; i++)
        for (var j = 0; j < state.newDimensions; j++)
          state.newData[i, j] = state.newData[i, j] + state.uY[i, j];

      // Make solution zero-mean
      ZeroMean(state.newData);
      // Stop lying about the P-values after a while, and switch momentum

      if (state.iter == state.stopLyingIter) {
        if (state.exact)
          for (var i = 0; i < state.noDatapoints; i++) for (var j = 0; j < state.noDatapoints; j++) state.p[i, j] /= 12.0;                                   //XXX why 12?
        else
          for (var i = 0; i < state.rowP[state.noDatapoints]; i++) state.valP[i] /= 12.0;                       // XXX are we not scaling all values?
      }

      if (state.iter == state.momSwitchIter)
        state.currentMomentum = state.finalMomentum;

      state.iter++;
      return state.newData;
    }


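    // Barnes-Hut approximation of the t-SNE gradient: the attractive forces are computed exactly over the sparse
    // set of input similarities (ComputeEdgeForces), while the repulsive forces and the normalization constant
    // sumQ are approximated with a space-partitioning tree; theta controls the accuracy/speed trade-off
    // (theta = 0 would be exact).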
    private static void ComputeApproximateGradient(int[] rowP, int[] colP, double[] valP, double[,] y, int n, int d, double[,] dC, double theta) {
      var tree = new SpacePartitioningTree(y);
      double sumQ = 0.0;
      var posF = new double[n, d];
      var negF = new double[n, d];
      tree.ComputeEdgeForces(rowP, colP, valP, n, posF);
      var row = new double[d];
      for (var n1 = 0; n1 < n; n1++) {
        // Buffer.BlockCopy counts bytes, so the length must be multiplied by sizeof(double)
        Buffer.BlockCopy(negF, sizeof(double) * n1 * d, row, 0, sizeof(double) * d);
        tree.ComputeNonEdgeForces(n1, theta, row, ref sumQ);
        // write the accumulated repulsive forces for point n1 back into negF
        Buffer.BlockCopy(row, 0, negF, sizeof(double) * n1 * d, sizeof(double) * d);
      }

      // Compute final t-SNE gradient
      for (var i = 0; i < n; i++)
        for (var j = 0; j < d; j++) {
          dC[i, j] = posF[i, j] - negF[i, j] / sumQ;
        }
    }

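    // Exact t-SNE gradient: dC/dy_i = sum_j (p_ij - q_ij) * (y_i - y_j) / (1 + |y_i - y_j|^2),
    // where q[n1, m] below is the unnormalized Student-t kernel and q_ij = q[n1, m] / sumQ.
    // (The constant factor 4 from the t-SNE paper is omitted; it is effectively absorbed by the learning rate eta.)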
    private static void ComputeExactGradient(double[,] p, double[,] y, int n, int d, double[,] dC) {

      // Make sure the current gradient contains zeros
      for (var i = 0; i < n; i++) for (var j = 0; j < d; j++) dC[i, j] = 0.0;

      // Compute the squared Euclidean distance matrix
      var dd = new double[n, n];
      ComputeSquaredEuclideanDistance(y, n, d, dd);

      // Compute Q-matrix and normalization sum
      var q = new double[n, n];
      var sumQ = .0;
      for (var n1 = 0; n1 < n; n1++) {
        for (var m = 0; m < n; m++) {
          if (n1 == m) continue;
          q[n1, m] = 1 / (1 + dd[n1, m]);
          sumQ += q[n1, m];
        }
      }

      // Perform the computation of the gradient
      for (var n1 = 0; n1 < n; n1++) {
        for (var m = 0; m < n; m++) {
          if (n1 == m) continue;
          var mult = (p[n1, m] - q[n1, m] / sumQ) * q[n1, m];
          for (var d1 = 0; d1 < d; d1++) {
            dC[n1, d1] += (y[n1, d1] - y[m, d1]) * mult;
          }
        }
      }
    }

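    // Fills dd with all pairwise squared Euclidean distances between the rows of x.
    // Note: the dataSums-based initialization of dd is overwritten by the explicit pairwise loop below;
    // it appears to be a leftover of the |x_i|^2 + |x_m|^2 - 2*x_i*x_m formulation used in the reference implementation.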
    private static void ComputeSquaredEuclideanDistance(double[,] x, int n, int d, double[,] dd) {
      var dataSums = new double[n];
      for (var i = 0; i < n; i++) {
        for (var j = 0; j < d; j++) {
          dataSums[i] += x[i, j] * x[i, j];
        }
      }
      for (var i = 0; i < n; i++) {
        for (var m = 0; m < n; m++) {
          dd[i, m] = dataSums[i] + dataSums[m];
        }
      }
      for (var i = 0; i < n; i++) {
        dd[i, i] = 0.0;
        for (var m = i + 1; m < n; m++) {
          dd[i, m] = 0.0;
          for (var j = 0; j < d; j++) {
            dd[i, m] += (x[i, j] - x[m, j]) * (x[i, j] - x[m, j]);
          }
          dd[m, i] = dd[i, m];
        }
      }
    }

    private static void ZeroMean(double[,] x) {
      // Compute data mean
      var n = x.GetLength(0);
      var d = x.GetLength(1);
      var mean = new double[d];
      for (var i = 0; i < n; i++) {
        for (var j = 0; j < d; j++) {
          mean[j] += x[i, j];
        }
      }
      for (var i = 0; i < d; i++) {
        mean[i] /= n;
      }
      // Subtract data mean
      for (var i = 0; i < n; i++) {
        for (var j = 0; j < d; j++) {
          x[i, j] -= mean[j];
        }
      }
    }
  }
}