
source: branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4/TSNE/TSNEStatic.cs @ 14806

Last change on this file: r14806, checked in by gkronber, 7 years ago

#2700: worked on tSNE, storable and cloning for tSNE state. Added some TODO comments while reviewing.

File size: 26.5 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2016 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */

// Code is based on an implementation from Laurens van der Maaten

/*
*
* Copyright (c) 2014, Laurens van der Maaten (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
*    notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
*    notice, this list of conditions and the following disclaimer in the
*    documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
*    must display the following acknowledgement:
*    This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
*    its contributors may be used to endorse or promote products derived from
*    this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY LAURENS VAN DER MAATEN ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL LAURENS VAN DER MAATEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using HeuristicLab.Collections;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
using HeuristicLab.Random;

namespace HeuristicLab.Algorithms.DataAnalysis {
  [StorableClass]
  public class TSNE<T> {

    [StorableClass]
    public sealed class TSNEState : DeepCloneable {
      // initialized once
      [Storable]
      public IDistance<T> distance;
      [Storable]
      public IRandom random;
      [Storable]
      public double perplexity;
      [Storable]
      public bool exact;
      [Storable]
      public int noDatapoints;
      [Storable]
      public double finalMomentum;
      [Storable]
      public int momSwitchIter;
      [Storable]
      public int stopLyingIter;
      [Storable]
      public double theta;
      [Storable]
      public double eta;
      [Storable]
      public int newDimensions;

      // for approximate version: sparse representation of similarity/distance matrix
      [Storable]
      public double[] valP; // similarity/distance
      [Storable]
      public int[] rowP; // row index
      [Storable]
      public int[] colP; // col index
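      // The three arrays above form a CSR-like sparse matrix: rowP has noDatapoints + 1
      // entries, and the similarities of point i to its neighbors are stored at indices
      // rowP[i] .. rowP[i + 1] - 1 of colP/valP.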

      // for exact version: dense representation of distance/similarity matrix
      [Storable]
      public double[,] p;

      // mapped data
      [Storable]
      public double[,] newData;

      [Storable]
      public int iter;
      [Storable]
      public double currentMomentum;

      // helper variables (updated in each iteration)
      [Storable]
      public double[,] gains;
      [Storable]
      public double[,] uY;
      [Storable]
      public double[,] dY;

      private TSNEState(TSNEState original, Cloner cloner) : base(original, cloner) {
        this.distance = cloner.Clone(original.distance);
        this.random = cloner.Clone(original.random);
        this.perplexity = original.perplexity;
        this.exact = original.exact;
        this.noDatapoints = original.noDatapoints;
        this.finalMomentum = original.finalMomentum;
        this.momSwitchIter = original.momSwitchIter;
        this.stopLyingIter = original.stopLyingIter;
        this.theta = original.theta;
        this.eta = original.eta;
        this.newDimensions = original.newDimensions;
        if(original.valP != null) {
          this.valP = new double[original.valP.Length];
          Array.Copy(original.valP, this.valP, this.valP.Length);
        }
        if(original.rowP != null) {
          this.rowP = new int[original.rowP.Length];
          Array.Copy(original.rowP, this.rowP, this.rowP.Length);
        }
        if(original.colP != null) {
          this.colP = new int[original.colP.Length];
          Array.Copy(original.colP, this.colP, this.colP.Length);
        }
        if(original.p != null) {
          this.p = new double[original.p.GetLength(0), original.p.GetLength(1)];
          Array.Copy(original.p, this.p, this.p.Length);
        }
        this.newData = new double[original.newData.GetLength(0), original.newData.GetLength(1)];
        Array.Copy(original.newData, this.newData, this.newData.Length);
        this.iter = original.iter;
        this.currentMomentum = original.currentMomentum;
        this.gains = new double[original.gains.GetLength(0), original.gains.GetLength(1)];
        Array.Copy(original.gains, this.gains, this.gains.Length);
        this.uY = new double[original.uY.GetLength(0), original.uY.GetLength(1)];
        Array.Copy(original.uY, this.uY, this.uY.Length);
        this.dY = new double[original.dY.GetLength(0), original.dY.GetLength(1)];
        Array.Copy(original.dY, this.dY, this.dY.Length);
      }

      public override IDeepCloneable Clone(Cloner cloner) {
        return new TSNEState(this, cloner);
      }

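      // Sets up a new optimization state: computes the input similarities (dense for the
      // exact algorithm, sparse for the Barnes-Hut approximation), applies the initial
      // exaggeration and initializes the embedding with small Gaussian noise.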
      public TSNEState(T[] data, IDistance<T> distance, IRandom random, int newDimensions, double perplexity, double theta, int stopLyingIter, int momSwitchIter, double momentum, double finalMomentum, double eta) {
        this.distance = distance;
        this.random = random;
        this.newDimensions = newDimensions;
        this.perplexity = perplexity;
        this.theta = theta;
        this.stopLyingIter = stopLyingIter;
        this.momSwitchIter = momSwitchIter;
        this.currentMomentum = momentum;
        this.finalMomentum = finalMomentum;
        this.eta = eta;

        // initialize
        noDatapoints = data.Length;
        if(noDatapoints - 1 < 3 * perplexity)
          throw new ArgumentException("Perplexity too large for the number of data points!");

        exact = Math.Abs(theta) < double.Epsilon;
        newData = new double[noDatapoints, newDimensions];
        dY = new double[noDatapoints, newDimensions];
        uY = new double[noDatapoints, newDimensions];
        gains = new double[noDatapoints, newDimensions];
        for(var i = 0; i < noDatapoints; i++)
          for(var j = 0; j < newDimensions; j++)
            gains[i, j] = 1.0;

        p = null;
        rowP = null;
        colP = null;
        valP = null;

        // Calculate similarities
        if(exact) p = CalculateExactSimilarites(data, distance, perplexity);
        else CalculateApproximateSimilarities(data, distance, perplexity, out rowP, out colP, out valP);

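        // "Early exaggeration": as in van der Maaten's reference implementation, all input
        // similarities are multiplied by 12 for the first iterations so that clusters can
        // separate early; the factor is divided out again after stopLyingIter iterations
        // (see Iterate below).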
        // Lie about the P-values
        if(exact) for(var i = 0; i < noDatapoints; i++) for(var j = 0; j < noDatapoints; j++) p[i, j] *= 12.0;
        else for(var i = 0; i < rowP[noDatapoints]; i++) valP[i] *= 12.0;

        // Initialize solution (randomly)
        var rand = new NormalDistributedRandom(random, 0, 1);
        for(var i = 0; i < noDatapoints; i++)
          for(var j = 0; j < newDimensions; j++)
            newData[i, j] = rand.NextDouble() * .0001;  // TODO const?
      }

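      // Current value of the t-SNE cost function, i.e. the Kullback-Leibler divergence KL(P||Q).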
      public double EvaluateError() {
        return exact ?
          EvaluateErrorExact(p, newData, noDatapoints, newDimensions) :
          EvaluateErrorApproximate(rowP, colP, valP, newData, theta);
      }

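      // Builds the sparse, symmetric and normalized similarity matrix for the Barnes-Hut
      // variant. The reference implementation considers the (int)(3 * perplexity) nearest
      // neighbors of each point, which is where the factor 3 below comes from.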
      private static void CalculateApproximateSimilarities(T[] data, IDistance<T> distance, double perplexity, out int[] rowP, out int[] colP, out double[] valP) {
        // Compute asymmetric pairwise input similarities
        ComputeGaussianPerplexity(data, distance, out rowP, out colP, out valP, perplexity, (int)(3 * perplexity)); // TODO: why 3?
        // Symmetrize input similarities
        int[] sRowP, symColP;
        double[] sValP;
        SymmetrizeMatrix(rowP, colP, valP, out sRowP, out symColP, out sValP);
        rowP = sRowP;
        colP = symColP;
        valP = sValP;
        var sumP = .0;
        for(var i = 0; i < rowP[data.Length]; i++) sumP += valP[i];
        for(var i = 0; i < rowP[data.Length]; i++) valP[i] /= sumP;
      }

      private static double[,] CalculateExactSimilarites(T[] data, IDistance<T> distance, double perplexity) {
        // Compute similarities
        var p = new double[data.Length, data.Length];
        ComputeGaussianPerplexity(data, distance, p, perplexity);
        // Symmetrize input similarities
        for(var n = 0; n < data.Length; n++) {
          for(var m = n + 1; m < data.Length; m++) {
            p[n, m] += p[m, n];
            p[m, n] = p[n, m];
          }
        }
        var sumP = .0;
        for(var i = 0; i < data.Length; i++) for(var j = 0; j < data.Length; j++) sumP += p[i, j];
        for(var i = 0; i < data.Length; i++) for(var j = 0; j < data.Length; j++) p[i, j] /= sumP;
        return p;
      }

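      // Computes the sparse input similarities p_{j|i} over the k nearest neighbors of each
      // point using a Gaussian kernel. beta = 1 / (2 * sigma_i^2) is adapted per point by
      // binary search until the entropy of the row matches log(perplexity). Note that
      // tree.Search is asked for k + 1 neighbors because the first result is presumably the
      // query point itself, which is why indices/distances are accessed at m + 1 below.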
      private static void ComputeGaussianPerplexity(IReadOnlyList<T> x, IDistance<T> distance, out int[] rowP, out int[] colP, out double[] valP, double perplexity, int k) {
        if(perplexity > k) throw new ArgumentException("Perplexity should be lower than k!");

        int n = x.Count;
        // Allocate the memory we need
        rowP = new int[n + 1];
        colP = new int[n * k];
        valP = new double[n * k];
        var curP = new double[n - 1];
        rowP[0] = 0;
        for(var i = 0; i < n; i++) rowP[i + 1] = rowP[i] + k;

        var objX = new List<IndexedItem<T>>();
        for(var i = 0; i < n; i++) objX.Add(new IndexedItem<T>(i, x[i]));

        // Build ball tree on data set
        var tree = new VantagePointTree<IndexedItem<T>>(new IndexedItemDistance<T>(distance), objX); // do we really want to re-create the tree on each call?

        // Loop over all points to find nearest neighbors
        for(var i = 0; i < n; i++) {
          IList<IndexedItem<T>> indices;
          IList<double> distances;

          // Find nearest neighbors
          tree.Search(objX[i], k + 1, out indices, out distances);

          // Initialize some variables for binary search
          var found = false;
          var beta = 1.0;
          var minBeta = double.MinValue;
          var maxBeta = double.MaxValue;
          const double tol = 1e-5;  // TODO: why 1e-5?

          // Iterate until we find a good perplexity
          var iter = 0; double sumP = 0;
          while(!found && iter < 200) {  // TODO 200 iterations always ok?

            // Compute Gaussian kernel row
            for(var m = 0; m < k; m++) curP[m] = Math.Exp(-beta * distances[m + 1]); // TODO distances m+1?

            // Compute entropy of current row
            sumP = double.Epsilon;
            for(var m = 0; m < k; m++) sumP += curP[m];
            var h = .0;
            for(var m = 0; m < k; m++) h += beta * (distances[m + 1] * curP[m]); // TODO: distances m+1?
            h = h / sumP + Math.Log(sumP);

            // Evaluate whether the entropy is within the tolerance level
            var hdiff = h - Math.Log(perplexity);
            if(hdiff < tol && -hdiff < tol) {
              found = true;
            } else {
              if(hdiff > 0) {
                minBeta = beta;
                if(maxBeta.IsAlmost(double.MaxValue) || maxBeta.IsAlmost(double.MinValue))
                  beta *= 2.0;
                else
                  beta = (beta + maxBeta) / 2.0;
              } else {
                maxBeta = beta;
                if(minBeta.IsAlmost(double.MinValue) || minBeta.IsAlmost(double.MaxValue))
                  beta /= 2.0;
                else
                  beta = (beta + minBeta) / 2.0;
              }
            }

            // Update iteration counter
            iter++;
          }

          // Row-normalize current row of P and store in matrix
          for(var m = 0; m < k; m++) curP[m] /= sumP;
          for(var m = 0; m < k; m++) {
            colP[rowP[i] + m] = indices[m + 1].Index;
            valP[rowP[i] + m] = curP[m];
          }
        }
      }
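      // Dense variant used by the exact algorithm: computes the full n x n similarity matrix
      // with the same per-point binary search for beta as in the sparse version above.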
      private static void ComputeGaussianPerplexity(T[] x, IDistance<T> distance, double[,] p, double perplexity) {
        // Compute the distance matrix
        var dd = ComputeDistances(x, distance);

        int n = x.Length;
        // Compute the Gaussian kernel row by row
        for(var i = 0; i < n; i++) {
          // Initialize some variables
          var found = false;
          var beta = 1.0;
          var minBeta = -double.MaxValue;
          var maxBeta = double.MaxValue;
          const double tol = 1e-5;
          double sumP = 0;

          // Iterate until we find a good perplexity
          var iter = 0;
          while(!found && iter < 200) {       // TODO constant

            // Compute Gaussian kernel row
            for(var m = 0; m < n; m++) p[i, m] = Math.Exp(-beta * dd[i][m]);
            p[i, i] = double.Epsilon;

            // Compute entropy of current row
            sumP = double.Epsilon;
            for(var m = 0; m < n; m++) sumP += p[i, m];
            var h = 0.0;
            for(var m = 0; m < n; m++) h += beta * (dd[i][m] * p[i, m]);
            h = h / sumP + Math.Log(sumP);

            // Evaluate whether the entropy is within the tolerance level
            var hdiff = h - Math.Log(perplexity);
            if(hdiff < tol && -hdiff < tol) {
              found = true;
            } else {
              if(hdiff > 0) {
                minBeta = beta;
                if(maxBeta.IsAlmost(double.MaxValue) || maxBeta.IsAlmost(double.MinValue))
                  beta *= 2.0;
                else
                  beta = (beta + maxBeta) / 2.0;
              } else {
                maxBeta = beta;
                if(minBeta.IsAlmost(double.MinValue) || minBeta.IsAlmost(double.MaxValue))
                  beta /= 2.0;
                else
                  beta = (beta + minBeta) / 2.0;
              }
            }

            // Update iteration counter
            iter++;
          }

          // Row normalize P
          for(var m = 0; m < n; m++) p[i, m] /= sumP;
        }
      }

      private static double[][] ComputeDistances(T[] x, IDistance<T> distance) {
        var res = new double[x.Length][];
        for(int r = 0; r < x.Length; r++) {
          var rowV = new double[x.Length];
          // all distances must be symmetric
          for(int c = 0; c < r; c++) {
            rowV[c] = res[c][r];
          }
          rowV[r] = 0.0; // distance to self is zero for all distances
          for(int c = r + 1; c < x.Length; c++) {
            rowV[c] = distance.Get(x[r], x[c]);
          }
          res[r] = rowV;
        }
        return res;
        // return x.Select(m => x.Select(n => distance.Get(m, n)).ToArray()).ToArray();
      }

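      // Exact t-SNE cost: the Kullback-Leibler divergence KL(P||Q), where Q uses the
      // Student-t kernel q_ij proportional to 1 / (1 + ||y_i - y_j||^2).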
      private static double EvaluateErrorExact(double[,] p, double[,] y, int n, int d) {
        // Compute the squared Euclidean distance matrix
        var dd = new double[n, n];
        var q = new double[n, n];
        ComputeSquaredEuclideanDistance(y, n, d, dd); // TODO: we use Euclidean distance regardless of the actual distance function

        // Compute Q-matrix and normalization sum
        var sumQ = double.Epsilon;
        for(var n1 = 0; n1 < n; n1++) {
          for(var m = 0; m < n; m++) {
            if(n1 != m) {
              q[n1, m] = 1 / (1 + dd[n1, m]);
              sumQ += q[n1, m];
            } else q[n1, m] = double.Epsilon;
          }
        }
        for(var i = 0; i < n; i++) for(var j = 0; j < n; j++) q[i, j] /= sumQ;

        // Sum t-SNE error
        var c = .0;
        for(var i = 0; i < n; i++)
          for(var j = 0; j < n; j++) {
            c += p[i, j] * Math.Log((p[i, j] + float.Epsilon) / (q[i, j] + float.Epsilon));
          }
        return c;
      }

      // TODO: there seems to be a bug in the error approximation.
      // The mapping of the approximate tSNE looks good but the error curve never changes.
      private static double EvaluateErrorApproximate(IReadOnlyList<int> rowP, IReadOnlyList<int> colP, IReadOnlyList<double> valP, double[,] y, double theta) {
        // Get estimate of normalization term
        var n = y.GetLength(0);
        var d = y.GetLength(1);
        var tree = new SpacePartitioningTree(y);
        var buff = new double[d];
        double sumQ = 0.0;
        for(var i = 0; i < n; i++) tree.ComputeNonEdgeForces(i, theta, buff, ref sumQ);

        // Loop over all edges to compute t-SNE error
        var c = .0;
        for(var k = 0; k < n; k++) {
          for(var i = rowP[k]; i < rowP[k + 1]; i++) {
            var q = .0;
            for(var j = 0; j < d; j++) buff[j] = y[k, j];
            for(var j = 0; j < d; j++) buff[j] -= y[colP[i], j];
            for(var j = 0; j < d; j++) q += buff[j] * buff[j]; // TODO: squared error is used here!
            q = 1.0 / (1.0 + q) / sumQ;
            c += valP[i] * Math.Log((valP[i] + float.Epsilon) / (q + float.Epsilon));
          }
        }
        return c;
      }
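      // Symmetrizes the sparse similarity matrix in CSR form: the result contains
      // (p_{j|i} + p_{i|j}) / 2 for every pair where at least one point is a neighbor of
      // the other (the division by 2 happens in the last line).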
      private static void SymmetrizeMatrix(IReadOnlyList<int> rowP, IReadOnlyList<int> colP, IReadOnlyList<double> valP, out int[] symRowP, out int[] symColP, out double[] symValP) {

        // Count number of elements and row counts of symmetric matrix
        var n = rowP.Count - 1;
        var rowCounts = new int[n];
        for(var j = 0; j < n; j++) {
          for(var i = rowP[j]; i < rowP[j + 1]; i++) {

            // Check whether element (colP[i], j) is present
            var present = false;
            for(var m = rowP[colP[i]]; m < rowP[colP[i] + 1]; m++) {
              if(colP[m] == j) present = true;
            }
            if(present) rowCounts[j]++;
            else {
              rowCounts[j]++;
              rowCounts[colP[i]]++;
            }
          }
        }
        var noElem = 0;
        for(var i = 0; i < n; i++) noElem += rowCounts[i];

        // Allocate memory for symmetrized matrix
        symRowP = new int[n + 1];
        symColP = new int[noElem];
        symValP = new double[noElem];

        // Construct new row indices for symmetric matrix
        symRowP[0] = 0;
        for(var i = 0; i < n; i++) symRowP[i + 1] = symRowP[i] + rowCounts[i];

        // Fill the result matrix
        var offset = new int[n];
        for(var j = 0; j < n; j++) {
          for(var i = rowP[j]; i < rowP[j + 1]; i++) { // considering element (j, colP[i])

            // Check whether element (colP[i], j) is present
            var present = false;
            for(var m = rowP[colP[i]]; m < rowP[colP[i] + 1]; m++) {
              if(colP[m] != j) continue;
              present = true;
              if(j > colP[i]) continue; // make sure we do not add elements twice
              symColP[symRowP[j] + offset[j]] = colP[i];
              symColP[symRowP[colP[i]] + offset[colP[i]]] = j;
              symValP[symRowP[j] + offset[j]] = valP[i] + valP[m];
              symValP[symRowP[colP[i]] + offset[colP[i]]] = valP[i] + valP[m];
            }

            // If (colP[i], j) is not present, there is no addition involved
            if(!present) {
              symColP[symRowP[j] + offset[j]] = colP[i];
              symColP[symRowP[colP[i]] + offset[colP[i]]] = j;
              symValP[symRowP[j] + offset[j]] = valP[i];
              symValP[symRowP[colP[i]] + offset[colP[i]]] = valP[i];
            }

            // Update offsets
            if(present && (j > colP[i])) continue;
            offset[j]++;
            if(colP[i] != j) offset[colP[i]]++;
          }
        }

        for(var i = 0; i < noElem; i++) symValP[i] /= 2.0;
      }

    }

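    // Minimal usage sketch (illustrative only; it assumes an IDistance<double[]>
    // implementation such as the Euclidean distance provided elsewhere in this plugin and
    // uses HeuristicLab.Random.MersenneTwister as the random source):
    //
    //   var state = TSNE<double[]>.CreateState(data, new EuclideanDistance(), new MersenneTwister(42));
    //   for(var i = 0; i < 1000; i++) TSNE<double[]>.Iterate(state);
    //   var embedding = state.newData; // noDatapoints x newDimensions matrix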
    public static TSNEState CreateState(T[] data, IDistance<T> distance, IRandom random, int newDimensions = 2, double perplexity = 25, double theta = 0,
      int stopLyingIter = 250, int momSwitchIter = 250, double momentum = .5, double finalMomentum = .8, double eta = 200.0
      ) {
      return new TSNEState(data, distance, random, newDimensions, perplexity, theta, stopLyingIter, momSwitchIter, momentum, finalMomentum, eta);
    }

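    // Performs a single gradient-descent step: computes the exact or Barnes-Hut approximated
    // gradient, updates the adaptive per-dimension gains, applies the momentum update and
    // re-centers the embedding. Returns the updated coordinates (the same array as state.newData).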
    public static double[,] Iterate(TSNEState state) {
      if(state.exact)
        ComputeExactGradient(state.p, state.newData, state.noDatapoints, state.newDimensions, state.dY);
      else
        ComputeApproximateGradient(state.rowP, state.colP, state.valP, state.newData, state.noDatapoints, state.newDimensions, state.dY, state.theta);

      // Update gains
      for(var i = 0; i < state.noDatapoints; i++) {
        for(var j = 0; j < state.newDimensions; j++) {
          state.gains[i, j] = Math.Sign(state.dY[i, j]) != Math.Sign(state.uY[i, j])
            ? state.gains[i, j] + .2
            : state.gains[i, j] * .8; // 20% up or 20% down // TODO: +0.2?!

          if(state.gains[i, j] < .01) state.gains[i, j] = .01; // TODO why limit the gains?
        }
      }

      // Perform gradient update (with momentum and gains)
      for(var i = 0; i < state.noDatapoints; i++)
        for(var j = 0; j < state.newDimensions; j++)
          state.uY[i, j] = state.currentMomentum * state.uY[i, j] - state.eta * state.gains[i, j] * state.dY[i, j];

      for(var i = 0; i < state.noDatapoints; i++)
        for(var j = 0; j < state.newDimensions; j++)
          state.newData[i, j] = state.newData[i, j] + state.uY[i, j];

      // Make solution zero-mean
      ZeroMean(state.newData);

      // Stop lying about the P-values after a while, and switch momentum
      if(state.iter == state.stopLyingIter) {
        if(state.exact)
          for(var i = 0; i < state.noDatapoints; i++) for(var j = 0; j < state.noDatapoints; j++) state.p[i, j] /= 12.0; // XXX why 12?
        else
          for(var i = 0; i < state.rowP[state.noDatapoints]; i++) state.valP[i] /= 12.0; // XXX are we not scaling all values?
      }

      if(state.iter == state.momSwitchIter)
        state.currentMomentum = state.finalMomentum;

      state.iter++;
      return state.newData;
    }

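    // Barnes-Hut gradient: the attractive forces are computed exactly over the sparse P
    // entries, while the repulsive forces are approximated with a space-partitioning tree
    // whose accuracy is controlled by theta.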
    private static void ComputeApproximateGradient(int[] rowP, int[] colP, double[] valP, double[,] y, int n, int d, double[,] dC, double theta) {
      var tree = new SpacePartitioningTree(y);
      double sumQ = 0.0;
      var posF = new double[n, d];
      var negF = new double[n, d];
      tree.ComputeEdgeForces(rowP, colP, valP, n, posF);
      var row = new double[d];
      for(var n1 = 0; n1 < n; n1++) {
        Buffer.BlockCopy(negF, sizeof(double) * n1 * d, row, 0, sizeof(double) * d); // BlockCopy counts are in bytes
        tree.ComputeNonEdgeForces(n1, theta, row, ref sumQ);
        // copy the accumulated repulsive forces back into negF; without this the
        // negative term in the gradient below would always be zero
        Buffer.BlockCopy(row, 0, negF, sizeof(double) * n1 * d, sizeof(double) * d);
      }

      // Compute final t-SNE gradient
      for(var i = 0; i < n; i++)
        for(var j = 0; j < d; j++) {
          dC[i, j] = posF[i, j] - negF[i, j] / sumQ;
        }
    }

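    // Exact gradient of the KL divergence:
    //   dC/dy_i = 4 * sum_j (p_ij - q_ij) * (y_i - y_j) / (1 + ||y_i - y_j||^2)
    // The constant factor 4 is omitted here, as in the reference implementation; it only
    // rescales the effective learning rate.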
    private static void ComputeExactGradient(double[,] p, double[,] y, int n, int d, double[,] dC) {

      // Make sure the current gradient contains zeros
      for(var i = 0; i < n; i++) for(var j = 0; j < d; j++) dC[i, j] = 0.0;

      // Compute the squared Euclidean distance matrix
      var dd = new double[n, n];
      ComputeSquaredEuclideanDistance(y, n, d, dd); // TODO: we use Euclidean distance regardless of which distance function is actually set!

      // Compute Q-matrix and normalization sum
      var q = new double[n, n];
      var sumQ = .0;
      for(var n1 = 0; n1 < n; n1++) {
        for(var m = 0; m < n; m++) {
          if(n1 == m) continue;
          q[n1, m] = 1 / (1 + dd[n1, m]);
          sumQ += q[n1, m];
        }
      }

      // Perform the computation of the gradient
      for(var n1 = 0; n1 < n; n1++) {
        for(var m = 0; m < n; m++) {
          if(n1 == m) continue;
          var mult = (p[n1, m] - q[n1, m] / sumQ) * q[n1, m];
          for(var d1 = 0; d1 < d; d1++) {
            dC[n1, d1] += (y[n1, d1] - y[m, d1]) * mult;
          }
        }
      }
    }

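    // Pairwise squared Euclidean distances between the rows of x (the embedded points).
    // TODO: the dataSums-based initialization below is overwritten by the direct computation
    // that follows and could probably be removed.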
    private static void ComputeSquaredEuclideanDistance(double[,] x, int n, int d, double[,] dd) {
      var dataSums = new double[n];
      for(var i = 0; i < n; i++) {
        for(var j = 0; j < d; j++) {
          dataSums[i] += x[i, j] * x[i, j];
        }
      }
      for(var i = 0; i < n; i++) {
        for(var m = 0; m < n; m++) {
          dd[i, m] = dataSums[i] + dataSums[m];
        }
      }
      for(var i = 0; i < n; i++) {
        dd[i, i] = 0.0;
        for(var m = i + 1; m < n; m++) {
          dd[i, m] = 0.0;
          for(var j = 0; j < d; j++) {
            dd[i, m] += (x[i, j] - x[m, j]) * (x[i, j] - x[m, j]);
          }
          dd[m, i] = dd[i, m];
        }
      }
    }

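    // Translates the embedding so that every dimension has zero mean.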
    private static void ZeroMean(double[,] x) {
      // Compute data mean
      var n = x.GetLength(0);
      var d = x.GetLength(1);
      var mean = new double[d];
      for(var i = 0; i < n; i++) {
        for(var j = 0; j < d; j++) {
          mean[j] += x[i, j];
        }
      }
      for(var i = 0; i < d; i++) {
        mean[i] /= n;
      }
      // Subtract data mean
      for(var i = 0; i < n; i++) {
        for(var j = 0; j < d; j++) {
          x[i, j] -= mean[j];
        }
      }
    }
  }
}