Changeset 14806 for branches/TSNE/HeuristicLab.Algorithms.DataAnalysis
- Timestamp: 03/30/17 17:54:45 (8 years ago)
- Location: branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4
- Files: 3 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with "+"
- Removed lines are prefixed with "-"
branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4/Interfaces/TSNEInterfaces/ISpacePartitioningTree.cs
r14788 → r14806

  public interface ISpacePartitioningTree {

-   double[,] Data { set; }
    int GetDepth();
-   ISpacePartitioningTree GetParent();
    bool Insert(int newIndex);
    void Subdivide();
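With the Data setter gone, an implementation now receives its data exactly once, up front. A minimal usage sketch follows; it is an assumption, not part of the changeset, and presumes the single-argument SpacePartitioningTree constructor whose body appears in the next file (it computes the bounding cell and inserts every row itself):

// Hedged sketch: build a tree over an n-by-d matrix; rows are addressed by index.
double[,] points = { { 0.0, 0.0 }, { 1.0, 0.5 }, { 2.0, 2.0 } };
var tree = new SpacePartitioningTree(points);      // ctor runs Init(...) and Fill(n)
var negF = new double[2];                          // accumulates the repulsive force on point 0
var sumQ = 0.0;                                    // accumulates the global normalization term
tree.ComputeNonEdgeForces(0, 0.5, negF, ref sumQ); // theta = 0.5: moderate approximation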
branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4/TSNE/SpacePartitioningTree.cs
r14788 → r14806

Most of this diff is mechanical: "for (", "if (" and "while (" lose the space before the parenthesis, and the public Data property is renamed to the private field data throughout. The substantive changes, with unchanged context lines unprefixed:

  // Indices in this space-partitioning tree node, corresponding center-of-mass, and list of all children
- public double[,] Data { private get; set; } // TODO public setter, private getter?
+ private double[,] data;

  private double[] centerOfMass;
…
  var width = new double[d];
- for (var i = 0; i < d; i++) width[i] = Math.Max(maxY[i] - meanY[i], meanY[i] - maxY[i]) + 1e-5;
+ for(var i = 0; i < d; i++) width[i] = Math.Max(maxY[i] - meanY[i], meanY[i] - maxY[i]) + 1e-5; // TODO constant?
  Init(null, inpData, meanY, width);
  Fill(n);
…
  // Ignore objects which do not belong in this quad tree
  var point = new double[dimension];
- Buffer.BlockCopy(Data, sizeof(double) * dimension * newIndex, point, 0, sizeof(double) * dimension);
+ Buffer.BlockCopy(data, sizeof(double) * dimension * newIndex, point, 0, sizeof(double) * dimension);
  if(!boundary.ContainsPoint(point)) return false;
…
- if (Math.Abs(point[d] - Data[index[n], d]) < double.Epsilon) continue;
+ if(Math.Abs(point[d] - data[index[n], d]) < double.Epsilon) continue;
…
- children[i] = new SpacePartitioningTree(this, Data, newCorner, newWidth);
+ children[i] = new SpacePartitioningTree(this, data, newCorner, newWidth);
…
- for (var n = 0; n < size; n++) Buffer.BlockCopy(Data, sizeof(double) * dimension * n, row, 0, sizeof(double) * dimension);
+ for(var n = 0; n < size; n++) Buffer.BlockCopy(data, sizeof(double) * dimension * n, row, 0, sizeof(double) * dimension);
…
- public void ComputeNonEdgeForces(int pointIndex, double theta, double[] negF, ref double sumQ)
- {
+ public void ComputeNonEdgeForces(int pointIndex, double theta, double[] negF, ref double sumQ) {
    // Make sure that we spend no time on empty nodes or self-interactions
    if(cumulativeSize == 0 || (isLeaf && size == 1 && index[0] == pointIndex)) return;

    // Compute distance between point and center-of-mass
+   // TODO: squared distance with normalized axes is used here!
    var D = .0;
-   for (var d = 0; d < dimension; d++) buff[d] = Data[pointIndex, d] - centerOfMass[d];
+   for(var d = 0; d < dimension; d++) buff[d] = data[pointIndex, d] - centerOfMass[d];
    for(var d = 0; d < dimension; d++) D += buff[d] * buff[d];

    // Check whether we can use this node as a "summary"
    var maxWidth = 0.0;
    for(var d = 0; d < dimension; d++) {
      var curWidth = boundary.GetWidth(d);
      maxWidth = (maxWidth > curWidth) ? maxWidth : curWidth;
    }
    if(isLeaf || maxWidth / Math.Sqrt(D) < theta) {
      // Compute and add t-SNE force between point and current node
      …
      sumQ += mult;
      mult *= D;
      for(var d = 0; d < dimension; d++) negF[d] += mult * buff[d];
    } else {
      // Recursively apply Barnes-Hut to children
      for(var i = 0; i < noChildren; i++) children[i].ComputeNonEdgeForces(pointIndex, theta, negF, ref sumQ);
    }
  }
…
  // Compute pairwise distance and Q-value
+ // uses squared distance
  var d = 1.0;
- for (var j = 0; j < dimension; j++) buff[j] = Data[k, j] - Data[colP[i], j];
+ for(var j = 0; j < dimension; j++) buff[j] = data[k, j] - data[colP[i], j];
  for(var j = 0; j < dimension; j++) d += buff[j] * buff[j];
…
- Data = inpData;
+ data = inpData;
…
- private class Cell
+ private class Cell {
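A note on the acceptance test above, if(isLeaf || maxWidth / Math.Sqrt(D) < theta): this is the standard Barnes-Hut criterion, letting a cell stand in for all of its points when its widest side is small relative to its distance from the query point. The sketch below isolates the summary step in plain C# (not HeuristicLab API); since the lines computing mult are elided in this view, the body is filled in with the formula from van der Maaten's bhtsne reference implementation and should be read as an assumption about this code, not a quotation of it.

// Hedged sketch of the Barnes-Hut summary step (assumed bhtsne formula).
// sqDist is the squared distance D to the cell's center of mass, buff the
// componentwise difference, cellSize the cumulativeSize of the cell.
static void AddSummaryForce(double[] buff, double sqDist, int cellSize,
                            double[] negF, ref double sumQ) {
  var q = 1.0 / (1.0 + sqDist); // Student-t kernel used by t-SNE
  var mult = cellSize * q;      // the cell stands in for cellSize points
  sumQ += mult;                 // feeds the global normalization term
  mult *= q;
  for (var d = 0; d < buff.Length; d++) negF[d] += mult * buff[d];
}

With theta = 0 the test never passes, so the recursion reaches every leaf and the evaluation is exact; larger values summarize more aggressively, trading accuracy for speed.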
branches/TSNE/HeuristicLab.Algorithms.DataAnalysis/3.4/TSNE/TSNEStatic.cs
r14788 → r14806

Beyond the same whitespace normalization as above, this revision (1) marks every field of TSNEState as [Storable], (2) implements the previously empty cloning constructor as a real deep copy, (3) replaces the LINQ one-liner in ComputeDistances with explicit loops that compute each pairwise distance once and mirror it across the diagonal, and (4) adds TODO/XXX comments recording open questions. Unchanged context lines are unprefixed:

  public sealed class TSNEState : DeepCloneable {
    // initialized once
+   [Storable]
    public IDistance<T> distance;
+   [Storable]
    public IRandom random;
+   [Storable]
    public double perplexity;
+   [Storable]
    public bool exact;
+   [Storable]
    public int noDatapoints;
+   [Storable]
    public double finalMomentum;
+   [Storable]
    public int momSwitchIter;
+   [Storable]
    public int stopLyingIter;
+   [Storable]
    public double theta;
+   [Storable]
    public double eta;
+   [Storable]
    public int newDimensions;

    // for approximate version: sparse representation of similarity/distance matrix
+   [Storable]
    public double[] valP; // similarity/distance
+   [Storable]
    public int[] rowP; // row index
+   [Storable]
    public int[] colP; // col index

    // for exact version: dense representation of distance/similarity matrix
+   [Storable]
    public double[,] p;

    // mapped data
+   [Storable]
    public double[,] newData;

+   [Storable]
    public int iter;
+   [Storable]
    public double currentMomentum;

    // helper variables (updated in each iteration)
+   [Storable]
    public double[,] gains;
+   [Storable]
    public double[,] uY;
+   [Storable]
    public double[,] dY;

    private TSNEState(TSNEState original, Cloner cloner) : base(original, cloner) {
+     this.distance = cloner.Clone(original.distance);
+     this.random = cloner.Clone(original.random);
+     this.perplexity = original.perplexity;
+     this.exact = original.exact;
+     this.noDatapoints = original.noDatapoints;
+     this.finalMomentum = original.finalMomentum;
+     this.momSwitchIter = original.momSwitchIter;
+     this.stopLyingIter = original.stopLyingIter;
+     this.theta = original.theta;
+     this.eta = original.eta;
+     this.newDimensions = original.newDimensions;
+     if(original.valP != null) {
+       this.valP = new double[original.valP.Length];
+       Array.Copy(original.valP, this.valP, this.valP.Length);
+     }
+     if(original.rowP != null) {
+       this.rowP = new int[original.rowP.Length];
+       Array.Copy(original.rowP, this.rowP, this.rowP.Length);
+     }
+     if(original.colP != null) {
+       this.colP = new int[original.colP.Length];
+       Array.Copy(original.colP, this.colP, this.colP.Length);
+     }
+     if(original.p != null) {
+       this.p = new double[original.p.GetLength(0), original.p.GetLength(1)];
+       Array.Copy(original.p, this.p, this.p.Length);
+     }
+     this.newData = new double[original.newData.GetLength(0), original.newData.GetLength(1)];
+     Array.Copy(original.newData, this.newData, this.newData.Length);
+     this.iter = original.iter;
+     this.currentMomentum = original.currentMomentum;
+     this.gains = new double[original.gains.GetLength(0), original.gains.GetLength(1)];
+     Array.Copy(original.gains, this.gains, this.gains.Length);
+     this.uY = new double[original.uY.GetLength(0), original.uY.GetLength(1)];
+     Array.Copy(original.uY, this.uY, this.uY.Length);
+     this.dY = new double[original.dY.GetLength(0), original.dY.GetLength(1)];
+     Array.Copy(original.dY, this.dY, this.dY.Length);
    }

    public override IDeepCloneable Clone(Cloner cloner) {
      return new TSNEState(this, cloner);
…
- while (!found && iter < 200) {
+ while(!found && iter < 200) { // TODO 200 iterations always ok?

  // Compute Gaussian kernel row
- for (var m = 0; m < k; m++) curP[m] = Math.Exp(-beta * distances[m + 1]);
+ for(var m = 0; m < k; m++) curP[m] = Math.Exp(-beta * distances[m + 1]); // TODO distances m+1?
…
- for (var m = 0; m < k; m++) h += beta * (distances[m + 1] * curP[m]);
+ for(var m = 0; m < k; m++) h += beta * (distances[m + 1] * curP[m]); // TODO: distances m+1?
  h = h / sumP + Math.Log(sumP);
…
- while (!found && iter < 200) {
+ while(!found && iter < 200) { // TODO constant
…
  private static double[][] ComputeDistances(T[] x, IDistance<T> distance) {
-   return x.Select(m => x.Select(n => distance.Get(m, n)).ToArray()).ToArray();
+   var res = new double[x.Length][];
+   for(int r = 0; r < x.Length; r++) {
+     var rowV = new double[x.Length];
+     // all distances must be symmetric
+     for(int c = 0; c < r; c++) {
+       rowV[c] = res[c][r];
+     }
+     rowV[r] = 0.0; // distance to self is zero for all distances
+     for(int c = r + 1; c < x.Length; c++) {
+       rowV[c] = distance.Get(x[r], x[c]);
+     }
+     res[r] = rowV;
+   }
+   return res;
+   // return x.Select(m => x.Select(n => distance.Get(m, n)).ToArray()).ToArray();
  }
…
- ComputeSquaredEuclideanDistance(y, n, d, dd);
+ ComputeSquaredEuclideanDistance(y, n, d, dd); // TODO: we use Euclidian distance regardless of the actual distance function
…
+ // TODO: there seems to be a bug in the error approximation.
+ // The mapping of the approximate tSNE looks good but the error curve never changes.
  private static double EvaluateErrorApproximate(IReadOnlyList<int> rowP, IReadOnlyList<int> colP, IReadOnlyList<double> valP, double[,] y, double theta) {
…
- for (var j = 0; j < d; j++) q += buff[j] * buff[j];
+ for(var j = 0; j < d; j++) q += buff[j] * buff[j]; // TODO: squared error is used here!
  q = 1.0 / (1.0 + q) / sumQ;
  c += valP[i] * Math.Log((valP[i] + float.Epsilon) / (q + float.Epsilon));
…
- // Divide the result by two
- for (var i = 0; i < noElem; i++) symValP[i] /= 2.0;
+ for(var i = 0; i < noElem; i++) symValP[i] /= 2.0;
…
    state.gains[i, j] = Math.Sign(state.dY[i, j]) != Math.Sign(state.uY[i, j])
      ? state.gains[i, j] + .2
      : state.gains[i, j] * .8; // 20% up or 20% down // TODO: +0.2?!
- if (state.gains[i, j] < .01) state.gains[i, j] = .01;
+ if(state.gains[i, j] < .01) state.gains[i, j] = .01; // TODO why limit the gains?
…
  if(state.iter == state.stopLyingIter) {
    if(state.exact)
      for(var i = 0; i < state.noDatapoints; i++) for(var j = 0; j < state.noDatapoints; j++) state.p[i, j] /= 12.0; //XXX why 12?
    else
-     for (var i = 0; i < state.rowP[state.noDatapoints]; i++) state.valP[i] /= 12.0;
+     for(var i = 0; i < state.rowP[state.noDatapoints]; i++) state.valP[i] /= 12.0; // XXX are we not scaling all values?
  }
…
- ComputeSquaredEuclideanDistance(y, n, d, dd);
+ ComputeSquaredEuclideanDistance(y, n, d, dd); // TODO: we use Euclidian distance regardless which distance function is actually set!
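Two of the TODOs above have known answers in the t-SNE literature. The factor 12.0 ("//XXX why 12?") is the early-exaggeration constant of van der Maaten's reference implementation: P-values are inflated for the first stopLyingIter iterations so clusters separate early, then scaled back. The gain update ("// TODO: +0.2?!", "// TODO why limit the gains?") is the reference implementation's per-component step-size adaptation, in the spirit of Jacobs' delta-bar-delta rule. A hedged plain-C# restatement, isolated for clarity (not HeuristicLab API):

// Hedged restatement of the gain update in Iterate, as in the reference
// t-SNE implementation: grow additively while the velocity still points
// against the gradient (steady descent), shrink multiplicatively once they
// align (overshoot), and floor at 0.01 so no dimension ever stops adapting.
static double UpdateGain(double gain, double gradient, double velocity) {
  gain = Math.Sign(gradient) != Math.Sign(velocity)
       ? gain + 0.2
       : gain * 0.8;
  return gain < 0.01 ? 0.01 : gain;
}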