#region License Information
/* HeuristicLab
* Copyright (C) 2002-2018 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
*
* This file is part of HeuristicLab.
*
* HeuristicLab is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* HeuristicLab is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
*/
#endregion
using System;
using System.Collections.Generic;
using System.Linq;
using HeuristicLab.Core;
namespace HeuristicLab.Random {
/// <summary>
/// See Radford Neal, MCMC using Hamiltonian dynamics, 2011, https://arxiv.org/pdf/1206.1901.pdf
/// Algorithm from the paper:
///
/// HMC = function (U, grad_U, epsilon, L, current_q)
/// {
/// q = current_q
/// p = rnorm(length(q), 0, 1) # independent standard normal variates
/// current_p = p
/// # Make a half step for momentum at the beginning
/// p = p - epsilon * grad_U(q) / 2
/// # Alternate full steps for position and momentum
/// for (i in 1:L)
/// {
/// # Make a full step for the position
/// q = q + epsilon* p
/// # Make a full step for the momentum, except at end of trajectory
/// if (i!=L) p = p - epsilon * grad_U(q)
/// }
/// # Make a half step for momentum at the end.
/// p = p - epsilon* grad_U(q) / 2
/// # Negate momentum at end of trajectory to make the proposal symmetric
/// p = -p
/// # Evaluate potential and kinetic energies at start and end of trajectory
/// current_U = U(current_q)
/// current_K = sum(current_p^2) / 2
/// proposed_U = U(q)
/// proposed_K = sum(p^2) / 2
/// # Accept or reject the state at end of trajectory, returning either
/// # the position at the end of the trajectory or the initial position
/// if (runif(1) < exp(current_U-proposed_U+current_K-proposed_K)) {
/// return (q) # accept
/// }
/// else {
/// return (current_q) # reject
/// }
/// }
/// </summary>
public static class HamiltonianMonteCarlo {
  /// <summary>
  /// Produces an endless Markov chain of positions, starting from <paramref name="startingPosition"/>.
  /// Each element is generated by one HMC transition (see <see cref="Sample"/>).
  /// </summary>
  /// <param name="startingPosition">Initial position q0; never mutated (a private copy is made).</param>
  /// <param name="potentialEnergyFunction">U: maps a position to (potential energy, gradient of U).</param>
  /// <param name="uniformRandom">Uniform random source; also used to derive the Gaussian momenta.</param>
  /// <param name="stepSize">Leapfrog step size (epsilon in Neal's notation).</param>
  /// <param name="steps">Number of leapfrog steps (L in Neal's notation).</param>
  /// <returns>An infinite lazy sequence of sampled positions; callers must bound enumeration (e.g. Take).</returns>
  public static IEnumerable<double[]> SampleChain(double[] startingPosition,
    Func<double[], Tuple<double, double[]>> potentialEnergyFunction,
    IRandom uniformRandom, double stepSize, int steps = 10) {
    // Work on a private copy so the caller's array is never mutated by the chain.
    var x = (double[])startingPosition.Clone();
    while (true) {
      x = Sample(x, potentialEnergyFunction, uniformRandom, stepSize, steps);
      yield return x;
    }
  }

  /// <summary>
  /// Performs a single Hamiltonian Monte Carlo transition (Neal 2011, Figure 2):
  /// draws Gaussian momenta, simulates Hamiltonian dynamics for <paramref name="steps"/>
  /// leapfrog steps of size <paramref name="stepSize"/>, then accepts or rejects the
  /// proposed position with a Metropolis test.
  /// </summary>
  /// <param name="startingPosition">Current position (q); not mutated — new arrays are allocated.</param>
  /// <param name="potentialEnergyFunction">U: maps a position to (potential energy, gradient of U).</param>
  /// <param name="uniformRandom">Uniform random source for momenta and the acceptance test.</param>
  /// <param name="stepSize">Leapfrog step size (epsilon).</param>
  /// <param name="steps">Number of leapfrog steps (L).</param>
  /// <returns>The proposed position on acceptance, otherwise the unchanged starting position.</returns>
  public static double[] Sample(
    double[] startingPosition,
    Func<double[], Tuple<double, double[]>> potentialEnergyFunction, // U
    IRandom uniformRandom, double stepSize, int steps) {
    // step 1
    // New values for the momentum variables are randomly drawn from their Gaussian
    // distribution, independently of the current values of the position variables.
    // step 2
    // A Metropolis update is performed, using Hamiltonian dynamics to propose a new state.
    // Starting with the current state, Hamiltonian dynamics is simulated for L steps
    // using the Leapfrog method with a step size of eps.
    // L and eps are parameters of the algorithm, which need to be tuned to obtain
    // good performance.
    // The proposed state is accepted as the next state of the Markov chain with
    // probability min(1, exp(-dH)).
    // If the proposed state is not accepted the next state is the same as the current state
    // (and is counted again when estimating the expectation of some function of
    // state by its average over states of the Markov chain).

    // rename variables to match Neal's algorithm
    var current_q = startingPosition;
    var epsilon = stepSize;
    var L = steps;
    var q = current_q;
    // p = rnorm(length(q), 0, 1)  # independent standard normal variates
    var p = q
      .Select(_ => NormalDistributedRandom.NextDouble(uniformRandom, 0, 1))
      .ToArray();
    var current_p = p;
    // Make a half step for momentum at the beginning:
    // p = p - epsilon * grad_U(q) / 2
    p = VectorSum(p, ScaleVector(-0.5 * epsilon, potentialEnergyFunction(q).Item2));
    // Alternate full steps for position and momentum
    for (int i = 1; i <= L; i++) {
      // Make a full step for the position: q = q + epsilon * p
      q = VectorSum(q, ScaleVector(epsilon, p));
      // Make a full step for the momentum, except at end of trajectory
      if (i != L)
        p = VectorSum(p, ScaleVector(-epsilon, potentialEnergyFunction(q).Item2));
    }
    // Make a half step for momentum at the end:
    // p = p - epsilon * grad_U(q) / 2
    p = VectorSum(p, ScaleVector(-0.5 * epsilon, potentialEnergyFunction(q).Item2));
    // Negate momentum at end of trajectory to make the proposal symmetric
    p = ScaleVector(-1.0, p);
    // Evaluate potential and kinetic energies at start and end of trajectory
    double current_U = potentialEnergyFunction(current_q).Item1;
    double current_K = 0.5 * current_p.Sum(pi => pi * pi);  // sum(current_p^2) / 2
    double proposed_U = potentialEnergyFunction(q).Item1;
    double proposed_K = 0.5 * p.Sum(pi => pi * pi);         // sum(p^2) / 2
    // Accept or reject the state at end of trajectory, returning either
    // the position at the end of the trajectory or the initial position.
    if (uniformRandom.NextDouble() < Math.Exp(current_U - proposed_U + current_K - proposed_K)) {
      return q; // accept
    } else {
      return current_q; // reject
    }
  }

  #region vector helpers
  // Element-wise sum of two equal-length vectors; returns a new array.
  private static double[] VectorSum(double[] x, double[] y) {
    double[] s = new double[x.Length];
    for (int i = 0; i < x.Length; i++)
      s[i] = x[i] + y[i];
    return s;
  }
  // Scales every component of x by the scalar a; returns a new array.
  private static double[] ScaleVector(double a, double[] x) {
    double[] s = new double[x.Length];
    for (int i = 0; i < x.Length; i++)
      s[i] = a * x[i];
    return s;
  }
  #endregion
}
}