using System;
using HeuristicLab.Common;

namespace HeuristicLab.Algorithms.Bandits {
  public class GaussianBandit : IBandit {
    public int NumArms { get; private set; }
    public double OptimalExpectedReward { get; private set; } // expected reward of the best arm, for calculating regret
    public int OptimalExpectedRewardArm { get; private set; }
    public int OptimalMaximalRewardArm { get; private set; }

    private readonly Random random;
    private readonly double[] exp;
    private readonly double[] stdDev;

    public GaussianBandit(Random random, int nArms) {
      this.random = random;
      this.NumArms = nArms;
      // expected rewards of the arms are i.i.d. N(0,1) distributed;
      // standard deviations are drawn independently as well
      exp = new double[nArms];
      stdDev = new double[nArms];
      OptimalExpectedReward = double.NegativeInfinity;
      var bestQ = double.NegativeInfinity;
      for (int i = 0; i < nArms; i++) {
        exp[i] = Rand.RandNormal(random);            // expected value of arm i, N(0,1) distributed
        stdDev[i] = 1.0 / Rand.GammaRand(random, 1); // std.dev of arm i, inverse-gamma distributed
        if (exp[i] > OptimalExpectedReward) {
          OptimalExpectedReward = exp[i];
          OptimalExpectedRewardArm = i;
        }
        // the arm with the best expected value is not necessarily the arm most
        // likely to produce extreme rewards; additionally track the arm with
        // the largest 99% quantile
        var q = alglib.invnormaldistribution(0.99) * stdDev[i] + exp[i];
        if (q > bestQ) {
          bestQ = q;
          OptimalMaximalRewardArm = i;
        }
      }
    }

    // pulling an arm results in a normally distributed reward
    // with mean exp[arm] and std.dev stdDev[arm]
    public double Pull(int arm) {
      var z = Rand.RandNormal(random);
      var x = z * stdDev[arm] + exp[arm];
      return x;
    }
  }
}
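namespace HeuristicLab.Algorithms.Bandits {
  // Usage sketch (illustrative, not part of the original bandit implementation):
  // drives GaussianBandit with a uniform-random pull policy and accumulates the
  // pseudo-regret against OptimalExpectedReward. The class name, seed, and loop
  // structure are hypothetical; only GaussianBandit's public members above are
  // assumed.
  public static class GaussianBanditExample {
    public static void Run() {
      var random = new Random(1234);
      var bandit = new GaussianBandit(random, nArms: 10);

      const int nPulls = 1000;
      double totalReward = 0.0;
      for (int t = 0; t < nPulls; t++) {
        int arm = random.Next(bandit.NumArms); // uniform-random arm selection
        totalReward += bandit.Pull(arm);
      }

      // pseudo-regret relative to always pulling the expected-value-optimal arm
      double regret = nPulls * bandit.OptimalExpectedReward - totalReward;
      Console.WriteLine("total reward: {0:F2}, pseudo-regret: {1:F2}", totalReward, regret);
    }
  }
}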