using System;
using HeuristicLab.Common;

namespace HeuristicLab.Algorithms.Bandits {
  // k-armed bandit with normally distributed rewards. Arm parameters are drawn
  // once in the constructor; Pull(arm) then samples rewards from the fixed
  // per-arm distributions, optionally truncated to (MinReward, MaxReward].
  public class GaussianBandit : IBandit {
    public int NumArms { get; private set; }
    public double OptimalExpectedReward { get; private set; } // expected reward of the best arm, for calculating regret
    public int OptimalExpectedRewardArm { get; private set; }
    public int OptimalMaximalRewardArm { get; private set; }
    public double MaxReward { get; private set; }
    public double MinReward { get; private set; }

    private readonly Random random;
    private readonly double[] exp;
    private readonly double[] stdDev;

    public GaussianBandit(Random random, int nArms, double minReward = double.NegativeInfinity, double maxReward = double.PositiveInfinity) {
      this.MaxReward = maxReward;
      this.MinReward = minReward;
      this.random = random;
      this.NumArms = nArms;
      // arm parameters are drawn i.i.d.: means are N(0,1) distributed,
      // standard deviations are inverse-gamma distributed
      exp = new double[nArms];
      stdDev = new double[nArms];
      OptimalExpectedReward = double.NegativeInfinity;
      var bestQ = double.NegativeInfinity;
      for (int i = 0; i < nArms; i++) {
        exp[i] = Rand.RandNormal(random);            // expected value of arm i is N(0,1) distributed
        stdDev[i] = 1.0 / Rand.GammaRand(random, 1); // std.dev. of arm i is inverse-gamma distributed
        if (exp[i] > OptimalExpectedReward) {
          OptimalExpectedReward = exp[i];
          OptimalExpectedRewardArm = i;
        }
        // the "maximal reward" arm is the one with the highest 99.9% quantile
        var q = alglib.invnormaldistribution(0.999) * stdDev[i] + exp[i];
        if (q > bestQ) {
          bestQ = q;
          OptimalMaximalRewardArm = i;
        }
      }
    }

    // Pulling an arm yields a normally distributed reward with mean exp[arm] and
    // standard deviation stdDev[arm]; rejection sampling truncates the reward to
    // the interval (MinReward, MaxReward].
    public double Pull(int arm) {
      double x;
      do {
        var z = Rand.RandNormal(random);
        x = z * stdDev[arm] + exp[arm];
      } while (x <= MinReward || x > MaxReward);
      return x;
    }
  }
}
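
// A minimal usage sketch, not part of the original class: it pulls arms with a
// uniform random policy and reports the average regret per pull against
// OptimalExpectedReward. The GaussianBanditDemo class name and the uniform
// policy are illustrative assumptions; IBandit is assumed to expose NumArms,
// Pull(int) and OptimalExpectedReward as used above.
namespace HeuristicLab.Algorithms.Bandits {
  public static class GaussianBanditDemo {
    public static void Run() {
      var random = new Random(1234);
      var bandit = new GaussianBandit(random, nArms: 10);

      const int pulls = 1000;
      double totalReward = 0.0;
      for (int t = 0; t < pulls; t++) {
        int arm = random.Next(bandit.NumArms); // uniform random policy, for illustration only
        totalReward += bandit.Pull(arm);
      }

      // average regret per pull relative to always playing the best arm in expectation
      double regretPerPull = bandit.OptimalExpectedReward - totalReward / pulls;
      Console.WriteLine("avg regret per pull: " + regretPerPull);
    }
  }
}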