using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using HeuristicLab.Common;

namespace HeuristicLab.Algorithms.Bandits.BanditPolicies {
  // UCB1-Tuned policy for the k-armed bandit problem (see Auer et al. 2002).
  // Specific to Bernoulli (or other [0, 1]-bounded) rewards; the 1/4 cap used below is the
  // maximum variance of such a variable.
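  //
  // Selection rule: an arm that has not been tried yet gets the value +infinity; otherwise
  // arm j with n_j tries and average reward x_j gets the index
  //   q_j = x_j + sqrt((ln n / n_j) * min(1/4, V_j(n_j))),
  // where n is the total number of tries over all arms and V_j is the variance bound
  // computed in V() below. The arm with the largest index is selected, ties broken randomly.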
  public class UCB1TunedPolicy : IBanditPolicy {

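    // Typical usage (sketch): create one action info per arm via CreateActionInfo(), ask
    // SelectAction() which arm to play, then record the observed reward on the selected arm's
    // MeanAndVariancePolicyActionInfo (the reward-update call is defined elsewhere in this
    // package and not shown here).
    //
    //   var policy = new UCB1TunedPolicy();
    //   var infos = Enumerable.Range(0, k).Select(_ => policy.CreateActionInfo()).ToArray();
    //   int arm = policy.SelectAction(random, infos);
    //   // play arm, observe reward, update infos[arm]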
    public int SelectAction(Random random, IEnumerable<IBanditPolicyActionInfo> actionInfos) {
      var myActionInfos = actionInfos.OfType<MeanAndVariancePolicyActionInfo>();

      int totalTries = myActionInfos.Sum(a => a.Tries);

      int aIdx = -1;
      double bestQ = double.NegativeInfinity;
      var bestActions = new List<int>();
      foreach (var aInfo in myActionInfos) {
        aIdx++;
        double q;
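        // an arm that has never been tried gets an infinite value, so every arm is played
        // at least once before the UCB1-Tuned index takes over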
        if (aInfo.Tries == 0) {
          q = double.PositiveInfinity;
        } else {
          var sumReward = aInfo.SumReward;
          var tries = aInfo.Tries;

          //var avgReward = aInfo.MaxReward;
          var avgReward = sumReward / tries;
          q = avgReward + Math.Sqrt((Math.Log(totalTries) / tries) * Math.Min(1.0 / 4, V(aInfo, totalTries)));
          // 1/4 is an upper bound on the variance of a Bernoulli (or any [0, 1]-bounded) variable
        }
        if (q > bestQ) {
          bestQ = q;
          bestActions.Clear();
          bestActions.Add(aIdx);
        } else if (q.IsAlmost(bestQ)) {
          bestActions.Add(aIdx);
        }
      }
      Debug.Assert(bestActions.Any());

      return bestActions.SelectRandom(random);
    }

    public IBanditPolicyActionInfo CreateActionInfo() {
      return new MeanAndVariancePolicyActionInfo();
    }

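    // variance upper bound for an arm after s plays (Auer et al. 2002, UCB1-Tuned):
    // V_j(s) = (sample variance of rewards) + sqrt(2 ln n / s), with n = total tries over all arms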
    private double V(MeanAndVariancePolicyActionInfo actionInfo, int totalTries) {
      var s = actionInfo.Tries;
      return actionInfo.RewardVariance + Math.Sqrt(2 * Math.Log(totalTries) / s);
    }

    public override string ToString() {
      return "UCB1TunedPolicy";
    }
  }
}