Last change on this file since r13620 was r12893, checked in by gkronber, 9 years ago:
#2283: experiments on grammatical optimization algorithms (max reward instead of average reward, ...).
File size: 1.7 KB
Rev | Line | |
---|
[11710] | 1 | using System;
|
---|
| 2 | using System.Collections.Generic;
|
---|
[11727] | 3 | using System.Diagnostics;
|
---|
[11710] | 4 | using System.Linq;
|
---|
| 5 | using System.Text;
|
---|
| 6 | using System.Threading.Tasks;
|
---|
[11747] | 7 | using HeuristicLab.Common;
|
---|
[11710] | 8 |
|
---|
namespace HeuristicLab.Algorithms.Bandits.BanditPolicies {
  // Policy for the k-armed bandit problem based on UCB1 (Auer et al. 2002,
  // "Finite-time Analysis of the Multiarmed Bandit Problem").
  // NOTE(review): this is an experimental variant (#2283): the exploitation term
  // uses the maximum observed reward of each action instead of the average
  // reward, and the exploration term is scaled by MaxReward so rewards need
  // not lie in [0, 1].
  public class UCB1Policy : IBanditPolicy {
    // Upper bound of the reward range; scales the exploration term of UCB1.
    public double MaxReward { get; private set; }

    public UCB1Policy(double maxReward = 1.0) {
      this.MaxReward = maxReward;
    }

    // Returns the index (within the filtered DefaultPolicyActionInfo sequence)
    // of the action with the largest upper confidence bound. Untried actions
    // get q = +infinity and are therefore selected first; (near-)ties are
    // broken uniformly at random.
    public int SelectAction(Random random, IEnumerable<IBanditPolicyActionInfo> actionInfos) {
      // Materialize once: OfType<> is a deferred query and the sequence is
      // enumerated twice below (Sum and the foreach); without ToArray() the
      // filter would run twice and the caller's sequence would be re-enumerated.
      var myActionInfos = actionInfos.OfType<DefaultPolicyActionInfo>().ToArray();
      double bestQ = double.NegativeInfinity;
      int totalTries = myActionInfos.Sum(a => a.Tries);

      var bestActions = new List<int>();
      int aIdx = -1;
      foreach (var aInfo in myActionInfos) {
        aIdx++;
        double q;
        if (aInfo.Tries == 0) {
          // force exploration of actions that have never been tried
          q = double.PositiveInfinity;
        } else {
          // original UCB1 (average reward as exploitation term):
          //q = aInfo.SumReward / aInfo.Tries + MaxReward * Math.Sqrt((2 * Math.Log(totalTries)) / aInfo.Tries);
          // experimental variant (#2283): max observed reward instead of average
          q = aInfo.MaxReward + MaxReward * Math.Sqrt((2 * Math.Log(totalTries)) / aInfo.Tries);
        }
        if (q > bestQ) {
          bestQ = q;
          bestActions.Clear();
          bestActions.Add(aIdx);
        } else if (q.IsAlmost(bestQ)) {
          // IsAlmost: tolerance-based equality (HeuristicLab.Common) to collect near-ties
          bestActions.Add(aIdx);
        }
      }
      Debug.Assert(bestActions.Any());
      return bestActions.SelectRandom(random);
    }

    public IBanditPolicyActionInfo CreateActionInfo() {
      return new DefaultPolicyActionInfo();
    }

    public override string ToString() {
      return string.Format("UCB1Policy({0})", MaxReward);
    }
  }
}
|
---|
Note: See TracBrowser for help on using the repository browser.