
source: branches/HeuristicLab.Hive-3.4/sources/HeuristicLab.HiveEngine/3.4/HiveEngine.cs @ 6373

Last change on this file was revision 6373, checked in by cneumuel, 13 years ago

#1233

  • moved ExperimentManager into separate plugin
  • moved Administration into separate plugin
File size: 16.5 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2011 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using HeuristicLab.Clients.Hive;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;

namespace HeuristicLab.HiveEngine {
  /// <summary>
  /// Represents an engine that executes parallelizable operations on the hive.
  /// </summary>
  [StorableClass]
  [Item("Hive Engine", "Engine for parallel execution on the hive. You need to enable `Parallel` for at least one operator in your operator graph to have all child operations parallelized. Those child operations must not have side effects on a higher scope.")]
  public class HiveEngine : Engine {
    private static object locker = new object();
    private static object logLocker = new object();
    private CancellationToken cancellationToken;
    private bool firstRun = true;

    [Storable]
    private IOperator currentOperator;

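    // Semicolon-separated list of Hive resource (group) names the jobs may run on;
    // defaults to "HEAL" (see constructor) and is resolved to ids in GetResourceIds().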
    [Storable]
    public string ResourceNames { get; set; }

    [Storable]
    private int priority;
    public int Priority {
      get { return priority; }
      set { priority = value; }
    }

    [Storable]
    private TimeSpan executionTimeOnHive;
    public TimeSpan ExecutionTimeOnHive {
      get { return executionTimeOnHive; }
      set {
        if (value != executionTimeOnHive) {
          executionTimeOnHive = value;
          OnExecutionTimeOnHiveChanged();
        }
      }
    }

    [Storable]
    private bool useLocalPlugins;
    public bool UseLocalPlugins {
      get { return useLocalPlugins; }
      set { useLocalPlugins = value; }
    }

    // [Storable] -> HiveExperiment can't be storable, so RefreshableHiveExperiment can't be stored
    private ItemCollection<RefreshableHiveExperiment> hiveExperiments = new ItemCollection<RefreshableHiveExperiment>();
    public ItemCollection<RefreshableHiveExperiment> HiveExperiments {
      get { return hiveExperiments; }
      set { hiveExperiments = value; }
    }

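    // The plugin lists below are populated from the Hive service on the first run (see Run)
    // and cached here; they are neither cloned nor persisted with the engine.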
    private List<Plugin> onlinePlugins;
    public List<Plugin> OnlinePlugins {
      get { return onlinePlugins; }
      set { onlinePlugins = value; }
    }

    private List<Plugin> alreadyUploadedPlugins;
    public List<Plugin> AlreadyUploadedPlugins {
      get { return alreadyUploadedPlugins; }
      set { alreadyUploadedPlugins = value; }
    }

    #region constructors and cloning
    public HiveEngine() {
      ResourceNames = "HEAL";
      Priority = 0;
    }

    [StorableConstructor]
    protected HiveEngine(bool deserializing) : base(deserializing) { }
    protected HiveEngine(HiveEngine original, Cloner cloner)
      : base(original, cloner) {
      this.ResourceNames = original.ResourceNames;
      this.currentOperator = cloner.Clone(original.currentOperator);
      this.priority = original.priority;
      this.executionTimeOnHive = original.executionTimeOnHive;
      this.useLocalPlugins = original.useLocalPlugins;
      // this.hiveExperiments = cloner.Clone(original.hiveExperiments); do not clone hiveExperiments - otherwise they would be sent with every job
    }
    public override IDeepCloneable Clone(Cloner cloner) {
      return new HiveEngine(this, cloner);
    }
    #endregion
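    // Usage sketch (not part of the original file; the property values are only examples):
    // a HiveEngine replaces the default sequential engine of an algorithm, and at least one
    // operator in the operator graph must have Parallel enabled, e.g.
    //   var engine = new HiveEngine { ResourceNames = "HEAL", Priority = 0, UseLocalPlugins = false };
    //   algorithm.Engine = engine; // assumes algorithm derives from EngineAlgorithm
    //   algorithm.Start();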

    #region Events
    protected override void OnPrepared() {
      base.OnPrepared();
      this.ExecutionTimeOnHive = TimeSpan.Zero;
    }

    public event EventHandler ExecutionTimeOnHiveChanged;
    protected virtual void OnExecutionTimeOnHiveChanged() {
      var handler = ExecutionTimeOnHiveChanged;
      if (handler != null) handler(this, EventArgs.Empty);
    }
    #endregion

    protected override void Run(CancellationToken cancellationToken) {
      this.cancellationToken = cancellationToken;
      Run(ExecutionStack);
    }

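    // Core execution loop: pops operations off the execution stack, executes atomic operations
    // locally and, for OperationCollections marked as Parallel, wraps each child operation in an
    // EngineJob and runs the whole batch on the hive via ExecuteOnHive.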
    private void Run(object state) {
      Stack<IOperation> executionStack = (Stack<IOperation>)state;
      IOperation next;
      OperationCollection coll;
      IAtomicOperation operation;

      if (firstRun) {
        TaskScheduler.UnobservedTaskException += new EventHandler<UnobservedTaskExceptionEventArgs>(TaskScheduler_UnobservedTaskException);
        this.OnlinePlugins = ServiceLocator.Instance.CallHiveService(s => s.GetPlugins()).Where(x => x.IsLocal == false).ToList();
        this.AlreadyUploadedPlugins = new List<Plugin>();
        firstRun = false;
      }

      while (executionStack.Count > 0) {
        cancellationToken.ThrowIfCancellationRequested();

        next = executionStack.Pop();
        //bool isOpCollection = next is OperationCollection;
        //int collCount = isOpCollection ? ((OperationCollection)next).Count : 0;
        //string opName = !isOpCollection ? ((IAtomicOperation)next).Operator.Name : "OpCollection";

        if (next is OperationCollection) {
          coll = (OperationCollection)next;

          //bool isPMOEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "PMOEvaluator";
          //bool isAlgorithmEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "AlgorithmEvaluator";

          //if (coll.Parallel && isPMOEvaluator) {
          //  Task[] tasks = new Task[coll.Count];
          //  Stack<IOperation>[] stacks = new Stack<IOperation>[coll.Count];
          //  for (int i = 0; i < coll.Count; i++) {
          //    stacks[i] = new Stack<IOperation>();
          //    stacks[i].Push(coll[i]);
          //    tasks[i] = Task.Factory.StartNew(Run, stacks[i], cancellationToken);
          //  }
          //  try {
          //    Task.WaitAll(tasks);
          //  }
          //  catch (AggregateException ex) {
          //    OperationCollection remaining = new OperationCollection() { Parallel = true };
          //    for (int i = 0; i < stacks.Length; i++) {
          //      if (stacks[i].Count == 1)
          //        remaining.Add(stacks[i].Pop());
          //      if (stacks[i].Count > 1) {
          //        OperationCollection ops = new OperationCollection();
          //        while (stacks[i].Count > 0)
          //          ops.Add(stacks[i].Pop());
          //        remaining.Add(ops);
          //      }
          //    }
          //    if (remaining.Count > 0) executionStack.Push(remaining);
          //    throw ex;
          //  }
          //} else if (coll.Parallel) {
          if (coll.Parallel) {
            // Clone the parent scope here and reuse it for each operation; otherwise, for each job,
            // the whole scope tree would first need to be copied and then cleaned, which causes a lot of work for the garbage collector.
            IScope parentScopeClone = (IScope)((IAtomicOperation)coll.First()).Scope.Parent.Clone();
            parentScopeClone.SubScopes.Clear();
            parentScopeClone.ClearParentScopes();

            EngineJob[] jobs = new EngineJob[coll.Count];
            for (int i = 0; i < coll.Count; i++) {
              jobs[i] = new EngineJob(coll[i], new SequentialEngine.SequentialEngine());
            }

            var experiment = CreateHiveExperiment();
            IScope[] scopes = ExecuteOnHive(experiment, jobs, parentScopeClone, cancellationToken);
            DisposeHiveExperiment(experiment);

            for (int i = 0; i < coll.Count; i++) {
              if (coll[i] is IAtomicOperation) {
                ExchangeScope(scopes[i], ((IAtomicOperation)coll[i]).Scope);
              } else if (coll[i] is OperationCollection) {
                // todo ??
              }
            }
          } else {
            for (int i = coll.Count - 1; i >= 0; i--)
              if (coll[i] != null) executionStack.Push(coll[i]);
          }
        } else if (next is IAtomicOperation) {
          operation = (IAtomicOperation)next;
          try {
            next = operation.Operator.Execute((IExecutionContext)operation, cancellationToken);
          }
          catch (Exception ex) {
            executionStack.Push(operation);
            if (ex is OperationCanceledException) throw;
            else throw new OperatorExecutionException(operation.Operator, ex);
          }
          if (next != null) executionStack.Push(next);

          if (operation.Operator.Breakpoint) {
            LogMessage(string.Format("Breakpoint: {0}", operation.Operator.Name != string.Empty ? operation.Operator.Name : operation.Operator.ItemName));
            Pause();
          }
        }
      }
    }

    private void TaskScheduler_UnobservedTaskException(object sender, UnobservedTaskExceptionEventArgs e) {
      e.SetObserved(); // avoid crash of process
    }

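    // Walks up the execution context hierarchy and returns the first value parameter named
    // "Random" as IRandom, or null if none is found (used to reseed the RNG per job).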
    private IRandom FindRandomParameter(IExecutionContext ec) {
      try {
        if (ec == null)
          return null;

        foreach (var p in ec.Parameters) {
          if (p.Name == "Random" && p is IValueParameter)
            return ((IValueParameter)p).Value as IRandom;
        }
        return FindRandomParameter(ec.Parent);
      }
      catch { return null; }
    }

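    // Copies variables and sub-scopes from the downloaded (source) scope back into the original
    // (target) scope object, so that existing references into the scope tree remain valid.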
    private static void ReIntegrateScope(IAtomicOperation source, IAtomicOperation target) {
      ExchangeScope(source.Scope, target.Scope);
    }

    private static void ExchangeScope(IScope source, IScope target) {
      target.Variables.Clear();
      target.Variables.AddRange(source.Variables);
      target.SubScopes.Clear();
      target.SubScopes.AddRange(source.SubScopes);
      // TODO: validate if parent scopes match - otherwise source is invalid
    }

    /// <summary>
    /// This method blocks until all jobs are finished.
    /// TODO: Cancellation needs to be refined; all tasks currently stay in Semaphore.WaitOne after cancellation.
    /// </summary>
    /// <param name="jobs"></param>
    private IScope[] ExecuteOnHive(RefreshableHiveExperiment refreshableHiveExperiment, EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
      LogMessage(string.Format("Executing {0} operations on the hive.", jobs.Length));
      IScope[] scopes = new Scope[jobs.Length];
      object locker = new object();
      IDictionary<Guid, int> jobIndices = new Dictionary<Guid, int>();
      var hiveExperiment = refreshableHiveExperiment.HiveExperiment;

      try {
        List<Guid> remainingJobIds = new List<Guid>();

        // create upload-tasks
        var uploadTasks = new List<Task<Job>>();
        for (int i = 0; i < jobs.Length; i++) {
          hiveExperiment.HiveJobs.Add(new EngineHiveJob(jobs[i], parentScopeClone));

          // shuffle random variable to avoid the same random sequence in each operation; todo: does not yet work (it cannot find the random variable)
          IRandom random = FindRandomParameter(jobs[i].InitialOperation as IExecutionContext);
          if (random != null)
            random.Reset(random.Next());
        }
        HiveClient.StartExperiment((e) => {
          LogException(e);
        }, refreshableHiveExperiment);

        // do polling until experiment is finished and all jobs are downloaded
        while (!refreshableHiveExperiment.AllJobsFinished()) {
          Thread.Sleep(500);
          this.ExecutionTimeOnHive = TimeSpan.FromMilliseconds(hiveExperiments.Sum(x => x.HiveExperiment.ExecutionTime.TotalMilliseconds));
          cancellationToken.ThrowIfCancellationRequested();
        }
        LogMessage(string.Format("{0} finished (TotalExecutionTime: {1}).", refreshableHiveExperiment.ToString(), refreshableHiveExperiment.HiveExperiment.ExecutionTime));

        // get scopes
        int j = 0;
        foreach (var hiveJob in hiveExperiment.HiveJobs) {
          if (hiveJob.Job.State != JobState.Finished)
            throw new HiveEngineException("Job failed: " + hiveJob.Job.StateLog.Last().Exception);

          var scope = ((IAtomicOperation)((EngineJob)hiveJob.ItemJob).InitialOperation).Scope;
          scopes[j++] = scope;
        }
        return scopes;
      }
      catch (OperationCanceledException) {
        lock (locker) {
          if (jobIndices != null) DeleteHiveExperiment(hiveExperiment.Id);
        }
        throw;
      }
      catch (Exception e) {
        lock (locker) {
          if (jobIndices != null) DeleteHiveExperiment(hiveExperiment.Id);
        }
        LogException(e);
        throw;
      }
    }

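    // Creates a new HiveExperiment configured with this engine's plugin and resource settings,
    // wraps it in a non-controllable RefreshableHiveExperiment and tracks it in hiveExperiments
    // (used to aggregate ExecutionTimeOnHive while polling).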
    private RefreshableHiveExperiment CreateHiveExperiment() {
      lock (locker) {
        var hiveExperiment = new HiveExperiment();
        hiveExperiment.Name = "HiveEngine Run " + hiveExperiments.Count;
        hiveExperiment.DateCreated = DateTime.Now;
        hiveExperiment.UseLocalPlugins = this.UseLocalPlugins;
        hiveExperiment.ResourceNames = this.ResourceNames;
        var refreshableHiveExperiment = new RefreshableHiveExperiment(hiveExperiment);
        refreshableHiveExperiment.IsControllable = false;
        hiveExperiments.Add(refreshableHiveExperiment);
        return refreshableHiveExperiment;
      }
    }

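    // Stops automatic refreshing, deletes the experiment on the Hive server and releases the
    // job data that is still held in memory.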
    private void DisposeHiveExperiment(RefreshableHiveExperiment refreshableHiveExperiment) {
      refreshableHiveExperiment.RefreshAutomatically = false;
      DeleteHiveExperiment(refreshableHiveExperiment.HiveExperiment.Id);
      ClearData(refreshableHiveExperiment);
    }

    private void ClearData(RefreshableHiveExperiment refreshableHiveExperiment) {
      var jobs = refreshableHiveExperiment.HiveExperiment.GetAllHiveJobs();
      foreach (var job in jobs) {
        job.ClearData();
      }
    }

    private void DeleteHiveExperiment(Guid hiveExperimentId) {
      HiveClient.TryAndRepeat(() => {
        ServiceLocator.Instance.CallHiveService(s => s.DeleteHiveExperiment(hiveExperimentId));
      }, 5, "Could not delete jobs");
    }

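    // Resolves each semicolon-separated name in ResourceNames to its resource id via the Hive
    // service; throws a ResourceNotFoundException if any of the names cannot be resolved.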
    private List<Guid> GetResourceIds() {
      return ServiceLocator.Instance.CallHiveService(service => {
        var resourceNames = ResourceNames.Split(';');
        var resourceIds = new List<Guid>();
        foreach (var resourceName in resourceNames) {
          Guid resourceId = service.GetResourceId(resourceName);
          if (resourceId == Guid.Empty) {
            throw new ResourceNotFoundException(string.Format("Could not find the resource '{0}'", resourceName));
          }
          resourceIds.Add(resourceId);
        }
        return resourceIds;
      });
    }

    /// <summary>
    /// Thread-safe message logging
    /// </summary>
    private void LogMessage(string message) {
      lock (logLocker) {
        Log.LogMessage(message);
      }
    }

    /// <summary>
    /// Thread-safe exception logging
    /// </summary>
    private void LogException(Exception exception) {
      lock (logLocker) {
        Log.LogException(exception);
      }
    }

    // test function:
    //private IScope[] ExecuteLocally(EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
    //  IScope[] scopes = new Scope[jobs.Length];
    //  for (int i = 0; i < jobs.Length; i++) {
    //    var serialized = PersistenceUtil.Serialize(jobs[i]);
    //    var deserialized = PersistenceUtil.Deserialize<IJob>(serialized);
    //    deserialized.Start();
    //    while (deserialized.ExecutionState != ExecutionState.Stopped) {
    //      Thread.Sleep(100);
    //    }
    //    var serialized2 = PersistenceUtil.Serialize(deserialized);
    //    var deserialized2 = PersistenceUtil.Deserialize<EngineJob>(serialized2);
    //    var newScope = ((IAtomicOperation)deserialized2.InitialOperation).Scope;
    //    scopes[i] = newScope;
    //  }
    //  return scopes;
    //}
  }
}