source: branches/HeuristicLab.Hive-3.4/sources/HeuristicLab.HiveEngine/3.4/HiveEngine.cs @ 6426

Last change on this file since 6426 was 6426, checked in by ascheibe, 13 years ago

#1233 removed useLocalPlugins

File size: 16.4 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2011 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using HeuristicLab.Clients.Hive;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;

namespace HeuristicLab.HiveEngine {
  /// <summary>
  /// Represents an engine that executes operations which can be executed in parallel on the hive
  /// </summary>
  [StorableClass]
  [Item("Hive Engine", "Engine for parallel execution on the hive. You need to enable `Parallel` for at least one operator in your operator graph to have all child operations parallelized. Also, those child operations must not have side effects on a higher scope.")]
  public class HiveEngine : Engine {
    private static object locker = new object();
    private static object logLocker = new object();
    private CancellationToken cancellationToken;
    private bool firstRun = true;

    [Storable]
    private IOperator currentOperator;

    [Storable]
    public string ResourceNames { get; set; }

    [Storable]
    private int priority;
    public int Priority {
      get { return priority; }
      set { priority = value; }
    }

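    // accumulated execution time of all Hive experiments created by this engine; updated while polling for results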
    [Storable]
    private TimeSpan executionTimeOnHive;
    public TimeSpan ExecutionTimeOnHive {
      get { return executionTimeOnHive; }
      set {
        if (value != executionTimeOnHive) {
          executionTimeOnHive = value;
          OnExecutionTimeOnHiveChanged();
        }
      }
    }

    [Storable]
    private bool isPrivileged;
    public bool IsPrivileged {
      get { return isPrivileged; }
      set { isPrivileged = value; }
    }

    // [Storable] -> HiveExperiment can't be storable, so RefreshableHiveExperiment can't be stored
    private ItemCollection<RefreshableHiveExperiment> hiveExperiments = new ItemCollection<RefreshableHiveExperiment>();
    public ItemCollection<RefreshableHiveExperiment> HiveExperiments {
      get { return hiveExperiments; }
      set { hiveExperiments = value; }
    }

    private List<Plugin> onlinePlugins;
    public List<Plugin> OnlinePlugins {
      get { return onlinePlugins; }
      set { onlinePlugins = value; }
    }

    private List<Plugin> alreadyUploadedPlugins;
    public List<Plugin> AlreadyUploadedPlugins {
      get { return alreadyUploadedPlugins; }
      set { alreadyUploadedPlugins = value; }
    }

    #region constructors and cloning
    public HiveEngine() {
      ResourceNames = "HEAL";
      Priority = 0;
    }

    [StorableConstructor]
    protected HiveEngine(bool deserializing) : base(deserializing) { }
    protected HiveEngine(HiveEngine original, Cloner cloner)
      : base(original, cloner) {
      this.ResourceNames = original.ResourceNames;
      this.currentOperator = cloner.Clone(original.currentOperator);
      this.priority = original.priority;
      this.executionTimeOnHive = original.executionTimeOnHive;
      this.IsPrivileged = original.IsPrivileged;
      // this.hiveExperiments = cloner.Clone(original.hiveExperiments); do not clone hiveExperiments - otherwise they would be sent with every job
    }
    public override IDeepCloneable Clone(Cloner cloner) {
      return new HiveEngine(this, cloner);
    }
    #endregion

    #region Events
    protected override void OnPrepared() {
      base.OnPrepared();
      this.ExecutionTimeOnHive = TimeSpan.Zero;
    }

    public event EventHandler ExecutionTimeOnHiveChanged;
    protected virtual void OnExecutionTimeOnHiveChanged() {
      var handler = ExecutionTimeOnHiveChanged;
      if (handler != null) handler(this, EventArgs.Empty);
    }
    #endregion

    protected override void Run(CancellationToken cancellationToken) {
      this.cancellationToken = cancellationToken;
      Run(ExecutionStack);
    }

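    // Processes the given execution stack: operation collections marked as Parallel are bundled
    // into a Hive experiment and executed remotely via ExecuteOnHive; all other operations are
    // processed locally on the execution stack.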
    private void Run(object state) {
      Stack<IOperation> executionStack = (Stack<IOperation>)state;
      IOperation next;
      OperationCollection coll;
      IAtomicOperation operation;

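      // on the first run, register a handler for unobserved task exceptions and fetch the list of
      // plugins that are already available on the Hive server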
      if (firstRun) {
        TaskScheduler.UnobservedTaskException += new EventHandler<UnobservedTaskExceptionEventArgs>(TaskScheduler_UnobservedTaskException);
        this.OnlinePlugins = ServiceLocator.Instance.CallHiveService(s => s.GetPlugins()).Where(x => x.Hash != null).ToList();
        this.AlreadyUploadedPlugins = new List<Plugin>();
        firstRun = false;
      }

      while (executionStack.Count > 0) {
        cancellationToken.ThrowIfCancellationRequested();

        next = executionStack.Pop();
        //bool isOpCollection = next is OperationCollection;
        //int collCount = isOpCollection ? ((OperationCollection)next).Count : 0;
        //string opName = !isOpCollection ? ((IAtomicOperation)next).Operator.Name : "OpCollection";

        if (next is OperationCollection) {
          coll = (OperationCollection)next;

          //bool isPMOEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "PMOEvaluator";
          //bool isAlgorithmEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "AlgorithmEvaluator";

          //if (coll.Parallel && isPMOEvaluator) {
          //  Task[] tasks = new Task[coll.Count];
          //  Stack<IOperation>[] stacks = new Stack<IOperation>[coll.Count];
          //  for (int i = 0; i < coll.Count; i++) {
          //    stacks[i] = new Stack<IOperation>();
          //    stacks[i].Push(coll[i]);
          //    tasks[i] = Task.Factory.StartNew(Run, stacks[i], cancellationToken);
          //  }
          //  try {
          //    Task.WaitAll(tasks);
          //  }
          //  catch (AggregateException ex) {
          //    OperationCollection remaining = new OperationCollection() { Parallel = true };
          //    for (int i = 0; i < stacks.Length; i++) {
          //      if (stacks[i].Count == 1)
          //        remaining.Add(stacks[i].Pop());
          //      if (stacks[i].Count > 1) {
          //        OperationCollection ops = new OperationCollection();
          //        while (stacks[i].Count > 0)
          //          ops.Add(stacks[i].Pop());
          //        remaining.Add(ops);
          //      }
          //    }
          //    if (remaining.Count > 0) executionStack.Push(remaining);
          //    throw ex;
          //  }
          //} else if (coll.Parallel) {
          if (coll.Parallel) {
            try {
              // Clone the parent scope here and reuse it for each operation; otherwise the whole scope tree
              // would have to be copied and then cleaned for each job, which causes a lot of work for the garbage collector.
              IScope parentScopeClone = (IScope)((IAtomicOperation)coll.First()).Scope.Parent.Clone();
              parentScopeClone.SubScopes.Clear();
              parentScopeClone.ClearParentScopes();

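              // wrap each operation into an EngineJob that is executed by a SequentialEngine on the Hive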
              EngineJob[] jobs = new EngineJob[coll.Count];
              for (int i = 0; i < coll.Count; i++) {
                jobs[i] = new EngineJob(coll[i], new SequentialEngine.SequentialEngine());
              }

              var experiment = CreateHiveExperiment();
              IScope[] scopes = ExecuteOnHive(experiment, jobs, parentScopeClone, cancellationToken);

              for (int i = 0; i < coll.Count; i++) {
                if (coll[i] is IAtomicOperation) {
                  ExchangeScope(scopes[i], ((IAtomicOperation)coll[i]).Scope);
                } else if (coll[i] is OperationCollection) {
                  // todo ??
                }
              }
            }
            catch {
              executionStack.Push(coll); throw;
            }
          } else {
            for (int i = coll.Count - 1; i >= 0; i--)
              if (coll[i] != null) executionStack.Push(coll[i]);
          }
        } else if (next is IAtomicOperation) {
          operation = (IAtomicOperation)next;
          try {
            next = operation.Operator.Execute((IExecutionContext)operation, cancellationToken);
          }
          catch (Exception ex) {
            executionStack.Push(operation);
            if (ex is OperationCanceledException) throw;
            else throw new OperatorExecutionException(operation.Operator, ex);
          }
          if (next != null) executionStack.Push(next);

          if (operation.Operator.Breakpoint) {
            LogMessage(string.Format("Breakpoint: {0}", operation.Operator.Name != string.Empty ? operation.Operator.Name : operation.Operator.ItemName));
            Pause();
          }
        }
      }
    }

    private void TaskScheduler_UnobservedTaskException(object sender, UnobservedTaskExceptionEventArgs e) {
      e.SetObserved(); // avoid crash of process
    }

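    // walks up the execution context chain and returns the value of the first value parameter named "Random", or null if none is found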
    private IRandom FindRandomParameter(IExecutionContext ec) {
      try {
        if (ec == null)
          return null;

        foreach (var p in ec.Parameters) {
          if (p.Name == "Random" && p is IValueParameter)
            return ((IValueParameter)p).Value as IRandom;
        }
        return FindRandomParameter(ec.Parent);
      }
      catch { return null; }
    }

    private static void ReIntegrateScope(IAtomicOperation source, IAtomicOperation target) {
      ExchangeScope(source.Scope, target.Scope);
    }

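    // Replaces the variables and sub-scopes of the target scope with those of the source scope
    // (used to re-integrate scopes that were computed on the Hive into the local scope tree).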
    private static void ExchangeScope(IScope source, IScope target) {
      target.Variables.Clear();
      target.Variables.AddRange(source.Variables);
      target.SubScopes.Clear();
      target.SubScopes.AddRange(source.SubScopes);
      // TODO: validate if parent scopes match - otherwise source is invalid
    }

    /// <summary>
    /// This method blocks until all jobs are finished.
    /// TODO: Cancellation needs to be refined; all tasks currently stay in Semaphore.WaitOne after cancellation.
    /// </summary>
    /// <param name="jobs">The jobs to execute on the hive</param>
    private IScope[] ExecuteOnHive(RefreshableHiveExperiment refreshableHiveExperiment, EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
      LogMessage(string.Format("Executing {0} operations on the hive.", jobs.Length));
      IScope[] scopes = new Scope[jobs.Length];
      object locker = new object();
      var hiveExperiment = refreshableHiveExperiment.HiveExperiment;

      try {
        // create upload-tasks
        for (int i = 0; i < jobs.Length; i++) {
          var engineHiveJob = new EngineHiveJob(jobs[i], parentScopeClone);
          engineHiveJob.Job.Priority = this.Priority;
          hiveExperiment.HiveJobs.Add(engineHiveJob);

          // shuffle random variable to avoid the same random sequence in each operation; todo: does not yet work (it cannot find the random variable)
          IRandom random = FindRandomParameter(jobs[i].InitialOperation as IExecutionContext);
          if (random != null)
            random.Reset(random.Next());
        }
        HiveClient.StartExperiment((e) => { LogException(e); }, refreshableHiveExperiment);

        // do polling until experiment is finished and all jobs are downloaded
        while (!refreshableHiveExperiment.AllJobsFinished()) {
          Thread.Sleep(2000);
          this.ExecutionTimeOnHive = TimeSpan.FromMilliseconds(hiveExperiments.Sum(x => x.HiveExperiment.ExecutionTime.TotalMilliseconds));
          cancellationToken.ThrowIfCancellationRequested();
        }
        LogMessage(string.Format("{0} finished (TotalExecutionTime: {1}).", refreshableHiveExperiment.ToString(), refreshableHiveExperiment.HiveExperiment.ExecutionTime));

        var failedJobs = hiveExperiment.HiveJobs.Where(x => x.Job.State == JobState.Failed);
        if (failedJobs.Count() > 0) {
          throw new HiveEngineException("Job failed: " + failedJobs.First().Job.StateLog.Last().Exception);
        }

        // get scopes
        int j = 0;
        foreach (var hiveJob in hiveExperiment.HiveJobs) {
          var scope = ((IAtomicOperation)((EngineJob)hiveJob.ItemJob).InitialOperation).Scope;
          scopes[j++] = scope;
        }
        return scopes;
      }
      catch (OperationCanceledException) {
        throw;
      }
      catch (Exception e) {
        LogException(e);
        throw;
      }
      finally {
        DisposeHiveExperiment(refreshableHiveExperiment);
      }
    }

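    // Creates a non-controllable RefreshableHiveExperiment for one batch of parallel operations and
    // registers it in hiveExperiments so that its execution time is included in ExecutionTimeOnHive.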
    private RefreshableHiveExperiment CreateHiveExperiment() {
      lock (locker) {
        var hiveExperiment = new HiveExperiment();
        hiveExperiment.Name = "HiveEngine Run " + hiveExperiments.Count;
        hiveExperiment.DateCreated = DateTime.Now;
        hiveExperiment.ResourceNames = this.ResourceNames;
        hiveExperiment.IsPrivileged = this.IsPrivileged;
        var refreshableHiveExperiment = new RefreshableHiveExperiment(hiveExperiment);
        refreshableHiveExperiment.IsControllable = false;
        hiveExperiments.Add(refreshableHiveExperiment);
        return refreshableHiveExperiment;
      }
    }

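    // stops automatic refreshing, deletes the experiment on the Hive server and clears the locally cached job data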
    private void DisposeHiveExperiment(RefreshableHiveExperiment refreshableHiveExperiment) {
      refreshableHiveExperiment.RefreshAutomatically = false;
      DeleteHiveExperiment(refreshableHiveExperiment.HiveExperiment.Id);
      ClearData(refreshableHiveExperiment);
    }

    private void ClearData(RefreshableHiveExperiment refreshableHiveExperiment) {
      var jobs = refreshableHiveExperiment.HiveExperiment.GetAllHiveJobs();
      foreach (var job in jobs) {
        job.ClearData();
      }
    }

    private void DeleteHiveExperiment(Guid hiveExperimentId) {
      HiveClient.TryAndRepeat(() => {
        ServiceLocator.Instance.CallHiveService(s => s.DeleteHiveExperiment(hiveExperimentId));
      }, 5, string.Format("Could not delete jobs"));
    }

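    // Resolves the semicolon-separated ResourceNames to their resource ids;
    // throws a ResourceNotFoundException if one of the names cannot be resolved.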
    private List<Guid> GetResourceIds() {
      return ServiceLocator.Instance.CallHiveService(service => {
        var resourceNames = ResourceNames.Split(';');
        var resourceIds = new List<Guid>();
        foreach (var resourceName in resourceNames) {
          Guid resourceId = service.GetResourceId(resourceName);
          if (resourceId == Guid.Empty) {
            throw new ResourceNotFoundException(string.Format("Could not find the resource '{0}'", resourceName));
          }
          resourceIds.Add(resourceId);
        }
        return resourceIds;
      });
    }

    /// <summary>
    /// Threadsafe message logging
    /// </summary>
    private void LogMessage(string message) {
      lock (logLocker) {
        Log.LogMessage(message);
      }
    }

    /// <summary>
    /// Threadsafe exception logging
    /// </summary>
    private void LogException(Exception exception) {
      lock (logLocker) {
        Log.LogException(exception);
      }
    }

    // testfunction:
    //private IScope[] ExecuteLocally(EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
    //  IScope[] scopes = new Scope[jobs.Length];
    //  for (int i = 0; i < jobs.Length; i++) {
    //    var serialized = PersistenceUtil.Serialize(jobs[i]);
    //    var deserialized = PersistenceUtil.Deserialize<IJob>(serialized);
    //    deserialized.Start();
    //    while (deserialized.ExecutionState != ExecutionState.Stopped) {
    //      Thread.Sleep(100);
    //    }
    //    var serialized2 = PersistenceUtil.Serialize(deserialized);
    //    var deserialized2 = PersistenceUtil.Deserialize<EngineJob>(serialized2);
    //    var newScope = ((IAtomicOperation)deserialized2.InitialOperation).Scope;
    //    scopes[i] = newScope;
    //  }
    //  return scopes;
    //}
  }
}