source: branches/HeuristicLab.Hive-3.4/sources/HeuristicLab.HiveEngine/3.4/HiveEngine.cs @ 6357

Last change on this file since 6357 was 6357, checked in by cneumuel, 13 years ago

#1233

  • refactoring of slave core
  • created JobManager, which is responsible for managing jobs without knowing anything about the service; this class is easier to test than the slave core
  • lots of cleanup
  • created console test project for slave
File size: 16.4 KB
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2011 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using HeuristicLab.Clients.Hive;
using HeuristicLab.Common;
using HeuristicLab.Core;
using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;

namespace HeuristicLab.HiveEngine {
  /// <summary>
  /// Represents an engine that executes operations which can be executed in parallel on the hive
  /// </summary>
  [StorableClass]
  [Item("Hive Engine", "Engine for parallel execution on the hive. You need to enable `Parallel` for at least one operator in your operator graph to have all child operations parallelized. Also those child operations must not have side effects on a higher scope.")]
  public class HiveEngine : Engine {
    private static object locker = new object();
    private static object logLocker = new object();
    private CancellationToken cancellationToken;
    private bool firstRun = true;

    [Storable]
    private IOperator currentOperator;

    [Storable]
    public string ResourceNames { get; set; }

    [Storable]
    private int priority;
    public int Priority {
      get { return priority; }
      set { priority = value; }
    }

    [Storable]
    private TimeSpan executionTimeOnHive;
    public TimeSpan ExecutionTimeOnHive {
      get { return executionTimeOnHive; }
      set {
        if (value != executionTimeOnHive) {
          executionTimeOnHive = value;
          OnExecutionTimeOnHiveChanged();
        }
      }
    }

    [Storable]
    private bool useLocalPlugins;
    public bool UseLocalPlugins {
      get { return useLocalPlugins; }
      set { useLocalPlugins = value; }
    }

    // [Storable] -> HiveExperiment can't be storable, so RefreshableHiveExperiment can't be stored
    private ItemCollection<RefreshableHiveExperiment> hiveExperiments = new ItemCollection<RefreshableHiveExperiment>();
    public ItemCollection<RefreshableHiveExperiment> HiveExperiments {
      get { return hiveExperiments; }
      set { hiveExperiments = value; }
    }

    private List<Plugin> onlinePlugins;
    public List<Plugin> OnlinePlugins {
      get { return onlinePlugins; }
      set { onlinePlugins = value; }
    }

    private List<Plugin> alreadyUploadedPlugins;
    public List<Plugin> AlreadyUploadedPlugins {
      get { return alreadyUploadedPlugins; }
      set { alreadyUploadedPlugins = value; }
    }

    #region constructors and cloning
    public HiveEngine() {
      ResourceNames = "HEAL";
      Priority = 0;
    }

    [StorableConstructor]
    protected HiveEngine(bool deserializing) : base(deserializing) { }
    protected HiveEngine(HiveEngine original, Cloner cloner)
      : base(original, cloner) {
      this.ResourceNames = original.ResourceNames;
      this.currentOperator = cloner.Clone(original.currentOperator);
      this.priority = original.priority;
      this.executionTimeOnHive = original.executionTimeOnHive;
      this.useLocalPlugins = original.useLocalPlugins;
      // this.hiveExperiments = cloner.Clone(original.hiveExperiments); do not clone hiveExperiments - otherwise they would be sent with every job
    }
    public override IDeepCloneable Clone(Cloner cloner) {
      return new HiveEngine(this, cloner);
    }
    #endregion

    #region Events
    protected override void OnPrepared() {
      base.OnPrepared();
      this.ExecutionTimeOnHive = TimeSpan.Zero;
    }

    public event EventHandler ExecutionTimeOnHiveChanged;
    protected virtual void OnExecutionTimeOnHiveChanged() {
      var handler = ExecutionTimeOnHiveChanged;
      if (handler != null) handler(this, EventArgs.Empty);
    }
    #endregion

    protected override void Run(CancellationToken cancellationToken) {
      this.cancellationToken = cancellationToken;
      Run(ExecutionStack);
    }

    private void Run(object state) {
      Stack<IOperation> executionStack = (Stack<IOperation>)state;
      IOperation next;
      OperationCollection coll;
      IAtomicOperation operation;

      if (firstRun) {
        TaskScheduler.UnobservedTaskException += new EventHandler<UnobservedTaskExceptionEventArgs>(TaskScheduler_UnobservedTaskException);
        this.OnlinePlugins = ServiceLocator.Instance.CallHiveService(s => s.GetPlugins()).Where(x => x.IsLocal == false).ToList();
        this.AlreadyUploadedPlugins = new List<Plugin>();
        firstRun = false;
      }

      while (executionStack.Count > 0) {
        cancellationToken.ThrowIfCancellationRequested();

        next = executionStack.Pop();
        bool isOpCollection = next is OperationCollection;
        int collCount = isOpCollection ? ((OperationCollection)next).Count : 0;
        string opName = !isOpCollection ? ((IAtomicOperation)next).Operator.Name : "OpCollection";

        if (next is OperationCollection) {
          coll = (OperationCollection)next;

          bool isPMOEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "PMOEvaluator";
          bool isAlgorithmEvaluator = coll.Count > 0 && coll.First() is HeuristicLab.Core.ExecutionContext && ((HeuristicLab.Core.ExecutionContext)coll.First()).Operator.GetType().Name == "AlgorithmEvaluator";

          if (coll.Parallel && isPMOEvaluator) {
            Task[] tasks = new Task[coll.Count];
            Stack<IOperation>[] stacks = new Stack<IOperation>[coll.Count];
            for (int i = 0; i < coll.Count; i++) {
              stacks[i] = new Stack<IOperation>();
              stacks[i].Push(coll[i]);
              tasks[i] = Task.Factory.StartNew(Run, stacks[i], cancellationToken);
            }
            try {
              Task.WaitAll(tasks);
            }
            catch (AggregateException) {
              OperationCollection remaining = new OperationCollection() { Parallel = true };
              for (int i = 0; i < stacks.Length; i++) {
                if (stacks[i].Count == 1)
                  remaining.Add(stacks[i].Pop());
                if (stacks[i].Count > 1) {
                  OperationCollection ops = new OperationCollection();
                  while (stacks[i].Count > 0)
                    ops.Add(stacks[i].Pop());
                  remaining.Add(ops);
                }
              }
              if (remaining.Count > 0) executionStack.Push(remaining);
              throw;
            }
          } else if (coll.Parallel) {
            // clone the parent scope here and reuse it for each operation. otherwise for each job the whole scope-tree first needs to be copied and then cleaned, which causes a lot of work for the Garbage Collector
            IScope parentScopeClone = (IScope)((IAtomicOperation)coll.First()).Scope.Parent.Clone();
            parentScopeClone.SubScopes.Clear();
            parentScopeClone.ClearParentScopes();

            EngineJob[] jobs = new EngineJob[coll.Count];
            for (int i = 0; i < coll.Count; i++) {
              jobs[i] = new EngineJob(coll[i], new SequentialEngine.SequentialEngine());
            }

            var experiment = CreateHiveExperiment();
            IScope[] scopes = ExecuteOnHive(experiment, jobs, parentScopeClone, cancellationToken);
            DisposeHiveExperiment(experiment);

            for (int i = 0; i < coll.Count; i++) {
              if (coll[i] is IAtomicOperation) {
                ExchangeScope(scopes[i], ((IAtomicOperation)coll[i]).Scope);
              } else if (coll[i] is OperationCollection) {
                // todo ??
              }
            }
          } else {
            for (int i = coll.Count - 1; i >= 0; i--)
              if (coll[i] != null) executionStack.Push(coll[i]);
          }
        } else if (next is IAtomicOperation) {
          operation = (IAtomicOperation)next;
          try {
            next = operation.Operator.Execute((IExecutionContext)operation, cancellationToken);
          }
          catch (Exception ex) {
            executionStack.Push(operation);
            if (ex is OperationCanceledException) throw;
            else throw new OperatorExecutionException(operation.Operator, ex);
          }
          if (next != null) executionStack.Push(next);

          if (operation.Operator.Breakpoint) {
            LogMessage(string.Format("Breakpoint: {0}", operation.Operator.Name != string.Empty ? operation.Operator.Name : operation.Operator.ItemName));
            Pause();
          }
        }
      }
    }

    private void TaskScheduler_UnobservedTaskException(object sender, UnobservedTaskExceptionEventArgs e) {
      e.SetObserved(); // avoid crash of process
    }

    private IRandom FindRandomParameter(IExecutionContext ec) {
      try {
        if (ec == null)
          return null;

        foreach (var p in ec.Parameters) {
          if (p.Name == "Random" && p is IValueParameter)
            return ((IValueParameter)p).Value as IRandom;
        }
        return FindRandomParameter(ec.Parent);
      }
      catch { return null; }
    }

    private static void ReIntegrateScope(IAtomicOperation source, IAtomicOperation target) {
      ExchangeScope(source.Scope, target.Scope);
    }

    private static void ExchangeScope(IScope source, IScope target) {
      target.Variables.Clear();
      target.Variables.AddRange(source.Variables);
      target.SubScopes.Clear();
      target.SubScopes.AddRange(source.SubScopes);
      // TODO: validate if parent scopes match - otherwise source is invalid
    }

    /// <summary>
    /// This method blocks until all jobs are finished
    /// TODO: Cancellation needs to be refined; all tasks currently stay in Semaphore.WaitOne after cancellation
    /// </summary>
    /// <param name="jobs"></param>
    private IScope[] ExecuteOnHive(RefreshableHiveExperiment refreshableHiveExperiment, EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
      LogMessage(string.Format("Executing {0} operations on the hive.", jobs.Length));
      IScope[] scopes = new Scope[jobs.Length];
      object locker = new object();
      IDictionary<Guid, int> jobIndices = new Dictionary<Guid, int>();
      var hiveExperiment = refreshableHiveExperiment.HiveExperiment;

      try {
        List<Guid> remainingJobIds = new List<Guid>();

        // create upload-tasks
        var uploadTasks = new List<Task<Job>>();
        for (int i = 0; i < jobs.Length; i++) {
          hiveExperiment.HiveJobs.Add(new EngineHiveJob(jobs[i], parentScopeClone));

          // shuffle random variable to avoid the same random sequence in each operation; todo: does not yet work (it cannot find the random variable)
          IRandom random = FindRandomParameter(jobs[i].InitialOperation as IExecutionContext);
          if (random != null)
            random.Reset(random.Next());
        }
        ExperimentManagerClient.StartExperiment((e) => {
          LogException(e);
        }, refreshableHiveExperiment);

        // do polling until experiment is finished and all jobs are downloaded
        while (!refreshableHiveExperiment.AllJobsFinished()) {
          Thread.Sleep(500);
          this.ExecutionTimeOnHive = TimeSpan.FromMilliseconds(hiveExperiments.Sum(x => x.HiveExperiment.ExecutionTime.TotalMilliseconds));
          cancellationToken.ThrowIfCancellationRequested();
        }
        LogMessage(string.Format("{0} finished (TotalExecutionTime: {1}).", refreshableHiveExperiment.ToString(), refreshableHiveExperiment.HiveExperiment.ExecutionTime));

        // get scopes
        int j = 0;
        foreach (var hiveJob in hiveExperiment.HiveJobs) {
          if (hiveJob.Job.State != JobState.Finished)
            throw new HiveEngineException("Job failed: " + hiveJob.Job.StateLog.Last().Exception);

          var scope = ((IAtomicOperation)((EngineJob)hiveJob.ItemJob).InitialOperation).Scope;
          scopes[j++] = scope;
        }
        return scopes;
      }
      catch (OperationCanceledException) {
        lock (locker) {
          if (jobIndices != null) DeleteHiveExperiment(hiveExperiment.Id);
        }
        throw;
      }
      catch (Exception e) {
        lock (locker) {
          if (jobIndices != null) DeleteHiveExperiment(hiveExperiment.Id);
        }
        LogException(e);
        throw;
      }
    }

    private RefreshableHiveExperiment CreateHiveExperiment() {
      lock (locker) {
        var hiveExperiment = new HiveExperiment();
        hiveExperiment.Name = "HiveEngine Run " + hiveExperiments.Count;
        hiveExperiment.DateCreated = DateTime.Now;
        hiveExperiment.UseLocalPlugins = this.UseLocalPlugins;
        hiveExperiment.ResourceNames = this.ResourceNames;
        var refreshableHiveExperiment = new RefreshableHiveExperiment(hiveExperiment);
        refreshableHiveExperiment.IsControllable = false;
        hiveExperiments.Add(refreshableHiveExperiment);
        return refreshableHiveExperiment;
      }
    }

    private void DisposeHiveExperiment(RefreshableHiveExperiment refreshableHiveExperiment) {
      refreshableHiveExperiment.RefreshAutomatically = false;
      DeleteHiveExperiment(refreshableHiveExperiment.HiveExperiment.Id);
      ClearData(refreshableHiveExperiment);
    }

    private void ClearData(RefreshableHiveExperiment refreshableHiveExperiment) {
      var jobs = refreshableHiveExperiment.HiveExperiment.GetAllHiveJobs();
      foreach (var job in jobs) {
        job.ClearData();
      }
    }

    private void DeleteHiveExperiment(Guid hiveExperimentId) {
      ExperimentManagerClient.TryAndRepeat(() => {
        ServiceLocator.Instance.CallHiveService(s => s.DeleteHiveExperiment(hiveExperimentId));
      }, 5, string.Format("Could not delete jobs"));
    }

    private List<Guid> GetResourceIds() {
      return ServiceLocator.Instance.CallHiveService(service => {
        var resourceNames = ResourceNames.Split(';');
        var resourceIds = new List<Guid>();
        foreach (var resourceName in resourceNames) {
          Guid resourceId = service.GetResourceId(resourceName);
          if (resourceId == Guid.Empty) {
            throw new ResourceNotFoundException(string.Format("Could not find the resource '{0}'", resourceName));
          }
          resourceIds.Add(resourceId);
        }
        return resourceIds;
      });
    }

    /// <summary>
    /// Thread-safe message logging
    /// </summary>
    private void LogMessage(string message) {
      lock (logLocker) {
        Log.LogMessage(message);
      }
    }

    /// <summary>
    /// Thread-safe exception logging
    /// </summary>
    private void LogException(Exception exception) {
      lock (logLocker) {
        Log.LogException(exception);
      }
    }

    // test function:
    //private IScope[] ExecuteLocally(EngineJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
    //  IScope[] scopes = new Scope[jobs.Length];
    //  for (int i = 0; i < jobs.Length; i++) {
    //    var serialized = PersistenceUtil.Serialize(jobs[i]);
    //    var deserialized = PersistenceUtil.Deserialize<IJob>(serialized);
    //    deserialized.Start();
    //    while (deserialized.ExecutionState != ExecutionState.Stopped) {
    //      Thread.Sleep(100);
    //    }
    //    var serialized2 = PersistenceUtil.Serialize(deserialized);
    //    var deserialized2 = PersistenceUtil.Deserialize<EngineJob>(serialized2);
    //    var newScope = ((IAtomicOperation)deserialized2.InitialOperation).Scope;
    //    scopes[i] = newScope;
    //  }
    //  return scopes;
    //}
  }
}
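Usage note (not part of HiveEngine.cs): a minimal sketch of how this engine would typically be wired up, assuming the surrounding HeuristicLab API. The algorithm instance and its Engine property are assumptions for illustration; only ResourceNames, Priority, and UseLocalPlugins are members actually defined in the file above.

// Hypothetical usage sketch -- the algorithm and its Engine property are assumed,
// not defined in this file.
var hiveEngine = new HeuristicLab.HiveEngine.HiveEngine {
  ResourceNames = "HEAL",    // semicolon-separated resource names, resolved in GetResourceIds()
  Priority = 0,              // job priority stored with the engine
  UseLocalPlugins = false    // copied onto every HiveExperiment created by CreateHiveExperiment()
};
// algorithm.Engine = hiveEngine;  // assumed: any EngineAlgorithm-derived algorithm
// algorithm.Start();              // operator-graph branches marked Parallel (and without side
//                                 // effects on a higher scope) are then executed on the hive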