[5134] | 1 | using System;
|
---|
| 2 | using System.Collections.Generic;
|
---|
| 3 | using System.Linq;
|
---|
| 4 | using System.Text;
|
---|
| 5 | using HeuristicLab.Persistence.Default.CompositeSerializers.Storable;
|
---|
| 6 | using HeuristicLab.Core;
|
---|
| 7 | using HeuristicLab.Common;
|
---|
[5136] | 8 | using HeuristicLab.Hive.Contracts.Interfaces;
|
---|
| 9 | using HeuristicLab.Clients.Common;
|
---|
| 10 | using HeuristicLab.Hive.ExperimentManager;
|
---|
| 11 | using HeuristicLab.Hive.Contracts.BusinessObjects;
|
---|
| 12 | using HeuristicLab.PluginInfrastructure;
|
---|
| 13 | using HeuristicLab.Hive.Contracts.ResponseObjects;
|
---|
| 14 | using System.Threading;
|
---|
[5153] | 15 | using HeuristicLab.Random;
|
---|
[5213] | 16 | using System.Threading.Tasks;
|
---|
[5134] | 17 |
|
---|
namespace HeuristicLab.HiveEngine {
  /// <summary>
  /// Represents an engine that executes operations which can be executed in parallel on the hive
  /// </summary>
  [StorableClass]
  [Item("Hive Engine", "Engine for parallel execution on the hive. You need enable `Parallel` for at least one operator in your operator graph to have all childoperations parallelized. Also those childoperations must not have sideeffects on a higher scope.")]
  public class HiveEngine : Engine {
    // Throttle for simultaneous service connections (upload/download calls).
    private Semaphore maxConcurrentConnections = new Semaphore(4, 4); // avoid too many connections
    // Throttle for how many serialized job payloads may be held in memory at once.
    private Semaphore maxSerializedJobsInMemory = new Semaphore(4, 4); // avoid memory problems
    // Token captured in Run(CancellationToken) and re-used by the worker methods.
    private CancellationToken cancellationToken;

    [Storable]
    private IOperator currentOperator;

    // Semicolon-separated list of hive resource group names jobs are submitted to.
    [Storable]
    public string ResourceIds { get; set; }

    // Priority value copied into each uploaded JobDto.
    [Storable]
    private int priority;
    public int Priority {
      get { return priority; }
      set { priority = value; }
    }
|
---|
| 41 |
|
---|
    #region constructors and cloning
    /// <summary>Creates a new engine submitting to the default "HEAL" resource group.</summary>
    public HiveEngine() {
      ResourceIds = "HEAL";
    }
    [StorableConstructor]
    protected HiveEngine(bool deserializing) : base(deserializing) { }
    // Deep-cloning constructor: copies resource ids and priority, deep-clones the current operator.
    protected HiveEngine(HiveEngine original, Cloner cloner)
      : base(original, cloner) {
      this.ResourceIds = original.ResourceIds;
      this.currentOperator = cloner.Clone(original.currentOperator);
      this.priority = original.priority;
    }
    public override IDeepCloneable Clone(Cloner cloner) {
      return new HiveEngine(this, cloner);
    }
    #endregion
|
---|
| 58 |
|
---|
    /// <summary>
    /// Engine entry point: stores the cancellation token for later use by the
    /// worker methods and processes the current execution stack.
    /// </summary>
    protected override void Run(CancellationToken cancellationToken) {
      this.cancellationToken = cancellationToken;
      Run(ExecutionStack);
    }
|
---|
[5134] | 63 |
|
---|
[5213] | 64 | private void Run(object state) {
|
---|
| 65 | Stack<IOperation> executionStack = (Stack<IOperation>)state;
|
---|
| 66 | IOperation next;
|
---|
| 67 | OperationCollection coll;
|
---|
| 68 | IAtomicOperation operation;
|
---|
[5329] | 69 | TaskScheduler.UnobservedTaskException += new EventHandler<UnobservedTaskExceptionEventArgs>(TaskScheduler_UnobservedTaskException);
|
---|
[5153] | 70 |
|
---|
[5213] | 71 | while (ExecutionStack.Count > 0) {
|
---|
| 72 | cancellationToken.ThrowIfCancellationRequested();
|
---|
[5153] | 73 |
|
---|
[5213] | 74 | next = ExecutionStack.Pop();
|
---|
| 75 | if (next is OperationCollection) {
|
---|
| 76 | coll = (OperationCollection)next;
|
---|
| 77 | if (coll.Parallel) {
|
---|
[5268] | 78 | // clone the parent scope here and reuse it for each operation. otherwise for each job the whole scope-tree first needs to be copied and then cleaned, which causes a lot of work for the Garbage Collector
|
---|
| 79 | IScope parentScopeClone = (IScope)((IAtomicOperation)coll.First()).Scope.Parent.Clone();
|
---|
| 80 | parentScopeClone.SubScopes.Clear();
|
---|
| 81 | parentScopeClone.ClearParentScopes();
|
---|
| 82 |
|
---|
[5263] | 83 | OperationJob[] jobs = new OperationJob[coll.Count];
|
---|
| 84 | for (int i = 0; i < coll.Count; i++) {
|
---|
| 85 | jobs[i] = new OperationJob(coll[i]);
|
---|
[5153] | 86 | }
|
---|
[5213] | 87 |
|
---|
[5268] | 88 | IScope[] scopes = ExecuteOnHive(jobs, parentScopeClone, cancellationToken);
|
---|
[5213] | 89 |
|
---|
[5263] | 90 | for (int i = 0; i < coll.Count; i++) {
|
---|
| 91 | if (coll[i] is IAtomicOperation) {
|
---|
| 92 | ExchangeScope(scopes[i], ((IAtomicOperation)coll[i]).Scope);
|
---|
| 93 | } else if (coll[i] is OperationCollection) {
|
---|
[5228] | 94 | // todo ??
|
---|
[5213] | 95 | }
|
---|
| 96 | }
|
---|
| 97 | } else {
|
---|
| 98 | for (int i = coll.Count - 1; i >= 0; i--)
|
---|
| 99 | if (coll[i] != null) executionStack.Push(coll[i]);
|
---|
[5153] | 100 | }
|
---|
[5213] | 101 | } else if (next is IAtomicOperation) {
|
---|
| 102 | operation = (IAtomicOperation)next;
|
---|
| 103 | try {
|
---|
| 104 | next = operation.Operator.Execute((IExecutionContext)operation, cancellationToken);
|
---|
| 105 | }
|
---|
| 106 | catch (Exception ex) {
|
---|
| 107 | ExecutionStack.Push(operation);
|
---|
| 108 | if (ex is OperationCanceledException) throw ex;
|
---|
| 109 | else throw new OperatorExecutionException(operation.Operator, ex);
|
---|
| 110 | }
|
---|
| 111 | if (next != null) ExecutionStack.Push(next);
|
---|
[5134] | 112 |
|
---|
[5213] | 113 | if (operation.Operator.Breakpoint) {
|
---|
[5227] | 114 | LogMessage(string.Format("Breakpoint: {0}", operation.Operator.Name != string.Empty ? operation.Operator.Name : operation.Operator.ItemName));
|
---|
[5213] | 115 | Pause();
|
---|
| 116 | }
|
---|
[5134] | 117 | }
|
---|
| 118 | }
|
---|
| 119 | }
|
---|
| 120 |
|
---|
    // Marks exceptions of faulted tasks that were never awaited/observed as handled,
    // so the finalizer does not re-throw them and tear down the process.
    private void TaskScheduler_UnobservedTaskException(object sender, UnobservedTaskExceptionEventArgs e) {
      e.SetObserved(); // avoid crash of process
    }
|
---|
| 124 |
|
---|
[5153] | 125 | private IRandom FindRandomParameter(IExecutionContext ec) {
|
---|
| 126 | try {
|
---|
| 127 | if (ec == null)
|
---|
| 128 | return null;
|
---|
| 129 |
|
---|
| 130 | foreach (var p in ec.Parameters) {
|
---|
| 131 | if (p.Name == "Random" && p is IValueParameter)
|
---|
| 132 | return ((IValueParameter)p).Value as IRandom;
|
---|
[5136] | 133 | }
|
---|
[5153] | 134 | return FindRandomParameter(ec.Parent);
|
---|
[5136] | 135 | }
|
---|
[5153] | 136 | catch { return null; }
|
---|
[5136] | 137 | }
|
---|
| 138 |
|
---|
    // Copies the scope of a remotely executed operation back into the corresponding
    // local operation's scope. NOTE(review): not called anywhere within this file —
    // verify for callers elsewhere before removing.
    private static void ReIntegrateScope(IAtomicOperation source, IAtomicOperation target) {
      ExchangeScope(source.Scope, target.Scope);
    }
|
---|
| 142 |
|
---|
[5153] | 143 | private static void ExchangeScope(IScope source, IScope target) {
|
---|
| 144 | target.Variables.Clear();
|
---|
| 145 | target.Variables.AddRange(source.Variables);
|
---|
| 146 | target.SubScopes.Clear();
|
---|
| 147 | target.SubScopes.AddRange(source.SubScopes);
|
---|
| 148 | // TODO: validate if parent scopes match - otherwise source is invalid
|
---|
[5136] | 149 | }
|
---|
| 150 |
|
---|
[5153] | 151 | /// <summary>
|
---|
| 152 | /// This method blocks until all jobs are finished
|
---|
[5232] | 153 | /// TODO: Cancelation needs to be refined; all tasks currently stay in Semaphore.WaitOne after cancelation
|
---|
[5153] | 154 | /// </summary>
|
---|
[5263] | 155 | /// <param name="jobs"></param>
|
---|
[5268] | 156 | private IScope[] ExecuteOnHive(OperationJob[] jobs, IScope parentScopeClone, CancellationToken cancellationToken) {
|
---|
[5263] | 157 | LogMessage(string.Format("Executing {0} operations on the hive.", jobs.Length));
|
---|
| 158 | IScope[] scopes = new Scope[jobs.Length];
|
---|
| 159 | object locker = new object();
|
---|
[5228] | 160 |
|
---|
[5213] | 161 | try {
|
---|
[5263] | 162 | IDictionary<Guid, int> jobIndices = new Dictionary<Guid, int>();
|
---|
[5227] | 163 | List<Guid> remainingJobIds = new List<Guid>();
|
---|
[5213] | 164 | JobResultList results;
|
---|
[5227] | 165 | var pluginsNeeded = ApplicationManager.Manager.Plugins.Select(x => new HivePluginInfoDto { Name = x.Name, Version = x.Version }).ToList();
|
---|
[5228] | 166 | int finishedCount = 0;
|
---|
[5232] | 167 | int uploadCount = 0;
|
---|
[5136] | 168 |
|
---|
[5228] | 169 | // create upload-tasks
|
---|
| 170 | var uploadTasks = new List<Task<JobDto>>();
|
---|
[5263] | 171 | for (int i = 0; i < jobs.Length; i++) {
|
---|
| 172 | var job = jobs[i];
|
---|
| 173 |
|
---|
[5227] | 174 | // shuffle random variable to avoid the same random sequence in each operation; todo: does not yet work (it cannot find the random variable)
|
---|
[5263] | 175 | IRandom random = FindRandomParameter(job.Operation as IExecutionContext);
|
---|
[5227] | 176 | if (random != null)
|
---|
| 177 | random.Reset(random.Next());
|
---|
| 178 |
|
---|
[5228] | 179 | uploadTasks.Add(Task.Factory.StartNew<JobDto>((keyValuePairObj) => {
|
---|
[5268] | 180 | return UploadJob(pluginsNeeded, keyValuePairObj, parentScopeClone, cancellationToken);
|
---|
[5263] | 181 | }, new KeyValuePair<int, OperationJob>(i, job), cancellationToken));
|
---|
[5136] | 182 | }
|
---|
| 183 |
|
---|
[5232] | 184 | Task processUploadedJobsTask = new Task(() => {
|
---|
[5228] | 185 | // process finished upload-tasks
|
---|
| 186 | int uploadTasksCount = uploadTasks.Count;
|
---|
| 187 | for (int i = 0; i < uploadTasksCount; i++) {
|
---|
[5232] | 188 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 189 |
|
---|
[5228] | 190 | var uploadTasksArray = uploadTasks.ToArray();
|
---|
| 191 | var task = uploadTasksArray[Task.WaitAny(uploadTasksArray)];
|
---|
| 192 | if (task.Status == TaskStatus.Faulted) {
|
---|
| 193 | LogException(task.Exception);
|
---|
| 194 | throw task.Exception;
|
---|
| 195 | }
|
---|
[5227] | 196 |
|
---|
[5263] | 197 | int key = ((KeyValuePair<int, OperationJob>)task.AsyncState).Key;
|
---|
[5228] | 198 | JobDto jobDto = task.Result;
|
---|
[5232] | 199 | lock (locker) {
|
---|
| 200 | uploadCount++;
|
---|
[5263] | 201 | jobIndices.Add(jobDto.Id, key);
|
---|
[5232] | 202 | remainingJobIds.Add(jobDto.Id);
|
---|
| 203 | }
|
---|
[5263] | 204 | jobs[key] = null; // relax memory
|
---|
| 205 | LogMessage(string.Format("Submitted job #{0}", key + 1, jobDto.Id));
|
---|
[5228] | 206 | uploadTasks.Remove(task);
|
---|
| 207 | }
|
---|
[5232] | 208 | }, cancellationToken, TaskCreationOptions.PreferFairness);
|
---|
| 209 | processUploadedJobsTask.Start();
|
---|
[5228] | 210 |
|
---|
| 211 | // poll job-statuses and create tasks for those which are finished
|
---|
| 212 | var downloadTasks = new List<Task<OperationJob>>();
|
---|
| 213 | var executionTimes = new List<TimeSpan>();
|
---|
| 214 | while (processUploadedJobsTask.Status != TaskStatus.RanToCompletion || remainingJobIds.Count > 0) {
|
---|
[5232] | 215 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 216 |
|
---|
[5228] | 217 | Thread.Sleep(10000);
|
---|
[5268] | 218 | try {
|
---|
| 219 | using (Disposable<IClientFacade> service = ServiceLocator.Instance.ClientFacadePool.GetService()) {
|
---|
| 220 | results = service.Obj.GetJobResults(remainingJobIds).Obj;
|
---|
[5227] | 221 | }
|
---|
[5268] | 222 | var jobsFinished = results.Where(j => j.State == JobState.Finished || j.State == JobState.Failed || j.State == JobState.Aborted);
|
---|
| 223 | finishedCount += jobsFinished.Count();
|
---|
| 224 | var totalExecutionTime = TimeSpan.FromMilliseconds(results.Select(j => j.ExecutionTime).Union(executionTimes).Select(e => e.TotalMilliseconds).Sum());
|
---|
| 225 | LogMessage(string.Format("Results polled. Jobs finished: {0}/{1}, TotalExecutionTime: {2}", finishedCount, jobs.Length, totalExecutionTime));
|
---|
| 226 | foreach (var result in jobsFinished) {
|
---|
| 227 | if (result.State == JobState.Finished) {
|
---|
| 228 | downloadTasks.Add(Task.Factory.StartNew<OperationJob>((jobIdObj) => {
|
---|
| 229 | return DownloadJob(jobIndices, jobIdObj, cancellationToken);
|
---|
| 230 | }, result.Id, cancellationToken));
|
---|
| 231 | } else if (result.State == JobState.Aborted) {
|
---|
| 232 | LogMessage(string.Format("Job #{0} aborted (id: {1})", jobIndices[result.Id] + 1, result.Id));
|
---|
| 233 | } else if (result.State == JobState.Failed) {
|
---|
| 234 | LogMessage(string.Format("Job {0} failed (id: {1}): {2}", jobIndices[result.Id] + 1, result.Id, result.Exception));
|
---|
| 235 | }
|
---|
| 236 | remainingJobIds.Remove(result.Id);
|
---|
| 237 | executionTimes.Add(result.ExecutionTime);
|
---|
| 238 | }
|
---|
[5213] | 239 | }
|
---|
[5268] | 240 | catch (Exception e) {
|
---|
| 241 | LogException(e);
|
---|
| 242 | }
|
---|
[5136] | 243 | }
|
---|
| 244 |
|
---|
[5228] | 245 | // process finished download-tasks
|
---|
| 246 | int downloadTasksCount = downloadTasks.Count;
|
---|
| 247 | for (int i = 0; i < downloadTasksCount; i++) {
|
---|
[5232] | 248 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 249 |
|
---|
[5228] | 250 | var downloadTasksArray = downloadTasks.ToArray();
|
---|
| 251 | var task = downloadTasksArray[Task.WaitAny(downloadTasksArray)];
|
---|
| 252 | var jobId = (Guid)task.AsyncState;
|
---|
| 253 | if (task.Status == TaskStatus.Faulted) {
|
---|
| 254 | LogException(task.Exception);
|
---|
| 255 | throw task.Exception;
|
---|
| 256 | }
|
---|
[5263] | 257 | scopes[jobIndices[(Guid)task.AsyncState]] = ((IAtomicOperation)task.Result.Operation).Scope;
|
---|
[5228] | 258 | downloadTasks.Remove(task);
|
---|
[5227] | 259 | }
|
---|
| 260 |
|
---|
[5228] | 261 | LogMessage(string.Format("All jobs finished (TotalExecutionTime: {0}). Deleting jobs on hive.", TimeSpan.FromMilliseconds(executionTimes.Select(e => e.TotalMilliseconds).Sum())));
|
---|
[5213] | 262 | // delete jobs
|
---|
| 263 | using (Disposable<IClientFacade> service = ServiceLocator.Instance.ClientFacadePool.GetService()) {
|
---|
[5263] | 264 | foreach (Guid jobId in jobIndices.Keys) {
|
---|
[5213] | 265 | service.Obj.DeleteJob(jobId);
|
---|
| 266 | }
|
---|
[5153] | 267 | }
|
---|
[5213] | 268 |
|
---|
[5263] | 269 | LogMessage(string.Format("Operations on the hive finished.", jobs.Length));
|
---|
| 270 | return scopes;
|
---|
[5153] | 271 | }
|
---|
[5213] | 272 | catch (Exception e) {
|
---|
[5227] | 273 | LogException(e);
|
---|
[5213] | 274 | throw e;
|
---|
| 275 | }
|
---|
[5136] | 276 | }
|
---|
[5227] | 277 |
|
---|
[5263] | 278 | private static object locker = new object();
|
---|
[5268] | 279 | private JobDto UploadJob(List<HivePluginInfoDto> pluginsNeeded, object keyValuePairObj, IScope parentScopeClone, CancellationToken cancellationToken) {
|
---|
[5263] | 280 | var keyValuePair = (KeyValuePair<int, OperationJob>)keyValuePairObj;
|
---|
[5228] | 281 | var groups = ResourceIds.Split(';');
|
---|
[5329] | 282 | ResponseObject<JobDto> response = null;
|
---|
| 283 | try {
|
---|
| 284 | maxSerializedJobsInMemory.WaitOne();
|
---|
| 285 | SerializedJob serializedJob = null;
|
---|
| 286 | while (serializedJob == null) { // repeat until success; rare race-conditions occur at serializations (enumeration was changed-exceptions); maybe this is because all the parent-scopes and execution-contexts at some point contain the hiveengine and the Log in here
|
---|
| 287 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 288 | try {
|
---|
| 289 | lock (Log) {
|
---|
| 290 | serializedJob = new SerializedJob();
|
---|
| 291 | }
|
---|
[5232] | 292 | }
|
---|
[5329] | 293 | catch (Exception e) {
|
---|
| 294 | LogException(e);
|
---|
| 295 | }
|
---|
[5232] | 296 | }
|
---|
[5329] | 297 | // clone operation and remove unnecessary scopes; don't do this earlier to avoid memory problems
|
---|
| 298 | lock (locker) {
|
---|
| 299 | ((IAtomicOperation)keyValuePair.Value.Operation).Scope.Parent = parentScopeClone;
|
---|
| 300 | keyValuePair.Value.Operation = (IOperation)keyValuePair.Value.Operation.Clone();
|
---|
| 301 | if (keyValuePair.Value.Operation is IAtomicOperation)
|
---|
| 302 | ((IAtomicOperation)keyValuePair.Value.Operation).Scope.ClearParentScopes();
|
---|
| 303 | serializedJob.SerializedJobData = SerializedJob.Serialize(keyValuePair.Value);
|
---|
[5232] | 304 | }
|
---|
[5329] | 305 | serializedJob.JobInfo = new JobDto();
|
---|
| 306 | serializedJob.JobInfo.State = JobState.Offline;
|
---|
| 307 | serializedJob.JobInfo.CoresNeeded = 1;
|
---|
| 308 | serializedJob.JobInfo.PluginsNeeded = pluginsNeeded;
|
---|
| 309 | serializedJob.JobInfo.Priority = priority;
|
---|
[5232] | 310 | try {
|
---|
[5329] | 311 | maxConcurrentConnections.WaitOne();
|
---|
| 312 | while (response == null) { // repeat until success
|
---|
| 313 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 314 | try {
|
---|
| 315 | using (Disposable<IClientFacade> service = ServiceLocator.Instance.StreamedClientFacadePool.GetService()) {
|
---|
| 316 | response = service.Obj.AddJobWithGroupStrings(serializedJob, groups);
|
---|
| 317 | serializedJob = null;
|
---|
| 318 | }
|
---|
| 319 | }
|
---|
| 320 | catch (Exception e) {
|
---|
| 321 | LogException(e);
|
---|
| 322 | }
|
---|
[5232] | 323 | }
|
---|
| 324 | }
|
---|
[5329] | 325 | finally {
|
---|
| 326 | maxSerializedJobsInMemory.Release();
|
---|
[5232] | 327 | }
|
---|
[5228] | 328 | }
|
---|
[5329] | 329 | finally {
|
---|
| 330 | maxConcurrentConnections.Release();
|
---|
| 331 | }
|
---|
[5228] | 332 | return response.Obj;
|
---|
| 333 | }
|
---|
| 334 |
|
---|
[5263] | 335 | private OperationJob DownloadJob(IDictionary<Guid, int> jobIndices, object jobIdObj, CancellationToken cancellationToken) {
|
---|
[5228] | 336 | Guid jobId = (Guid)jobIdObj;
|
---|
[5232] | 337 | SerializedJob serializedJob = null;
|
---|
[5329] | 338 | OperationJob operationJob = null;
|
---|
| 339 | try {
|
---|
| 340 | maxSerializedJobsInMemory.WaitOne();
|
---|
| 341 | maxConcurrentConnections.WaitOne();
|
---|
| 342 | while (serializedJob == null) { // repeat until success
|
---|
| 343 | cancellationToken.ThrowIfCancellationRequested();
|
---|
| 344 | try {
|
---|
| 345 | using (Disposable<IClientFacade> service = ServiceLocator.Instance.StreamedClientFacadePool.GetService()) {
|
---|
| 346 | serializedJob = service.Obj.GetLastSerializedResult(jobId).Obj;
|
---|
| 347 | }
|
---|
[5232] | 348 | }
|
---|
[5329] | 349 | catch (Exception e) {
|
---|
| 350 | LogException(e);
|
---|
| 351 | }
|
---|
[5232] | 352 | }
|
---|
[5329] | 353 | operationJob = SerializedJob.Deserialize<OperationJob>(serializedJob.SerializedJobData);
|
---|
| 354 | serializedJob = null;
|
---|
| 355 | LogMessage(string.Format("Downloaded job #{0}", jobIndices[jobId] + 1, jobId));
|
---|
[5227] | 356 | }
|
---|
[5329] | 357 | finally {
|
---|
| 358 | maxConcurrentConnections.Release();
|
---|
| 359 | maxSerializedJobsInMemory.Release();
|
---|
| 360 | }
|
---|
[5228] | 361 | return operationJob;
|
---|
[5227] | 362 | }
|
---|
| 363 |
|
---|
    /// <summary>
    /// Threadsafe message logging
    /// </summary>
    /// <remarks>
    /// Serializes concurrent log writes from the upload/download tasks by locking on
    /// the Log object itself (the same object UploadJob locks during serialization).
    /// </remarks>
    private void LogMessage(string message) {
      lock (Log) {
        Log.LogMessage(message);
      }
    }
|
---|
| 372 |
|
---|
    /// <summary>
    /// Threadsafe exception logging
    /// </summary>
    /// <remarks>Uses the same lock object as LogMessage so log output stays ordered.</remarks>
    private void LogException(Exception exception) {
      lock (Log) {
        Log.LogException(exception);
      }
    }
|
---|
[5134] | 381 | }
|
---|
[5263] | 382 |
|
---|
| 383 | public static class ScopeExtensions {
|
---|
| 384 | public static void ClearParentScopes(this IScope scope) {
|
---|
| 385 | scope.ClearParentScopes(null);
|
---|
| 386 | }
|
---|
| 387 |
|
---|
| 388 | public static void ClearParentScopes(this IScope scope, IScope childScope) {
|
---|
| 389 | if (childScope != null) {
|
---|
| 390 | scope.SubScopes.Clear();
|
---|
| 391 | scope.SubScopes.Add(childScope);
|
---|
| 392 | }
|
---|
| 393 | if (scope.Parent != null)
|
---|
| 394 | scope.Parent.ClearParentScopes(scope);
|
---|
| 395 | }
|
---|
| 396 | }
|
---|
[5134] | 397 | }
|
---|