
source: branches/3.1/sources/HeuristicLab.Grid/JobManager.cs @ 6253

Last change on this file since 6253 was 520, checked in by gkronber, 16 years ago

Fixed a small bug in the JobManager: the results-gathering thread slept 5 seconds before downloading each result, which is wasteful when it has to collect 100 results.

File size: 10.4 KB
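
The change described in the note above boils down to the shape of the polling loop: the delay should apply only when the server reports a job as still running, not unconditionally before every result download (compare RESULT_POLLING_TIMEOUT and GetResults() in the listing below). A minimal sketch of that pattern, with hypothetical tryDownloadResult/onResultReady callbacks standing in for the real grid-server call and consumer notification:

using System;
using System.Collections.Generic;
using System.Threading;

static class PollingSketch {
  // Mirrors RESULT_POLLING_TIMEOUT in the file below.
  private const int PollingTimeoutSec = 5;

  // Finished results are collected back-to-back; the 5-second back-off is paid
  // only for jobs the server is still working on.
  public static void GatherResults(Queue<Guid> jobs,
                                   Func<Guid, byte[]> tryDownloadResult,
                                   Action<Guid, byte[]> onResultReady) {
    while(jobs.Count > 0) {
      Guid job = jobs.Dequeue();
      byte[] result = tryDownloadResult(job);
      if(result != null) {
        onResultReady(job, result);   // result available: no sleep, poll the next job immediately
      } else {
        jobs.Enqueue(job);            // still running: requeue and back off before the next poll
        Thread.Sleep(TimeSpan.FromSeconds(PollingTimeoutSec));
      }
    }
  }
}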
#region License Information
/* HeuristicLab
 * Copyright (C) 2002-2008 Heuristic and Evolutionary Algorithms Laboratory (HEAL)
 *
 * This file is part of HeuristicLab.
 *
 * HeuristicLab is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * HeuristicLab is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with HeuristicLab. If not, see <http://www.gnu.org/licenses/>.
 */
#endregion

using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.ServiceModel;
using HeuristicLab.Grid;
using System.Threading;
using HeuristicLab.Core;
using System.IO;
using System.Windows.Forms;
using System.Diagnostics;

namespace HeuristicLab.Grid {
  public class JobExecutionException : ApplicationException {
    public JobExecutionException(string msg) : base(msg) { }
  }

  public class JobManager {
    private const int MAX_RESTARTS = 5;
    private const int MAX_CONNECTION_RETRIES = 10;
    private const int RETRY_TIMEOUT_SEC = 60;
    private const int RESULT_POLLING_TIMEOUT = 5;

    private class Job {
      public Guid guid;
      public ProcessingEngine engine;
      public ManualResetEvent waitHandle;
      public int restarts;
    }

    private IGridServer server;
    private string address;
    private object waitingQueueLock = new object();
    private Queue<Job> waitingJobs = new Queue<Job>();
    private object runningQueueLock = new object();
    private Queue<Job> runningJobs = new Queue<Job>();
    private Dictionary<AtomicOperation, byte[]> results = new Dictionary<AtomicOperation, byte[]>();

    private List<IOperation> erroredOperations = new List<IOperation>();
    private object connectionLock = new object();
    private object dictionaryLock = new object();

    private AutoResetEvent runningWaitHandle = new AutoResetEvent(false);
    private AutoResetEvent waitingWaitHandle = new AutoResetEvent(false);

    private ChannelFactory<IGridServer> factory;

    public JobManager(string address) {
      Trace.Listeners.Clear();
      Trace.Listeners.Add(new EventLogTraceListener("HeuristicLab.Grid"));
      this.address = address;
      Thread starterThread = new Thread(StartEngines);
      Thread resultsGatheringThread = new Thread(GetResults);
      starterThread.Start();
      resultsGatheringThread.Start();
    }

    public void Reset() {
      ResetConnection();
      lock(dictionaryLock) {
        foreach(Job j in waitingJobs) {
          j.waitHandle.Close();
        }
        waitingJobs.Clear();
        foreach(Job j in runningJobs) {
          j.waitHandle.Close();
        }
        runningJobs.Clear();
        results.Clear();
        erroredOperations.Clear();
      }
    }

    private void ResetConnection() {
      Trace.TraceInformation("Reset connection in JobManager");
      lock(connectionLock) {
        // open a new channel
        NetTcpBinding binding = new NetTcpBinding();
        binding.MaxReceivedMessageSize = 100000000; // 100Mbytes
        binding.ReaderQuotas.MaxStringContentLength = 100000000; // also 100M chars
        binding.ReaderQuotas.MaxArrayLength = 100000000; // also 100M elements;
        binding.Security.Mode = SecurityMode.None;

        factory = new ChannelFactory<IGridServer>(binding);
        server = factory.CreateChannel(new EndpointAddress(address));
      }
    }

    public void StartEngines() {
      try {
        while(true) {
          Job job = null;
          lock(waitingQueueLock) {
            if(waitingJobs.Count > 0) job = waitingJobs.Dequeue();
          }
          if(job==null) waitingWaitHandle.WaitOne(); // no jobs waiting
          else {
            Guid currentEngineGuid = TryStartExecuteEngine(job.engine);
            if(currentEngineGuid == Guid.Empty) {
              // couldn't start the job -> requeue
              if(job.restarts < MAX_RESTARTS) {
                job.restarts++;
                lock(waitingQueueLock) waitingJobs.Enqueue(job);
                waitingWaitHandle.Set();
              } else {
                // max restart count reached -> give up on this job and flag error
                lock(dictionaryLock) {
                  erroredOperations.Add(job.engine.InitialOperation);
                  job.waitHandle.Set();
                }
              }
            } else {
              // job started successfully
              job.guid = currentEngineGuid;
              lock(runningQueueLock) {
                runningJobs.Enqueue(job);
                runningWaitHandle.Set();
              }
            }
          }
        }
      } catch(Exception e) {
        Trace.TraceError("Exception "+e+" in JobManager.StartEngines() killed the start-engine thread\n"+e.StackTrace);
      }
    }


    public void GetResults() {
      try {
        while(true) {
          Job job = null;
          lock(runningQueueLock) {
            if(runningJobs.Count > 0) job = runningJobs.Dequeue();
          }
          if(job == null) runningWaitHandle.WaitOne(); // no jobs running
          else {
            byte[] zippedResult = TryEndExecuteEngine(server, job.guid);
            if(zippedResult != null) { // successful
              lock(dictionaryLock) {
                // store result
                results[job.engine.InitialOperation] = zippedResult;
                // notify consumer that result is ready
                job.waitHandle.Set();
              }
            } else {
              // there was a problem -> check the state of the job and restart if necessary
              JobState jobState = TryGetJobState(server, job.guid);
              if(jobState == JobState.Unknown) {
                job.restarts++;
                lock(waitingQueueLock) {
                  waitingJobs.Enqueue(job);
                  waitingWaitHandle.Set();
                }
              } else {
                // job still active at the server
                lock(runningQueueLock) {
                  runningJobs.Enqueue(job);
                  runningWaitHandle.Set();
                }
                Thread.Sleep(TimeSpan.FromSeconds(RESULT_POLLING_TIMEOUT)); // sleep a while before trying to get the next result
              }
            }
          }
        }
      } catch(Exception e) {
        Trace.TraceError("Exception " + e + " in JobManager.GetResults() killed the results-gathering thread\n"+ e.StackTrace);
      }
    }

    public WaitHandle BeginExecuteOperation(IScope globalScope, AtomicOperation operation) {
      return BeginExecuteEngine(new ProcessingEngine(globalScope, operation));
    }

    public WaitHandle BeginExecuteEngine(ProcessingEngine engine) {
      Job job = new Job();
      job.engine = engine;
      job.waitHandle = new ManualResetEvent(false);
      job.restarts = 0;
      lock(waitingQueueLock) {
        waitingJobs.Enqueue(job);
      }
      waitingWaitHandle.Set();
      return job.waitHandle;
    }

    private byte[] ZipEngine(ProcessingEngine engine) {
      return PersistenceManager.SaveToGZip(engine);
    }

    public ProcessingEngine EndExecuteOperation(AtomicOperation operation) {
      if(erroredOperations.Contains(operation)) {
        erroredOperations.Remove(operation);
        throw new JobExecutionException("Maximal number of job restarts reached. There is a problem with the connection to the grid-server.");
      } else {
        byte[] zippedResult = null;
        lock(dictionaryLock) {
          zippedResult = results[operation];
          results.Remove(operation);
        }
        // restore the engine
        return (ProcessingEngine)PersistenceManager.RestoreFromGZip(zippedResult);
      }
    }

    private Guid TryStartExecuteEngine(ProcessingEngine engine) {
      byte[] zippedEngine = ZipEngine(engine);
      int retries = 0;
      Guid guid = Guid.Empty;
      do {
        try {
          lock(connectionLock) {
            guid = server.BeginExecuteEngine(zippedEngine);
          }
          return guid;
        } catch(TimeoutException) {
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        } catch(CommunicationException) {
          ResetConnection();
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        }
      } while(retries < MAX_CONNECTION_RETRIES);
      Trace.TraceWarning("Reached max connection retries in TryStartExecuteEngine");
      return Guid.Empty;
    }

    private byte[] TryEndExecuteEngine(IGridServer server, Guid engineGuid) {
      int retries = 0;
      do {
        try {
          lock(connectionLock) {
            byte[] zippedResult = server.TryEndExecuteEngine(engineGuid);
            return zippedResult;
          }
        } catch(TimeoutException) {
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        } catch(CommunicationException) {
          ResetConnection();
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        }
      } while(retries < MAX_CONNECTION_RETRIES);
      Trace.TraceWarning("Reached max connection retries in TryEndExecuteEngine");
      return null;
    }

    private JobState TryGetJobState(IGridServer server, Guid engineGuid) {
      // check if the server is still working on the job
      int retries = 0;
      do {
        try {
          lock(connectionLock) {
            JobState jobState = server.JobState(engineGuid);
            return jobState;
          }
        } catch(TimeoutException) {
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        } catch(CommunicationException) {
          ResetConnection();
          retries++;
          Thread.Sleep(TimeSpan.FromSeconds(RETRY_TIMEOUT_SEC));
        }
      } while(retries < MAX_CONNECTION_RETRIES);
      Trace.TraceWarning("Reached max connection retries in TryGetJobState");
      return JobState.Unknown;
    }
  }
}
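
For orientation, a rough usage sketch of the class above: enqueue an operation with BeginExecuteOperation, wait on the returned handle, then collect the restored engine with EndExecuteOperation. Only the JobManager members are taken from this file; the scope/operation arguments and the endpoint address are placeholders.

using System;
using System.Threading;
using HeuristicLab.Core;

namespace HeuristicLab.Grid {
  public static class JobManagerUsageSketch {
    public static void Run(IScope globalScope, AtomicOperation operation) {
      // Placeholder endpoint address for the grid server.
      JobManager manager = new JobManager("net.tcp://gridserver:8000/GridService");
      manager.Reset(); // as the code above is written, Reset() is what opens the channel to the server

      // Hand the operation to the manager and block until a result (or an error) is flagged.
      WaitHandle handle = manager.BeginExecuteOperation(globalScope, operation);
      handle.WaitOne();

      try {
        ProcessingEngine restored = manager.EndExecuteOperation(operation);
        // ... inspect the restored engine / its scope here ...
      } catch(JobExecutionException ex) {
        // Thrown when the job exceeded MAX_RESTARTS without a usable connection to the server.
        Console.Error.WriteLine(ex.Message);
      }
    }
  }
}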