"""Module generator

This module is used to run the optimization algorithms.
It provides the class Generator and starts a new thread.
The priority of the thread can be set by a global function in this module.
(needed modules: win32api, win32process, win32con)
The interface between the Generator and the DSC is based on dictionaries.
@author: Tobias Heimpold
@version 2.0
"""
import threading
from time import gmtime, strftime, clock
from numpy import float64, mean
from random import random
from dsc_suite.ds.data_structure import COST_CRITERIA_LIST
from dsc_suite.gui.config import DATA_STRUCTURE_LIST, BENCHMARK_LIST, OPTIMIZATION_LIST
from dsc_suite.analyses.characteristics import get_statistic_characteristics
17
'''The following code was taken from
http://code.activestate.com/recipes/496767-set-process-priority-in-windows/
and was created by Bryan Niederberger.
'''


def setpriority(pid=None, priority=2):
    """Set the priority of a Windows process.

    Priority is a value between 0-5 where 2 is normal priority.
    Default sets the priority of the current python process but can
    take any valid process ID. Recipe 496767 from ActiveState Code.

    pid      -- process ID, or None for the current Python process.
    priority -- index 0-5 into the Windows priority classes below.
    """
    # Imported locally so the module can at least be loaded on
    # platforms without pywin32.
    import win32api, win32process, win32con

    priorityclasses = [win32process.IDLE_PRIORITY_CLASS,
                       win32process.BELOW_NORMAL_PRIORITY_CLASS,
                       win32process.NORMAL_PRIORITY_CLASS,
                       win32process.ABOVE_NORMAL_PRIORITY_CLASS,
                       win32process.HIGH_PRIORITY_CLASS,
                       win32process.REALTIME_PRIORITY_CLASS]
    if pid is None:
        pid = win32api.GetCurrentProcessId()
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
    win32process.SetPriorityClass(handle, priorityclasses[priority])
38
39
class Generator(threading.Thread):
    """Worker thread that runs the optimization algorithms.

    This class inherits threading.Thread and overrides run() so it can
    be executed as a separate thread.  This is necessary because the
    GUI would freeze during the calculation and the OS would offer the
    termination of the GUI.  The Generator class is just a box object
    for separately executed Python code.  It starts the calculation of
    the given trials which are generated with the GUI, defines the
    necessary functions for the algorithms and collects the information
    about the created data files.
    """

    # Guards access to self.current, which another thread (the GUI) may
    # read while run() is updating it.
    LockObject = threading.Lock()

    def __init__(self, dict, priority, time_check):
        """Uses a dictionary for the creation of new datafiles.

        dictionary structure:
            { index : { "init_params" : {...},
                        "individual_params" : {...}}}

        dict       -- trials to process (parameter name shadows the
                      builtin; kept for backward compatibility).
        priority   -- index 0-5 into the Windows priority classes,
                      passed to setpriority() from run().
        time_check -- if True, only estimate the runtime of each trial
                      instead of producing result files.
        """
        threading.Thread.__init__(self)
        self.todo = dict

        self.total = len(dict)
        self.current = 0

        self.times = []            # estimated runtime per trial (seconds)
        self.printabletimes = []   # the same, formatted "%H:%M:%S"

        # BUG FIX: the priority argument was never stored, so run()
        # raised AttributeError when it read self.__priority.
        self.__priority = priority

        self.time_check = time_check

    def run(self):
        """Calculates the datafiles for every trial in self.todo.

        Executed in the new thread after start() is called.  For each
        trial it builds the benchmark and data structure, wires up the
        callback functions required by the chosen algorithm, and either
        runs the optimization (writing result files and statistics back
        into init_params) or, in time_check mode, only estimates and
        records the runtime.
        """
        # Best effort: raising the process priority is optional and may
        # fail off-Windows or without permissions.
        try:
            setpriority(None, self.__priority)
        except Exception:
            pass

        for index in self.todo.keys():

            start = clock()

            Generator.LockObject.acquire()
            self.current = index
            Generator.LockObject.release()

            current_dict = self.todo[index]
            init_params = current_dict["init_params"]
            individual_params = current_dict["individual_params"]

            # Instantiate the benchmark and lift it to 3D if required.
            benchmark = BENCHMARK_LIST[init_params["benchmark"]][0](init_params["benchmark"])
            if benchmark.status['number of dimensions'] != 3:
                if init_params["z-Dimension"] == "auto":
                    benchmark.to_3d()
                else:
                    benchmark.to_3d(init_params["z-Dimension"])

            datastructure = DATA_STRUCTURE_LIST[init_params["datastructure"]][0](benchmark)

            randomSolution = datastructure.get_random_representation
            try:
                changeFunctions = datastructure.get_operations()
            except Exception:
                # Only Monte Carlo can work without change operations;
                # presumably it never calls changeSolution().
                if init_params["algorithm"] != "Monte Carlo":
                    raise ValueError
            recombineSolutions = datastructure.merge_representations

            mean_samples = init_params["mean_samples"]

            # In time_check mode sample only once and scale up later.
            if self.time_check and mean_samples is not None:
                init_params["mean_samples"] = 1

            # BUG FIX: default so that iruntime is always defined, even
            # when time_check is set but mean_samples is None.
            iruntime = 0
            if mean_samples is not None:
                istart = clock()

                # Estimate a normalization value for every cost
                # criterion that is both requested and calculable.
                calculable_costs = set(COST_CRITERIA_LIST.keys())
                cost_criteria = set(init_params["cost_criteria"].keys())
                intersection_criteria = list(calculable_costs & cost_criteria)

                cost_data = [[] for _ in intersection_criteria]
                for i in range(0, init_params["mean_samples"], 1):
                    solution = randomSolution()
                    costs = datastructure.cost_evaluation(solution, intersection_criteria)
                    # BUG FIX: was cost_data[costs.index(cost)], which
                    # misfiles duplicate cost values (index() returns
                    # the first occurrence).
                    for pos, cost in enumerate(costs):
                        cost_data[pos].append(cost)
                for pos, criteria in enumerate(intersection_criteria):
                    data = float64(cost_data[pos])
                    mean_value = data.mean()
                    # NOTE(review): assumes a "SUM" entry exists in
                    # cost_criteria — confirm against the GUI config.
                    init_params["cost_criteria"]["SUM"].update({"norm_value_" + criteria: mean_value})
                iend = clock()

                if self.time_check:
                    # Scale the single-sample time to the full count.
                    iruntime = (iend - istart) * mean_samples / init_params["mean_samples"]
                    init_params["mean_samples"] = mean_samples

            def get_costs(rep):
                """Evaluate all requested criteria for a representation
                and append the normalized, weighted sum."""
                calculable_costs = set(COST_CRITERIA_LIST.keys())
                cost_criteria = set(init_params["cost_criteria"].keys())
                intersection_criteria = sorted(calculable_costs & cost_criteria)

                costs = datastructure.cost_evaluation(rep, intersection_criteria)

                weighted_sum = 0
                for pos, criteria in enumerate(intersection_criteria):
                    weighted_sum += init_params["cost_criteria"]["SUM"]["weight_factor_" + criteria] * \
                        costs[pos] / \
                        init_params["cost_criteria"]["SUM"]["norm_value_" + criteria]
                costs.append(weighted_sum)
                return costs

            def changeSolution():
                """Pick one of the available change operations at random."""
                k = random()
                pos = int(k * len(changeFunctions))
                key = list(changeFunctions.keys())[pos]
                operation = changeFunctions[key][0]
                return operation

            # Callback interface handed to the optimization algorithm.
            functions = {"randomSolution": randomSolution,
                         "changeSolution": changeSolution,
                         "costEvaluation": get_costs,
                         "recombineSolutions": recombineSolutions}
            end = clock()

            criteria_list = sorted(init_params["cost_criteria"].keys())
            if init_params["algorithm"] != "Monte Carlo":
                criteria_list += ["ITER"]

            # One output file per criterion, named after the trial.
            file_info = {}
            file_info.update({"trial_name": str(init_params["trial_name"])})
            file_info.update({"file_name_list": []})
            for key in criteria_list:
                file_info["file_name_list"].append(str(init_params["trial_name"] + " " + key + " " + init_params["datastructure"] + " " + init_params["benchmark"]))

            if self.time_check:
                # The algorithm returns its estimated runtime; add the
                # setup time and the normalization-sampling estimate.
                time = OPTIMIZATION_LIST[init_params["algorithm"]][0](functions, individual_params, file_info, self.time_check)
                time += (end - start) + iruntime
                self.times.append(time)

            else:
                start = clock()
                files = OPTIMIZATION_LIST[init_params["algorithm"]][0](functions, individual_params, file_info, self.time_check)
                end = clock()

                init_params.update({"runtime": (end - start)})

                # Attach the produced file names and their statistic
                # characteristics to the trial's cost criteria.
                for pos, criteria in enumerate(criteria_list):
                    try:
                        init_params["cost_criteria"][criteria].update({"filename": files[pos]})
                    except KeyError:
                        init_params["cost_criteria"].update({criteria: {"filename": files[pos]}})
                    result = get_statistic_characteristics(files[pos])
                    if criteria != "ITER":
                        for key in result:
                            init_params["cost_criteria"][criteria].update({key.replace(" ", "_"): result[key]})

        if self.time_check:
            # Format the per-trial times plus the total for display.
            total_time = sum(self.times)
            for time in self.times:
                self.printabletimes.append(strftime("%H:%M:%S", gmtime(time)))
            self.printabletimes.append(strftime("%H:%M:%S", gmtime(total_time)))
        return
220