import os
import string
import sys

import mantid
from mantid import config
from mantid.simpleapi import *

from reduction.reducer import ReductionStep
---|
7 | |
---|
8 | class LoadData(ReductionStep): |
---|
9 | """Handles the loading of the data for Indirect instruments. The summing |
---|
10 | of input workspaces is handled in this routine, as well as the identifying |
---|
11 | of detectors that require masking. |
---|
12 | |
---|
13 | This step will use the following parameters from the Instrument's parameter |
---|
14 | file: |
---|
15 | |
---|
16 | * Workflow.Masking - identifies the method (if any) on which detectors that |
---|
17 | are to be masked should be identified. |
---|
18 | * Workflow.ChopDataIfGreaterThan - if this parameter is specified on the |
---|
19 | instrument, then the raw data will be split into multiple frames if |
---|
20 | the largest TOF (X) value in the workspace is greater than the provided |
---|
21 | value. |
---|
22 | """ |
---|
23 | |
---|
24 | _multiple_frames = False |
---|
25 | _sum = False |
---|
26 | _load_logs = False |
---|
27 | _monitor_index = None |
---|
28 | _detector_range_start = None |
---|
29 | _detector_range_end = None |
---|
30 | _masking_detectors = [] |
---|
31 | _parameter_file = None |
---|
32 | _data_files = {} |
---|
33 | _extra_load_opts = {} |
---|
34 | |
---|
35 | def __init__(self): |
---|
36 | """Initialise the ReductionStep. Constructor should set the initial |
---|
37 | parameters for the step. |
---|
38 | """ |
---|
39 | super(LoadData, self).__init__() |
---|
40 | self._sum = False |
---|
41 | self._load_logs = False |
---|
42 | self._multiple_frames = False |
---|
43 | self._monitor_index = None |
---|
44 | self._detector_range_start = None |
---|
45 | self._detector_range_end = None |
---|
46 | self._parameter_file = None |
---|
47 | self._data_files = {} |
---|
48 | |
---|
49 | def execute(self, reducer, file_ws): |
---|
50 | """Loads the data. |
---|
51 | """ |
---|
52 | wsname = '' |
---|
53 | |
---|
54 | for output_ws, filename in self._data_files.iteritems(): |
---|
55 | try: |
---|
56 | self._load_single_file(filename,output_ws) |
---|
57 | if wsname == "": |
---|
58 | wsname = output_ws |
---|
59 | except RuntimeError, exc: |
---|
60 | logger.warning("Error loading '%s': %s. File skipped" % (filename, str(exc))) |
---|
61 | continue |
---|
62 | |
---|
63 | if ( self._sum ) and ( len(self._data_files) > 1 ): |
---|
64 | ## Sum files |
---|
65 | merges = [] |
---|
66 | if ( self._multiple_frames ): |
---|
67 | self._sum_chopped(wsname) |
---|
68 | else: |
---|
69 | self._sum_regular(wsname) |
---|
70 | ## Need to adjust the reducer's list of workspaces |
---|
71 | self._data_files = {} |
---|
72 | self._data_files[wsname] = wsname |
---|
73 | |
---|
74 | def set_load_logs(self, value): |
---|
75 | self._load_logs = value |
---|
76 | |
---|
77 | def set_sum(self, value): |
---|
78 | self._sum = value |
---|
79 | |
---|
80 | def set_parameter_file(self, value): |
---|
81 | self._parameter_file = value |
---|
82 | |
---|
83 | def set_monitor_index(self, index): |
---|
84 | self._monitor_index = index |
---|
85 | |
---|
86 | def set_detector_range(self, start, end): |
---|
87 | self._detector_range_start = start |
---|
88 | self._detector_range_end = end |
---|
89 | |
---|
90 | def set_extra_load_opts(self, opts): |
---|
91 | self._extra_load_opts = opts |
---|
92 | |
---|
93 | def get_mask_list(self): |
---|
94 | return self._masking_detectors |
---|
95 | |
---|
96 | def set_ws_list(self, value): |
---|
97 | self._data_files = value |
---|
98 | |
---|
99 | def get_ws_list(self): |
---|
100 | return self._data_files |
---|
101 | |
---|
102 | def _load_single_file(self, filename, output_ws): |
---|
103 | logger.notice("Loading file %s" % filename) |
---|
104 | |
---|
105 | loader_name = self._load_data(filename, output_ws) |
---|
106 | |
---|
107 | inst_name = mtd[output_ws].getInstrument().getName() |
---|
108 | if inst_name == 'BASIS': |
---|
109 | ModeratorTzero(InputWorkspace=output_ws,OutputWorkspace= output_ws) |
---|
110 | basis_mask = mtd[output_ws].getInstrument().getStringParameter( |
---|
111 | 'Workflow.MaskFile')[0] |
---|
112 | # Quick hack for older BASIS files that only have one side |
---|
113 | #if (mtd[file].getRun()['run_number'] < 16693): |
---|
114 | # basis_mask = "BASIS_Mask_before_16693.xml" |
---|
115 | basis_mask_filename = os.path.join(config.getString('maskFiles.directory') |
---|
116 | , basis_mask) |
---|
117 | if os.path.isfile(basis_mask_filename): |
---|
118 | LoadMask(Instrument="BASIS", OutputWorkspace="__basis_mask", |
---|
119 | InputFile=basis_mask_filename) |
---|
120 | <<<<<<< HEAD |
---|
121 | MaskDetectors(Workspace=output_ws, MaskedWorkspace="__basis_mask") |
---|
122 | ======= |
---|
123 | MaskDetectors(Workspace=filename, MaskedWorkspace="__basis_mask") |
---|
124 | >>>>>>> origin/feature/4843_load_logs |
---|
125 | else: |
---|
126 | logger.notice("Couldn't find specified mask file : " + str(basis_mask_filename)) |
---|
127 | |
---|
128 | if self._parameter_file != None: |
---|
129 | LoadParameterFile(Workspace=output_ws,Filename= self._parameter_file) |
---|
130 | |
---|
131 | |
---|
132 | if self._require_chop_data(output_ws): |
---|
133 | ChopData(InputWorkspace=output_ws,OutputWorkspace= output_ws,Step= 20000.0,NChops= 5, IntegrationRangeLower=5000.0, |
---|
134 | IntegrationRangeUpper=10000.0, |
---|
135 | MonitorWorkspaceIndex=self._monitor_index) |
---|
136 | self._multiple_frames = True |
---|
137 | else: |
---|
138 | self._multiple_frames = False |
---|
139 | |
---|
140 | if ( self._multiple_frames ): |
---|
141 | workspaces = mtd[output_ws].getNames() |
---|
142 | else: |
---|
143 | workspaces = [output_ws] |
---|
144 | |
---|
145 | |
---|
146 | logger.debug('self._monitor_index = ' + str(self._monitor_index)) |
---|
147 | |
---|
148 | for ws in workspaces: |
---|
149 | <<<<<<< HEAD |
---|
150 | if isinstance(mtd[ws],mantid.api.IEventWorkspace): |
---|
151 | LoadNexusMonitors(Filename=self._data_files[output_ws], |
---|
152 | OutputWorkspace= ws+'_mon') |
---|
153 | ======= |
---|
154 | if (loader_name.endswith('Nexus')): |
---|
155 | LoadNexusMonitors(Filename=self._data_files[output_ws],OutputWorkspace= ws+'_mon') |
---|
156 | >>>>>>> origin/feature/4843_load_logs |
---|
157 | else: |
---|
158 | ## Extract Monitor Spectrum |
---|
159 | ExtractSingleSpectrum(InputWorkspace=ws,OutputWorkspace= ws+'_mon',WorkspaceIndex= self._monitor_index) |
---|
160 | ## Crop the workspace to remove uninteresting detectors |
---|
161 | CropWorkspace(InputWorkspace=ws,OutputWorkspace= ws, |
---|
162 | StartWorkspaceIndex=self._detector_range_start, |
---|
163 | EndWorkspaceIndex=self._detector_range_end) |
---|
164 | |
---|
165 | try: |
---|
166 | msk = mtd[workspaces[0]].getInstrument().getStringParameter('Workflow.Masking')[0] |
---|
167 | except IndexError: |
---|
168 | msk = 'None' |
---|
169 | if ( msk == 'IdentifyNoisyDetectors' ): |
---|
170 | self._identify_bad_detectors(workspaces[0]) |
---|
171 | |
---|
172 | def _load_data(self, filename, output_ws): |
---|
173 | if self._parameter_file is not None and "VESUVIO" in self._parameter_file: |
---|
174 | loaded_ws = LoadVesuvio(Filename=filename, OutputWorkspace=output_ws, SpectrumList="1-198", **self._extra_load_opts) |
---|
175 | loader_name = "LoadVesuvio" |
---|
176 | else: |
---|
177 | <<<<<<< HEAD |
---|
178 | loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=False, **self._extra_load_opts) |
---|
179 | ======= |
---|
180 | if self._load_logs == True: |
---|
181 | loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=True, **self._extra_load_opts) |
---|
182 | logger.notice("Loaded logs") |
---|
183 | else: |
---|
184 | loaded_ws = Load(Filename=filename, OutputWorkspace=output_ws, LoadLogFiles=False, **self._extra_load_opts) |
---|
185 | >>>>>>> origin/feature/4843_load_logs |
---|
186 | loader_handle = loaded_ws.getHistory().lastAlgorithm() |
---|
187 | loader_name = loader_handle.getPropertyValue("LoaderName") |
---|
188 | return loader_name |
---|
189 | |
---|
190 | def _sum_regular(self, wsname): |
---|
191 | merges = [[], []] |
---|
192 | for ws in self._data_files: |
---|
193 | merges[0].append(ws) |
---|
194 | merges[1].append(ws+'_mon') |
---|
195 | MergeRuns(InputWorkspaces=','.join(merges[0]),OutputWorkspace= wsname) |
---|
196 | MergeRuns(InputWorkspaces=','.join(merges[1]),OutputWorkspace= wsname+'_mon') |
---|
197 | for n in range(1, len(merges[0])): |
---|
198 | DeleteWorkspace(Workspace=merges[0][n]) |
---|
199 | DeleteWorkspace(Workspace=merges[1][n]) |
---|
200 | factor = 1.0 / len(self._data_files) |
---|
201 | Scale(InputWorkspace=wsname,OutputWorkspace= wsname,Factor= factor) |
---|
202 | Scale(InputWorkspace=wsname+'_mon',OutputWorkspace= wsname+'_mon',Factor= factor) |
---|
203 | |
---|
204 | def _sum_chopped(self, wsname): |
---|
205 | merges = [] |
---|
206 | nmerges = len(mtd[wsname].getNames()) |
---|
207 | for n in range(0, nmerges): |
---|
208 | merges.append([]) |
---|
209 | merges.append([]) |
---|
210 | for file in self._data_files: |
---|
211 | try: |
---|
212 | merges[2*n].append(mtd[file].getNames()[n]) |
---|
213 | merges[2*n+1].append(mtd[file].getNames()[n]+'_mon') |
---|
214 | except AttributeError: |
---|
215 | if n == 0: |
---|
216 | merges[0].append(file) |
---|
217 | merges[1].append(file+'_mon') |
---|
218 | for merge in merges: |
---|
219 | MergeRuns(InputWorkspaces=','.join(merge),OutputWorkspace= merge[0]) |
---|
220 | factor = 1.0 / len(merge) |
---|
221 | Scale(InputWorkspace=merge[0],OutputWorkspace= merge[0],Factor= factor) |
---|
222 | for n in range(1, len(merge)): |
---|
223 | DeleteWorkspace(Workspace=merge[n]) |
---|
224 | |
---|
225 | def _identify_bad_detectors(self, workspace): |
---|
226 | IdentifyNoisyDetectors(InputWorkspace=workspace,OutputWorkspace= '__temp_tsc_noise') |
---|
227 | ws = mtd['__temp_tsc_noise'] |
---|
228 | nhist = ws.getNumberHistograms() |
---|
229 | self._masking_detectors = [] |
---|
230 | for i in range(0, nhist): |
---|
231 | if ( ws.readY(i)[0] == 0.0 ): |
---|
232 | self._masking_detectors.append(i) |
---|
233 | DeleteWorkspace(Workspace='__temp_tsc_noise') |
---|
234 | return self._masking_detectors |
---|
235 | |
---|
236 | def _require_chop_data(self, ws): |
---|
237 | try: |
---|
238 | cdigt = mtd[ws].getInstrument().getNumberParameter( |
---|
239 | 'Workflow.ChopDataIfGreaterThan')[0] |
---|
240 | except IndexError: |
---|
241 | return False |
---|
242 | if ( mtd[ws].readX(0)[mtd[ws].blocksize()] > cdigt ): |
---|
243 | return True |
---|
244 | else: |
---|
245 | return False |
---|
246 | |
---|
247 | def is_multiple_frames(self): |
---|
248 | return self._multiple_frames |
---|
249 | |
---|
250 | #-------------------------------------------------------------------------------------------------- |
---|
251 | |
---|
class BackgroundOperations(ReductionStep):
    """Removes, if requested, a background from the detectors data in TOF
    units. Currently only uses the FlatBackground algorithm, more options
    to cover SNS use to be added at a later point.
    """
    _multiple_frames = False
    _background_start = None
    _background_end = None

    def __init__(self, MultipleFrames=False):
        super(BackgroundOperations, self).__init__()
        self._multiple_frames = MultipleFrames
        self._background_start = None
        self._background_end = None

    def execute(self, reducer, file_ws):
        """Subtract a flat background (mean over [start, end]) from each
        workspace, working on distribution data during the subtraction.
        """
        workspaces = [file_ws]
        if self._multiple_frames:
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                # Not a workspace group; fall back to the single workspace
                pass

        for wksp in workspaces:
            ConvertToDistribution(Workspace=wksp)
            FlatBackground(InputWorkspace=wksp, OutputWorkspace=wksp,
                           StartX=self._background_start,
                           EndX=self._background_end, Mode='Mean')
            ConvertFromDistribution(Workspace=wksp)

    def set_range(self, start, end):
        """Set the TOF range over which the flat background is estimated."""
        self._background_start = start
        self._background_end = end
---|
285 | |
---|
286 | class CreateCalibrationWorkspace(ReductionStep): |
---|
287 | """Creates a calibration workspace from a White-Beam Vanadium run. |
---|
288 | """ |
---|
289 | |
---|
290 | _back_min = None |
---|
291 | _back_max = None |
---|
292 | _peak_min = None |
---|
293 | _peak_max = None |
---|
294 | _detector_range_start = None |
---|
295 | _detector_range_end = None |
---|
296 | _calib_raw_files = [] |
---|
297 | _calib_workspace = None |
---|
298 | _analyser = None |
---|
299 | _reflection = None |
---|
300 | |
---|
301 | def __init__(self): |
---|
302 | super(CreateCalibrationWorkspace, self).__init__() |
---|
303 | self._back_min = None |
---|
304 | self._back_max = None |
---|
305 | self._peak_min = None |
---|
306 | self._peak_max = None |
---|
307 | self._detector_range_start = None |
---|
308 | self._detector_range_end = None |
---|
309 | self._calib_raw_files = [] |
---|
310 | self._calib_workspace = None |
---|
311 | self._analyser = None |
---|
312 | self._reflection = None |
---|
313 | |
---|
314 | def execute(self, reducer, file_ws): |
---|
315 | """The information we use here is not from the main reducer object |
---|
316 | (ie, we are not looking at one of the data files.) |
---|
317 | |
---|
318 | The ApplyCalibration step is related to this. |
---|
319 | """ |
---|
320 | rawfiles = self._calib_raw_files |
---|
321 | if ( len(rawfiles) == 0 ): |
---|
322 | print "Indirect: No calibration run specified." |
---|
323 | return |
---|
324 | |
---|
325 | backMin, backMax, peakMin, peakMax = self._get_calib_details() |
---|
326 | specMin = self._detector_range_start + 1 |
---|
327 | specMax = self._detector_range_end + 1 |
---|
328 | |
---|
329 | runs = [] |
---|
330 | for file in rawfiles: |
---|
331 | (direct, filename) = os.path.split(file) |
---|
332 | (root, ext) = os.path.splitext(filename) |
---|
333 | try: |
---|
334 | Load(Filename=file,OutputWorkspace= root, SpectrumMin=specMin, SpectrumMax=specMax, |
---|
335 | LoadLogFiles=False) |
---|
336 | runs.append(root) |
---|
337 | except: |
---|
338 | sys.exit('Indirect: Could not load raw file: ' + file) |
---|
339 | cwsn = 'calibration' |
---|
340 | if ( len(runs) > 1 ): |
---|
341 | MergeRuns(InputWorkspaces=",".join(runs),OutputWorkspace= cwsn) |
---|
342 | factor = 1.0 / len(runs) |
---|
343 | Scale(InputWorkspace=cwsn,OutputWorkspace= cwsn,Factor= factor) |
---|
344 | else: |
---|
345 | cwsn = runs[0] |
---|
346 | FlatBackground(InputWorkspace=cwsn,OutputWorkspace= cwsn,StartX= backMin,EndX= backMax, Mode='Mean') |
---|
347 | Integration(InputWorkspace=cwsn,OutputWorkspace= cwsn,RangeLower= peakMin,RangeUpper= peakMax) |
---|
348 | cal_ws = mtd[cwsn] |
---|
349 | sum = 0 |
---|
350 | for i in range(0, cal_ws.getNumberHistograms()): |
---|
351 | sum += cal_ws.readY(i)[0] |
---|
352 | |
---|
353 | runNo = cal_ws.getRun().getLogData("run_number").value |
---|
354 | outWS_n = runs[0][:3] + runNo + '_' + self._analyser + self._reflection + '_calib' |
---|
355 | |
---|
356 | value = 1.0 / ( sum / cal_ws.getNumberHistograms() ) |
---|
357 | Scale(InputWorkspace=cwsn,OutputWorkspace= cwsn,Factor= value,Operation= 'Multiply') |
---|
358 | |
---|
359 | RenameWorkspace(InputWorkspace=cwsn,OutputWorkspace= outWS_n) |
---|
360 | self._calib_workspace = outWS_n # Set result workspace value |
---|
361 | if ( len(runs) > 1 ): |
---|
362 | for run in runs: |
---|
363 | DeleteWorkspace(Workspace=run) |
---|
364 | |
---|
365 | def set_parameters(self, back_min, back_max, peak_min, peak_max): |
---|
366 | self._back_min = back_min |
---|
367 | self._back_max = back_max |
---|
368 | self._peak_min = peak_min |
---|
369 | self._peak_max = peak_max |
---|
370 | |
---|
371 | def set_detector_range(self, start, end): |
---|
372 | self._detector_range_start = start |
---|
373 | self._detector_range_end = end |
---|
374 | |
---|
375 | def set_instrument_workspace(self, workspace): |
---|
376 | self._instrument_workspace = workspace |
---|
377 | |
---|
378 | def set_files(self, files): |
---|
379 | if len(files) > 0: |
---|
380 | self._calib_raw_files = files |
---|
381 | else: |
---|
382 | raise ValueError("Indirect: Can't set calib files if you don't " |
---|
383 | "specify a calib file.") |
---|
384 | |
---|
385 | def set_analyser(self, analyser): |
---|
386 | self._analyser = str(analyser) |
---|
387 | |
---|
388 | def set_reflection(self, reflection): |
---|
389 | self._reflection = str(reflection) |
---|
390 | |
---|
391 | def result_workspace(self): |
---|
392 | return self._calib_workspace |
---|
393 | |
---|
394 | def _get_calib_details(self): |
---|
395 | if ( self._back_min is None and |
---|
396 | self._back_max is None and |
---|
397 | self._peak_min is None and |
---|
398 | self._peak_max is None ): |
---|
399 | instrument = mtd[self._instrument_workspace].getInstrument() |
---|
400 | try: |
---|
401 | backMin = instrument.getNumberParameter('back-start')[0] |
---|
402 | backMax = instrument.getNumberParameter('back-end')[0] |
---|
403 | peakMin = instrument.getNumberParameter('peak-start')[0] |
---|
404 | peakMax = instrument.getNumberParameter('peak-end')[0] |
---|
405 | except IndexError: |
---|
406 | sys.exit("Indirect: Unable to retrieve calibration details " |
---|
407 | "from instrument of workspace.") |
---|
408 | else: |
---|
409 | return backMin, backMax, peakMin, peakMax |
---|
410 | else: |
---|
411 | return ( self._back_min, self._back_max, self._peak_min, |
---|
412 | self._peak_max ) |
---|
413 | |
---|
class ApplyCalibration(ReductionStep):
    """Applies a calibration workspace to the data.
    """

    _multiple_frames = False
    _calib_workspace = None

    def __init__(self):
        super(ApplyCalibration, self).__init__()
        self._multiple_frames = False
        self._calib_workspace = None

    def execute(self, reducer, file_ws):
        """Divide each workspace by the calibration workspace (no-op when
        no calibration workspace has been set).
        """
        calib = self._calib_workspace
        if calib is None: # No calibration workspace set
            return

        workspaces = [file_ws]
        if self._multiple_frames:
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                # Not a workspace group; keep the single workspace
                pass

        for wksp in workspaces:
            Divide(LHSWorkspace=wksp, RHSWorkspace=calib, OutputWorkspace=wksp)

    def set_is_multiple_frames(self, value):
        self._multiple_frames = value

    def set_calib_workspace(self, value):
        self._calib_workspace = value
---|
445 | |
---|
class HandleMonitor(ReductionStep):
    """Handles the monitor for the reduction of inelastic indirect data.

    This uses the following parameters from the instrument:
    * Workflow.MonitorArea
    * Workflow.MonitorThickness
    * Workflow.MonitorScalingFactor
    * Workflow.UnwrapMonitor
    """
    # True when the input is a group of chopped frames
    _multiple_frames = False

    def __init__(self, MultipleFrames=False):
        """Constructor for HandleMonitor routine.
        """
        super(HandleMonitor, self).__init__()
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        """Does everything we want to with the Monitor.

        Rebins, optionally unwraps (otherwise converts to wavelength),
        applies the efficiency correction and scales each '<ws>_mon'
        monitor workspace.
        """
        if ( self._multiple_frames ):
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                # Not a workspace group after all; treat as single workspace
                workspaces = [file_ws]
        else:
            workspaces = [file_ws]

        for ws in workspaces:
            # Monitor workspaces follow the '<name>_mon' naming convention
            monitor = ws+'_mon'
            self._rebin_monitor(ws)
            if self._need_to_unwrap(ws):
                self._unwrap_monitor(ws)
            else:
                ConvertUnits(InputWorkspace=monitor,OutputWorkspace= monitor,Target= 'Wavelength')
            self._monitor_efficiency(monitor)
            self._scale_monitor(monitor)

    def _rebin_monitor(self, ws):
        """For some instruments (e.g. BASIS) the monitor binning is too
        fine and needs to be rebinned. This is controlled
        by the 'Workflow.Monitor.RebinStep' parameter set on the
        instrument. If no parameter is present, no rebinning will occur.
        """
        try:
            stepsize = mtd[ws].getInstrument().getNumberParameter('Workflow.Monitor.RebinStep')[0]
        except IndexError:
            logger.notice("Monitor is not being rebinned.")
        else:
            Rebin(InputWorkspace=ws+'_mon',OutputWorkspace= ws+'_mon',Params= stepsize)

    def _need_to_unwrap(self, ws):
        """Decide from the 'Workflow.UnwrapMonitor' instrument parameter
        whether the monitor should be unwrapped. Defaults to False when
        the parameter is absent or unrecognised.
        """
        try:
            unwrap = mtd[ws].getInstrument().getStringParameter(
                'Workflow.UnwrapMonitor')[0]
        except IndexError:
            return False # Default it to not unwrap
        if ( unwrap == 'Never' ):
            return False
        elif ( unwrap == 'Always' ):
            return True
        elif ( unwrap == 'BaseOnTimeRegime' ):
            # Unwrap only when monitor and detectors start at the same
            # X (time) value, i.e. they share a time regime.
            SpecMon = mtd[ws+'_mon'].readX(0)[0]
            SpecDet = mtd[ws].readX(0)[0]
            if ( SpecMon == SpecDet ):
                return True
            else:
                return False
        else:
            return False

    def _unwrap_monitor(self, ws):
        """Unwrap the monitor, then remove and smooth over the join point
        introduced by the unwrapping.
        """
        l_ref = self._get_reference_length(ws, 0)
        monitor = ws+'_mon'
        unwrapped_ws, join = UnwrapMonitor(InputWorkspace=monitor, OutputWorkspace=monitor, LRef=l_ref)
        # Remove the bins immediately around the join, interpolating across
        RemoveBins(InputWorkspace=monitor,OutputWorkspace= monitor,XMin= join-0.001,XMax= join+0.001,
            Interpolation='Linear')
        FFTSmooth(InputWorkspace=monitor,OutputWorkspace=monitor,WorkspaceIndex=0)

    def _get_reference_length(self, ws, index):
        """Reference length for unwrapping: the Z component of the
        source-to-sample vector plus the sample-to-detector distance for
        the spectrum at the given index.
        """
        workspace = mtd[ws]
        instrument = workspace.getInstrument()
        sample = instrument.getSample()
        source = instrument.getSource()
        detector = workspace.getDetector(index)
        sample_to_source = sample.getPos() - source.getPos()
        r = detector.getDistance(sample)
        x = sample_to_source.getZ()
        result = x + r
        return result

    def _monitor_efficiency(self, monitor):
        """Apply the monitor efficiency correction using the area and
        thickness instrument parameters. Raises ValueError when the
        parameters are missing.
        """
        inst = mtd[monitor].getInstrument()
        try:
            area = inst.getNumberParameter('Workflow.MonitorArea')[0]
            thickness = inst.getNumberParameter('Workflow.MonitorThickness')[0]
        except IndexError:
            raise ValueError('Unable to retrieve monitor thickness and '
                'area from Instrument Parameter file.')
        else:
            # -1 in either parameter means: skip the correction
            if ( area == -1 or thickness == -1 ):
                return
            OneMinusExponentialCor(InputWorkspace=monitor,OutputWorkspace= monitor,C= (8.3 * thickness),C1= area)

    def _scale_monitor(self, monitor):
        """Some instruments wish to scale their data. Doing this at the
        monitor is the most efficient way to do this. This is controlled
        by the 'Workflow.MonitorScalingFactor' parameter set on the
        instrument.
        """
        try:
            factor = mtd[monitor].getInstrument().getNumberParameter(
                'Workflow.MonitorScalingFactor')[0]
        except IndexError:
            print "Monitor is not being scaled."
        else:
            if factor != 1.0:
                Scale(InputWorkspace=monitor,OutputWorkspace= monitor,Factor= ( 1.0 / factor ),Operation= 'Multiply')
---|
564 | |
---|
class CorrectByMonitor(ReductionStep):
    """Normalises each workspace by its '<name>_mon' monitor workspace,
    deleting the monitor afterwards.
    """

    _multiple_frames = False
    _emode = "Indirect"

    def __init__(self, MultipleFrames=False, EMode="Indirect"):
        super(CorrectByMonitor, self).__init__()
        self._multiple_frames = MultipleFrames
        self._emode = EMode

    def execute(self, reducer, file_ws):
        workspaces = [file_ws]
        if self._multiple_frames:
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                # Not a workspace group; keep the single workspace
                pass

        for wksp in workspaces:
            monitor = wksp + '_mon'
            # Convert to wavelength, match binning to the monitor, divide,
            # then drop the no-longer-needed monitor workspace.
            ConvertUnits(InputWorkspace=wksp, OutputWorkspace=wksp,
                         Target="Wavelength", EMode=self._emode)
            RebinToWorkspace(WorkspaceToRebin=wksp, WorkspaceToMatch=monitor,
                             OutputWorkspace=wksp)
            Divide(LHSWorkspace=wksp, RHSWorkspace=monitor, OutputWorkspace=wksp)
            DeleteWorkspace(Workspace=monitor)

    def set_emode(self, emode):
        """Set the energy mode used for the unit conversion."""
        self._emode = emode
---|
596 | |
---|
class FoldData(ReductionStep):
    """Merges the members of a workspace group into one workspace, dividing
    by a 'scaling' workspace that counts how many members covered each X
    bin, so overlapping regions become averages rather than sums.
    """
    _result_workspaces = []

    def __init__(self):
        super(FoldData, self).__init__()
        self._result_workspaces = []

    def execute(self, reducer, file_ws):
        try:
            wsgroup = mtd[file_ws].getNames()
        except AttributeError:
            return # Not a grouped workspace
        ws = file_ws+'_merged'
        MergeRuns(InputWorkspaces=','.join(wsgroup), OutputWorkspace=ws)
        scaling = self._create_scaling_workspace(wsgroup, ws)
        for workspace in wsgroup:
            DeleteWorkspace(Workspace=workspace)
        Divide(LHSWorkspace=ws, RHSWorkspace=scaling, OutputWorkspace=ws)
        DeleteWorkspace(Workspace=scaling)
        RenameWorkspace(InputWorkspace=ws, OutputWorkspace=file_ws)
        self._result_workspaces.append(file_ws)

    def get_result_workspaces(self):
        return self._result_workspaces

    def _create_scaling_workspace(self, wsgroup, merged):
        """Create and return the name of a workspace whose Y values count
        how many group members' X ranges contain each X value of 'merged'.
        """
        wsname = '__scaling'
        unit = ''
        ranges = []
        lowest = 0
        highest = 0
        for ws in wsgroup:
            # Take the X unit from the first member of the group
            if ( unit == '' ):
                unit = mtd[ws].getAxis(0).getUnit().unitID()
            low = mtd[ws].dataX(0)[0]
            high = mtd[ws].dataX(0)[mtd[ws].blocksize()-1]
            ranges.append([low, high])
            if low < lowest: lowest = low
            if high > highest: highest = high
        dataX = mtd[merged].readX(0)
        dataY = []
        dataE = []
        for i in range(0, mtd[merged].blocksize()):
            dataE.append(0.0)
            dataY.append(self._ws_in_range(ranges, dataX[i]))
        CreateWorkspace(OutputWorkspace=wsname, DataX=dataX, DataY=dataY, DataE=dataE, UnitX=unit)
        return wsname

    def _ws_in_range(self, ranges, xval):
        """Return how many [low, high] ranges contain xval (inclusive)."""
        result = 0
        # 'rng' instead of 'range' so the builtin is not shadowed
        for rng in ranges:
            if ( xval >= rng[0] and xval <= rng[1] ): result += 1
        return result
---|
650 | |
---|
class ConvertToCm1(ReductionStep):
    """
    Converts the workspaces to cm-1.
    """

    _multiple_frames = False
    _save_to_cm_1 = False

    def __init__(self, MultipleFrames=False):
        super(ConvertToCm1, self).__init__()
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        """Convert each workspace to DeltaE_inWavenumber (cm-1); does
        nothing unless set_save_to_cm_1(True) has been called.
        """
        if not self._save_to_cm_1:
            return

        if ( self._multiple_frames ):
            try:
                workspaceNames = mtd[file_ws].getNames()
            except AttributeError:
                workspaceNames = [file_ws]
        else:
            workspaceNames = [file_ws]

        for wsName in workspaceNames:
            try:
                ws = mtd[wsName]
            except Exception:
                # Workspace not retrievable: skip it. Narrowed from a bare
                # 'except:' so SystemExit/KeyboardInterrupt propagate.
                continue
            ConvertUnits(InputWorkspace=ws, OutputWorkspace=ws, EMode='Indirect', Target='DeltaE_inWavenumber')

    def set_save_to_cm_1(self, save_to_cm_1):
        self._save_to_cm_1 = save_to_cm_1
---|
685 | |
---|
class ConvertToEnergy(ReductionStep):
    """Converts the data to energy transfer (DeltaE), applies the ki/kf
    correction, and optionally rebins the result.
    """
    _rebin_string = None
    _multiple_frames = False

    def __init__(self, MultipleFrames=False):
        super(ConvertToEnergy, self).__init__()
        self._rebin_string = None
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        if ( self._multiple_frames ):
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                workspaces = [file_ws]
        else:
            workspaces = [file_ws]

        for ws in workspaces:
            ConvertUnits(InputWorkspace=ws, OutputWorkspace=ws, Target='DeltaE', EMode='Indirect')
            CorrectKiKf(InputWorkspace=ws, OutputWorkspace=ws, EMode='Indirect')
            if self._rebin_string is not None:
                if not self._multiple_frames:
                    Rebin(InputWorkspace=ws, OutputWorkspace=ws, Params=self._rebin_string)

        # Guard on _rebin_string: _rebin_mf would raise AttributeError on
        # None (it calls .split on the rebin string).
        if self._multiple_frames and self._rebin_string is not None:
            self._rebin_mf(workspaces)

    def set_rebin_string(self, value):
        if value is not None:
            self._rebin_string = value

    def _rebin_mf(self, workspaces):
        """Rebin each multiple-frame workspace; frames with fewer bins than
        the largest frame are rebinned with a binning string truncated to
        its last range (no start boundary).
        """
        nbin = 0
        rstwo = self._rebin_string.split(",")
        if len(rstwo) >= 5:
            rstwo = ",".join(rstwo[2:])
        else:
            rstwo = self._rebin_string
        for ws in workspaces:
            nbins = mtd[ws].blocksize()
            if nbins > nbin: nbin = nbins
        for ws in workspaces:
            if (mtd[ws].blocksize() == nbin):
                Rebin(InputWorkspace=ws, OutputWorkspace=ws, Params=self._rebin_string)
            else:
                Rebin(InputWorkspace=ws, OutputWorkspace=ws, Params=rstwo)
---|
735 | |
---|
class DetailedBalance(ReductionStep):
    """Applies a detailed-balance correction derived from the sample
    temperature; a no-op until set_temperature() is called.
    """
    _temp = None
    _multiple_frames = False

    def __init__(self, MultipleFrames=False):
        super(DetailedBalance, self).__init__()
        self._temp = None
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        temperature = self._temp
        if temperature is None:
            return

        # 11.606 K/meV converts the temperature into the exponent scale
        correction = 11.606 / (2 * temperature)

        workspaces = mtd[file_ws].getNames() if self._multiple_frames else [file_ws]

        for wksp in workspaces:
            ExponentialCorrection(InputWorkspace=wksp, OutputWorkspace=wksp,
                                  C0=1.0, C1=correction, Operation="Multiply")

    def set_temperature(self, temp):
        """Set the sample temperature used for the correction."""
        self._temp = temp
---|
763 | |
---|
class Scaling(ReductionStep):
    """Multiplies the data by a user-supplied constant factor; a no-op
    until set_scale_factor() is called.
    """
    _scale_factor = None
    _multiple_frames = False

    def __init__(self, MultipleFrames=False):
        super(Scaling, self).__init__()
        self._scale_factor = None
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        factor = self._scale_factor
        if factor is None: # Scale factor is the default value, 1.0
            return

        workspaces = mtd[file_ws].getNames() if self._multiple_frames else [file_ws]

        for wksp in workspaces:
            Scale(InputWorkspace=wksp, OutputWorkspace=wksp,
                  Factor=factor, Operation="Multiply")

    def set_scale_factor(self, scaleFactor):
        self._scale_factor = scaleFactor
---|
789 | |
---|
class Grouping(ReductionStep):
    """This ReductionStep handles the grouping and renaming of the final
    workspace. In most cases, this will require a Rebin on the data. The option
    to do this is given in the ConvertToEnergy step.

    The step will use the following parameters on the workspace:
    * 'Workflow.GroupingMethod' - if this is set to Fixed, it indicates that
        the grouping is defined at an instrument level and this can not be
        altered by the user. In this case, the value given in the function
        set_grouping_policy() is ignored and an XML grouping file is created
        based on the string in
    * 'Workflow.FixedGrouping', which is of the form: "0-69,70-139" where the
        comma seperates a group, and the hyphen indicates a range. The numbers
        given are taken to be the workspace indices.

    If a masking list has been set using set_mask_list(), then the workspace
    indices listed will not be included in the group (if any grouping is in
    fact performed).
    """
    _grouping_policy = None
    _masking_detectors = []
    _result_workspaces = []
    _multiple_frames = False

    def __init__(self, MultipleFrames=False):
        super(Grouping, self).__init__()
        self._grouping_policy = None
        self._masking_detectors = []
        self._result_workspaces = []
        self._multiple_frames = MultipleFrames

    def execute(self, reducer, file_ws):
        """Group each input workspace according to the chosen policy, or —
        when no policy is set — according to the instrument's
        'Workflow.GroupingMethod' parameter (defaulting to 'User')."""
        if self._multiple_frames:
            try:
                workspaces = mtd[file_ws].getNames()
            except AttributeError:
                # file_ws was not a group workspace after all.
                workspaces = [file_ws]
        else:
            workspaces = [file_ws]

        for ws in workspaces:
            if self._grouping_policy is not None:
                self._result_workspaces.append(self._group_data(ws))
            else:
                try:
                    group = mtd[ws].getInstrument().getStringParameter(
                        'Workflow.GroupingMethod')[0]
                except IndexError:
                    # Parameter absent: fall back to user-defined grouping.
                    group = 'User'
                if group == 'Fixed':
                    self._result_workspaces.append(self._group_fixed(ws))
                elif group == 'File':
                    self._grouping_policy = mtd[ws].getInstrument().getStringParameter(
                        'Workflow.GroupingFile')[0]
                    self._result_workspaces.append(self._group_data(ws))
                else:
                    self._result_workspaces.append(self._group_data(ws))

    def set_grouping_policy(self, value):
        """Set the grouping policy: 'Individual', 'All', or a mapping file."""
        self._grouping_policy = value

    def set_mask_list(self, value):
        """Set the list of workspace indices to exclude from the groups."""
        self._masking_detectors = value

    def get_result_workspaces(self):
        """Return the names of the workspaces produced by execute()."""
        return self._result_workspaces

    def _group_fixed(self, workspace):
        """Group the workspace using the instrument-defined fixed grouping.

        Builds an XML grouping file from 'Workflow.FixedGrouping' (masked
        indices removed) and runs GroupDetectors with it.
        """
        try:
            grps = mtd[workspace].getInstrument().getStringParameter(
                'Workflow.FixedGrouping')[0]
        except IndexError:
            raise AttributeError('Could not retrieve fixed grouping setting '
                                 'from the instrument parameter file.')

        # Expand "a-b,c-d" into lists of workspace indices. list() is
        # required: range objects do not support .remove() (Python 3).
        group_list = []
        for group in grps.split(","):
            group_to_from = group.split("-")
            group_vals = list(range(int(group_to_from[0]),
                                    int(group_to_from[1]) + 1))
            group_list.append(group_vals)

        # Strip out any masked detectors from every group.
        for i in self._masking_detectors:
            for grp in group_list:
                try:
                    grp.remove(i)
                except ValueError:
                    pass

        # Detector IDs in the XML are 1-based, hence the i + 1.
        xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
        xml += "<detector-grouping>\n"
        for grp in group_list:
            xml += "<group name=\"group\">\n"
            xml += " <ids val=\""
            xml += ",".join(str(i + 1) for i in grp)
            xml += "\"/>\n"
            xml += "</group>\n"
        xml += "</detector-grouping>\n"

        xfile = os.path.join(config.getString('defaultsave.directory'),
                             'fixedGrp.xml')
        # Context manager guarantees the handle is closed even on error
        # (and avoids shadowing the 'file' builtin).
        with open(xfile, 'w') as handle:
            handle.write(xml)
        GroupDetectors(InputWorkspace=workspace, OutputWorkspace=workspace,
                       MapFile=xfile, Behaviour='Average')
        return workspace

    def _group_data(self, workspace):
        """Group the workspace according to self._grouping_policy.

        'Individual'/None leaves the data untouched; 'All' averages every
        unmasked spectrum into one; anything else is treated as the name
        of a grouping map file.
        """
        grouping = self._grouping_policy
        if grouping == 'Individual' or grouping is None:
            return workspace
        elif grouping == 'All':
            nhist = mtd[workspace].getNumberHistograms()
            wslist = [i for i in range(nhist)
                      if i not in self._masking_detectors]
            GroupDetectors(InputWorkspace=workspace, OutputWorkspace=workspace,
                           WorkspaceIndexList=wslist, Behaviour='Average')
        else:
            # Assume we have a grouping file.
            # First lets, find the file...
            if os.path.isfile(grouping):
                grouping_filename = grouping
            else:
                grouping_filename = os.path.join(
                    config.getString('groupingFiles.directory'), grouping)
            # Final check that the Mapfile exists, if not don't run the alg.
            if os.path.isfile(grouping_filename):
                GroupDetectors(InputWorkspace=workspace,
                               OutputWorkspace=workspace,
                               MapFile=grouping_filename,
                               Behaviour='Average')
        return workspace
925 | |
---|
class SaveItem(ReductionStep):
    """This routine will save a given workspace in the selected file formats.
    The currently recognised formats are:
    * 'spe' - SPE ASCII format
    * 'nxs' - NeXus compressed file format
    * 'nxspe' - NeXus SPE file format
    * 'ascii' - Comma Seperated Values (file extension '.dat')
    * 'gss' - GSAS file format (N.B.: units will be converted to Time of
        Flight if not already in that unit for saving in this format).
    * 'aclimax' - rebinned ASCII suitable for aCLIMAX (meV by default,
        cm-1 when set_save_to_cm_1(True) has been called).
    """
    _formats = []
    _save_to_cm_1 = False

    def __init__(self):
        super(SaveItem, self).__init__()
        self._formats = []
        # Reset per instance rather than relying on the class attribute.
        self._save_to_cm_1 = False

    def execute(self, reducer, file_ws):
        """Save file_ws once per requested format; the base filename comes
        from the Naming step's convention for this workspace."""
        naming = Naming()
        filename = naming._get_ws_name(file_ws)
        # 'fmt' rather than 'format' to avoid shadowing the builtin.
        for fmt in self._formats:
            if fmt == 'spe':
                SaveSPE(InputWorkspace=file_ws, Filename=filename + '.spe')
            elif fmt == 'nxs':
                SaveNexusProcessed(InputWorkspace=file_ws,
                                   Filename=filename + '.nxs')
            elif fmt == 'nxspe':
                SaveNXSPE(InputWorkspace=file_ws,
                          Filename=filename + '.nxspe')
            elif fmt == 'ascii':
                SaveAscii(InputWorkspace=file_ws, Filename=filename + '.dat')
            elif fmt == 'gss':
                # GSAS requires TOF units; convert into a temporary copy so
                # the original workspace is left untouched.
                ConvertUnits(InputWorkspace=file_ws,
                             OutputWorkspace="__save_item_temp",
                             Target="TOF")
                SaveGSS(InputWorkspace="__save_item_temp",
                        Filename=filename + ".gss")
                DeleteWorkspace(Workspace="__save_item_temp")
            elif fmt == 'aclimax':
                if not self._save_to_cm_1:
                    bins = '3, -0.005, 500'  # meV
                else:
                    bins = '24, -0.005, 4000'  # cm-1
                Rebin(InputWorkspace=file_ws,
                      OutputWorkspace=file_ws + '_aclimax_save_temp',
                      Params=bins)
                SaveAscii(InputWorkspace=file_ws + '_aclimax_save_temp',
                          Filename=filename + '_aclimax.dat',
                          Separator='Tab')
                DeleteWorkspace(Workspace=file_ws + '_aclimax_save_temp')

    def set_formats(self, formats):
        """Set the list of format identifiers recognised by execute()."""
        self._formats = formats

    def set_save_to_cm_1(self, save_to_cm_1):
        """Choose cm-1 (True) or meV (False) binning for aCLIMAX output."""
        self._save_to_cm_1 = save_to_cm_1
972 | |
---|
class Naming(ReductionStep):
    """Takes the responsibility of naming the results away from the Grouping
    step so that ws names are consistent right up until the last step. This
    uses the following instrument parameters:
    * 'Workflow.NamingConvention' - to decide how to name the result workspace.
        The default (when nothing is selected) is to use the run title.
    """
    _result_workspaces = []

    def __init__(self):
        super(Naming, self).__init__()
        self._result_workspaces = []

    def execute(self, reducer, file_ws):
        """Rename file_ws according to the instrument's naming convention
        and record the new name."""
        wsname = self._get_ws_name(file_ws)
        RenameWorkspace(InputWorkspace=file_ws, OutputWorkspace=wsname)
        self._result_workspaces.append(wsname)

    def get_result_workspaces(self):
        """Return the renamed workspace names produced by execute()."""
        return self._result_workspaces

    def _get_ws_name(self, workspace):
        """Return the output name for the workspace, chosen by the
        'Workflow.NamingConvention' instrument parameter (defaults to
        'RunTitle' when the parameter is absent)."""
        try:
            convention = mtd[workspace].getInstrument().getStringParameter(
                'Workflow.NamingConvention')[0]
        except IndexError:
            convention = 'RunTitle'

        if convention == 'AnalyserReflection':
            return self._analyser_reflection(workspace)
        elif convention == 'RunTitle':
            return self._run_title(workspace)
        else:
            raise NotImplementedError('Unknown \'Workflow.NamingConvention\''
                                      ' parameter encountered on workspace: '
                                      + workspace)

    def _run_title(self, workspace):
        """Build '<SHORTINST><runNo>-<sanitised run title>'."""
        ws = mtd[workspace]
        title = ws.getRun()['run_title'].value.strip()
        runNo = ws.getRun()['run_number'].value
        inst = ws.getInstrument().getName()
        isn = config.getFacility().instrument(inst).shortName().upper()
        # Keep only filesystem-safe characters in the title.
        valid = "-_.() %s%s" % (string.ascii_letters, string.digits)
        title = ''.join(ch for ch in title if ch in valid)
        # str() guards against the run_number log holding a numeric value,
        # which would make the concatenation raise a TypeError.
        title = isn + str(runNo) + '-' + title
        return title

    def _analyser_reflection(self, workspace):
        """Build '<shortinst><run>_<analyser><reflection>_red'; analyser and
        reflection fall back to empty strings when the instrument parameters
        are absent."""
        if workspace == '':
            return ''
        ws = mtd[workspace]
        ins = ws.getInstrument().getName()
        ins = config.getFacility().instrument(ins).shortName().lower()
        run = ws.getRun().getLogData('run_number').value
        try:
            analyser = ws.getInstrument().getStringParameter('analyser')[0]
            reflection = ws.getInstrument().getStringParameter('reflection')[0]
        except IndexError:
            analyser = ''
            reflection = ''
        # str() for the same numeric-log-value reason as in _run_title.
        prefix = ins + str(run) + '_' + analyser + reflection + '_red'
        return prefix