Ticket #7238: ReduceMandi306.config

File ReduceMandi306.config, 6.0 KB (added by Dennis Mikkelson, 7 years ago)

Script configuration file for protein data, MANDI 306

# Configuration file for ReduceOneSCD_Run.py and ReduceSCD_Parallel.py.
#
# Each line can either start with a comment, indicated by a '#' mark, or start
# with a parameter name and value, optionally followed by a comment.  ALL
# parameters used by the script must be specified.  If a required parameter
# is not specified, the script will terminate with a message indicating which
# parameter was missing.
#
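# For illustration only (this comment block is not read by the scripts), a
# parameter line has the general form below; the name and value shown are
# placeholders, not actual parameters:
#
#   parameter_name   value                  # optional trailing comment
#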

# ==========================================================================
# Parameters needed by ReduceOneSCD_Run.py, to process ONE run.
# ==========================================================================
#
instrument_name   MANDI                        # prefix for run file names

#
# Specify calibration file(s).  SNAP requires two calibration files, one
# for each bank.  If the default detector position is to be used, specify
# None as the calibration file name.
#
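# For example, to use a detector calibration file rather than the default
# detector position, give the file name in place of None (the file name
# below is hypothetical, shown only to illustrate the format):
#
#   calibration_file_1  MANDI_detectors.DetCal
#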
calibration_file_1  None
calibration_file_2  None

#
# Set the data_directory to None to use findnexus to get the run file when
# running this on the SNS systems.  On other systems, all of the input files
# must be copied into one directory and that directory must be specified as
# the data_directory.
#
data_directory    None
output_directory  /SNS/users/eu7/SCRIPT_TEST/

#
# Min & max tof determine the range of events loaded.
# Max Q determines the range of Q values that will be mapped to
# reciprocal space.
# Min & max monitor tof determine the range of tofs integrated in the
# monitor data to get the total monitor counts.
#
min_tof          14738.6
max_tof          31406.5
max_Q            4

monitor_index        0
min_monitor_tof  16000
max_monitor_tof  30000

#
# Specify a conventional cell type and centering.  If these are None, only
# one .mat and .integrate file will be written for this run, and they will
# be in terms of the Niggli reduced cell.  If these specify a valid
# cell type and centering, an additional .mat and .integrate file will be
# written for the specified cell_type and centering.  NOTE: If run in
# parallel, the driving script will only read the Niggli version of the
# .integrate file, and will combine, re-index and convert to a conventional
# cell, so these can usually be left as None.
#
cell_type     Hexagonal
centering     P

#
# Number of peaks to find, per run, both for getting the UB matrix,
# AND to determine how many peaks are integrated, if peak positions are
# NOT predicted.  NOTE: This number must be chosen carefully.  If too
# many peaks are requested, find peaks will take a very long time and
# the returned peaks will probably not even index, since most of them
# will be "noise" peaks.  If too few are requested, then there will be
# few peaks to be integrated, and the UB matrix may not be as accurate
# as it should be for predicting peaks to integrate.
#
num_peaks_to_find  450

#
# min_d, max_d and tolerance control indexing peaks.  max_d is also
# used to specify a threshold for the separation between peaks
# returned by FindPeaksMD, so it should be specified somewhat larger
# than the largest cell edge in the Niggli reduced cell for the
# sample.
#
min_d        50
max_d        120
tolerance  0.12

#
# If predicted peak positions are to be integrated,
# the integrate_predicted_peaks flag should be set to True and the range
# of wavelengths and d-spacings must be specified.
#
integrate_predicted_peaks   False
min_pred_wl                 0.2
max_pred_wl                 3.5
min_pred_dspacing           0.2
max_pred_dspacing           2.5

#
# Select only ONE of the following integration methods, by setting the
# use_*****_integration flag True.
#
use_sphere_integration      True
use_ellipse_integration     False
use_fit_peaks_integration   False

#
# Specify sphere and ellipse integration control parameters.  Check that these
# are correct if use_sphere_integration or use_ellipse_integration is True.
# Otherwise the values aren't used.
#
peak_radius             0.08       # for sphere integration only
bkg_inner_radius        0.08       # for sphere or ellipse integration
bkg_outer_radius        0.11       # for sphere or ellipse integration
integrate_if_edge_peak  True       # for sphere integration only

#
# Specify ellipse integration control parameters.
#
ellipse_region_radius   0.45
ellipse_size_specified  False

#
# Specify fit peaks integration control parameters.  Check that these are
# correct if use_fit_peaks_integration = True.  Otherwise the values
# aren't used.
#
rebin_step            -0.004
preserve_events       True
use_ikeda_carpenter   False
n_bad_edge_pixels      10

# ==========================================================================
# Additional Parameters needed by ReduceSCD_Parallel.py, to process
# multiple runs in parallel.
# ==========================================================================
#
exp_name               SAPPHIRE_JUNE_SPHERE
reduce_one_run_script  ReduceOneSCD_Run.py

#
# Specify the run numbers that should be reduced.  This can be done on several
# lines.  Each line must start with the parameter name run_nums and be followed
# by a comma-separated list of individual run numbers or ranges of run numbers.
# A range of run numbers is specified by listing the first number and last
# number in the range, separated by a colon.
#
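# For example (illustrative run numbers), the following two lines together
# would select runs 5637, 5639, and 5641 through 5644:
#
#   run_nums  5637,5639
#   run_nums  5641:5644
#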
run_nums  5637:5644

#
# Specify the slurm partition, or None to use local processes.  The parameter
# max_processes controls the maximum number of processes that will be run
# simultaneously locally, or that will be simultaneously submitted to slurm.
# The value of max_processes should be chosen carefully with the size of the
# system in mind, to avoid overloading the system.  Since the lower level
# calculations are all multi-threaded, this should be substantially lower than
# the total number of cores available.
# All runs will be processed eventually.  If there are more runs than
# max_processes, as some processes finish, new ones will be started, until
# all runs have been processed.
#
#slurm_queue_name    topazq
slurm_queue_name    None
max_processes       8