forked from jchibane/srf
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathconfig_loader.py
executable file
·307 lines (289 loc) · 10.9 KB
/
config_loader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
def config_parser():
    """Build the command-line / config-file option parser for SRF.

    Options may be supplied on the command line or via the file named by
    ``--config`` (configargparse merges both, CLI winning). Options are
    grouped into: paths, architecture, pretraining/freezing, training,
    rendering, dataset, generation, logging, and GPU sections.

    Returns:
        configargparse.ArgumentParser: the fully populated parser (not yet
        parsed — callers invoke ``parse_args`` themselves).
    """
    import configargparse
    parser = configargparse.ArgumentParser()
    # --- paths / bookkeeping ---
    parser.add_argument('--config',
                        is_config_file=True,
                        default='./configs/train_DTU.txt',
                        help='config file path')
    parser.add_argument("--expname", type=str, help='experiment name')
    parser.add_argument("--basedir",
                        type=str,
                        default='./logs/',
                        help='where to store ckpts and logs')
    parser.add_argument("--datadir",
                        type=str,
                        default='./data/DTU_MVS/',
                        help='input data directory')
    parser.add_argument(
        "--num_workers",
        type=int,
        default=8,
        # NOTE: trailing space added so the two literals concatenate readably.
        help='Number of worker processes preparing input data for SRF. '
        'The larger the better, but should not exceed the number of available CPUs.'
    )
    # --- architecture options ---
    parser.add_argument("--reduce_features",
                        action='store_true',
                        help='Reduce feature sizes and number of features. No compression by default.')
    parser.add_argument("--no_compression",
                        action='store_true',
                        help='Disable compression')
    parser.add_argument("--intermediate_feature_size",
                        type=int,
                        default=256,
                        help="Intermediate compression size")
    parser.add_argument("--compressed_feature_size",
                        type=int,
                        default=128,
                        help="Final compression size")
    parser.add_argument("--num_attn_heads",
                        type=int,
                        default=8,
                        help="Number of attention heads")
    parser.add_argument("--num_transformer_layers",
                        type=int,
                        default=2,
                        help="Number of transformer layers")
    parser.add_argument("--disable_pos_encoding",
                        action='store_true',
                        help="Disable positional encoding")
    # --- pretraining and freezing options ---
    parser.add_argument("--pretrained_path",
                        type=str,
                        default=None,
                        help='path to pretrained model weights')
    parser.add_argument(
        "--cnn_weight_path",
        type=str,
        default=None,
        help=
        'path to pretrained cnn weights. setting this will override pretrained model weights.'
    )
    parser.add_argument("--freeze_cnn",
                        action='store_true',
                        help='whether to freeze cnn weights')
    # --- training options ---
    parser.add_argument(
        "--shuffle_combis",
        action='store_true',
        help=
        'Do we want to randomly sample similarity combinations in SRF? True, if set, False else.'
    )
    parser.add_argument("--fine_tune",
                        type=str,
                        default=None,
                        help='scan to fine tune to')
    parser.add_argument("--lrate_decay_off",
                        action='store_true',
                        help='turn off lrate decay')
    parser.add_argument("--split",
                        type=str,
                        default='split.pkl',
                        help='name of split file')
    parser.add_argument("--model",
                        type=str,
                        default='model1',
                        help='the neural model to use')
    parser.add_argument(
        "--fine_model_duplicate",
        action='store_true',
        help=
        'if true use the same model for fine and coarse hierarchical sampling')
    parser.add_argument(
        "--N_rand",
        type=int,
        default=32 * 32 * 4,
        help='batch size (number of random rays per gradient step)')
    parser.add_argument("--lrate",
                        type=float,
                        default=5e-4,
                        help='learning rate')
    parser.add_argument("--lrate_decay",
                        type=int,
                        default=250,
                        help='exponential learning rate decay (in 1000 steps)')
    parser.add_argument("--no_reload",
                        action='store_true',
                        help='do not reload weights from saved ckpt')
    parser.add_argument(
        "--ckpt_path",
        type=str,
        default=None,
        # NOTE: trailing space added so the two literals concatenate readably.
        help='Specific NN parameter file (npy) to reload for the network. '
        'Given string is appended to the experiments folder path.')
    parser.add_argument(
        "--ckpt_expname",
        type=str,
        default=None,
        help=
        'Experiment name to load checkpoint and render from. If not given, the experiment from the config file is used.'
    )
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help='number of training scenes used per batch')
    parser.add_argument("--precrop_iters",
                        type=int,
                        default=0,
                        help='number of steps to train on central crops')
    parser.add_argument("--precrop_frac",
                        type=float,
                        default=.5,
                        help='fraction of img taken for central crops')
    parser.add_argument(
        "--sigmoid",
        action='store_true',
        # fixed typo "predicion" and the doubled "with sigmoid".
        help=
        'if true, use sigmoid to activate the raw prediction, relu else'
    )
    parser.add_argument(
        "--num_reference_views",
        type=int,
        default=10,
        help=
        'number of reference views given to the network as input to predict a novel view'
    )
    # --- rendering options ---
    parser.add_argument('--video', dest='video', action='store_true')
    parser.set_defaults(video=False)
    parser.add_argument(
        "--N_rays_test",
        type=int,
        default=128,
        help='number of rays considered per batch in test mode')
    parser.add_argument("--N_samples",
                        type=int,
                        default=64,
                        help='number of coarse samples per ray')
    parser.add_argument("--N_importance",
                        type=int,
                        default=0,
                        help='number of additional fine samples per ray')
    parser.add_argument("--perturb",
                        type=float,
                        default=1.,
                        help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument(
        "--raw_noise_std",
        type=float,
        default=0.,
        help=
        'std dev of noise added to regularize sigma_a output, 1e0 recommended')
    parser.add_argument(
        "--render_factor",
        type=int,
        default=4,
        help='Downsampling factor to speed up test time rendering of images. '
        '1 is full size, set to 4 or 8 for faster preview.')
    parser.add_argument(
        "--vis_render_factor",
        type=int,
        default=16,
        help=
        'Downsampling factor to speed up training visualization rendering of images. '
        '1 is full size, set to 16 and above to avoid memory problems.')
    # --- dataset options ---
    parser.add_argument("--dataset_type",
                        type=str,
                        default='DTU',
                        help='options: DTU')
    # NOTE(review): near/far are declared type=int although clipping-plane
    # distances are conceptually floats; a config value like "1.5" would be
    # rejected. Kept as-is to avoid changing accepted inputs — confirm intent.
    parser.add_argument("--near",
                        type=int,
                        default=None,
                        help='near clipping plane location')
    parser.add_argument("--far",
                        type=int,
                        default=None,
                        help='far clipping plane location')
    # --- generation options ---
    parser.add_argument(
        "--eval",
        action='store_true',
        help='turn on eval mode - render images from eval poses')
    parser.add_argument(
        "--generate_specific_samples",
        nargs='+',
        type=str,
        default=None,
        help=
        'Visualize specific samples during generation and for visualizing training progress.'
    )
    parser.add_argument(
        "--gen_pose",
        nargs='+',
        type=int,
        default=None,
        help=
        'List index of pose to generate. Where the list of poses is provided by the dataset '
        'specific class and represents a camera path.')
    parser.add_argument(
        "--fixed_batch",
        type=int,
        default=0,
        # NOTE: trailing space added so the two literals concatenate readably.
        help=
        'A fixed batch of input reference images is loaded. Inputs are divided into batches '
        'of size num_reference_views keeping their ordering in the split file.'
    )
    parser.add_argument(
        "--white_bkgd",
        action='store_true',
        help=
        'set to render synthetic data on a white bkgd (always use for dvoxels)'
    )
    parser.add_argument(
        "--half_res",
        action='store_true',
        help='load blender synthetic data at 400x400 instead of 800x800')
    parser.add_argument(
        "--no_ndc",
        action='store_true',
        # fixed inverted help text: this flag DISABLES NDC (cf. upstream NeRF).
        help=
        'do not use normalized device coordinates (set for non-forward facing scenes)'
    )
    # this is set false in all cases in the paper
    parser.add_argument(
        "--lindisp",
        action='store_true',
        help='sampling linearly in disparity rather than depth')
    # --- logging/saving options ---
    parser.add_argument("--i_print",
                        type=int,
                        default=100,
                        help='frequency of console printout and metric logging')
    parser.add_argument(
        "--i_val_fine_tune",
        type=int,
        default=200,
        help='frequency of val loss computation in fine tuning mode')
    parser.add_argument("--i_saveckpt",
                        type=int,
                        default=1000,
                        help='frequency of weight ckpt saving')
    parser.add_argument("--i_no_val",
                        action='store_true',
                        help='turn off validation computation')
    parser.add_argument("--i_testset",
                        type=int,
                        default=1000,
                        help='frequency of testset saving')
    parser.add_argument("--i_validation_loss",
                        type=int,
                        default=500,
                        help='frequency of val loss computation')
    # --- gpu options ---
    # Import here (not at module top) so merely importing this module does not
    # require CUDA initialization; default is the number of visible GPUs.
    from torch.cuda import device_count
    parser.add_argument("--n_gpus",
                        type=int,
                        default=device_count(),
                        help='number of gpus to use')
    parser.add_argument(
        "--no_parallel",
        action='store_true',
        help='do not use GPU parallelization, overrides n_gpus')
    return parser
def get_config():
    """Parse all SRF options and apply post-parse defaults.

    Returns:
        argparse.Namespace: parsed configuration; ``gen_pose`` is guaranteed
        to be a non-empty list (defaults to ``[0]`` when not supplied).
    """
    args = config_parser().parse_args()
    # No pose requested: fall back to the first pose of the dataset's path.
    if args.gen_pose is None:
        args.gen_pose = [0]
    return args