-
Notifications
You must be signed in to change notification settings - Fork 28
/
Copy pathtrain_options.py
65 lines (51 loc) · 4.03 KB
/
train_options.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
import argparse
import torch
def get_train_options(dataset_name, args=None):
    """Build and parse the training options for the given dataset.

    Parameters
    ----------
    dataset_name : str
        One of 'narendra_li', 'toy_lgssm' or 'wiener_hammerstein'.
        Any other name yields only the common options (no learning-rate
        schedule options), matching the original branch behavior.
    args : list of str, optional
        Argument list to parse instead of ``sys.argv`` (useful for
        testing / programmatic use). ``None`` keeps the old behavior
        of parsing the command line.

    Returns
    -------
    argparse.Namespace
        Parsed training options.
    """
    train_parser = argparse.ArgumentParser(description='training parameter')
    # Options shared by every dataset.
    train_parser.add_argument('--clip', type=int, default=10, help='clipping of gradients')
    train_parser.add_argument('--lr_scheduler_nstart', type=int, default=10, help='learning rate scheduler start epoch')
    train_parser.add_argument('--print_every', type=int, default=1, help='output print of training')
    train_parser.add_argument('--test_every', type=int, default=5, help='test during training after every n epoch')

    # Per-dataset schedule settings. Only n_epochs and lr_scheduler_nepochs
    # differed between the duplicated branches, so they are collapsed into
    # a lookup table here.
    # NOTE(review): the previously commented-out 'cascaded_tank' and 'f16gvt'
    # configurations (n_epochs=500, lr_scheduler_nepochs=4 / 10/2) were dead
    # code and have been removed.
    schedules = {
        'narendra_li': {'n_epochs': 750, 'lr_scheduler_nepochs': 10},
        'toy_lgssm': {'n_epochs': 750, 'lr_scheduler_nepochs': 10},
        'wiener_hammerstein': {'n_epochs': 750, 'lr_scheduler_nepochs': 20},
    }
    if dataset_name in schedules:
        cfg = schedules[dataset_name]
        train_parser.add_argument('--n_epochs', type=int, default=cfg['n_epochs'], help='number of epochs')
        train_parser.add_argument('--init_lr', type=float, default=1e-3, help='initial learning rate')
        train_parser.add_argument('--min_lr', type=float, default=1e-6, help='minimal learning rate')
        train_parser.add_argument('--lr_scheduler_nepochs', type=float, default=cfg['lr_scheduler_nepochs'], help='check learning rate after')
        train_parser.add_argument('--lr_scheduler_factor', type=float, default=10, help='adapt learning rate by')

    # change batch size to higher value if trained on cuda device
    if torch.cuda.is_available():
        train_parser.add_argument('--batch_size', type=int, default=2048, help='batch size')
    else:
        train_parser.add_argument('--batch_size', type=int, default=128, help='batch size')

    train_options = train_parser.parse_args(args)
    return train_options
def get_test_options(args=None):
    """Build and parse the testing options.

    Parameters
    ----------
    args : list of str, optional
        Argument list to parse instead of ``sys.argv`` (useful for
        testing / programmatic use). ``None`` keeps the old behavior
        of parsing the command line.

    Returns
    -------
    argparse.Namespace
        Parsed testing options with a single ``batch_size`` entry
        (default 32).
    """
    test_parser = argparse.ArgumentParser(description='testing parameter')
    test_parser.add_argument('--batch_size', type=int, default=32, help='batch size')  # 128
    test_options = test_parser.parse_args(args)
    return test_options