sweep.py
import argparse
import random
from multiprocessing import cpu_count

import numpy as np
import torch
import wandb

from data.dataloader import MyDataLoader
from models.model import HyperFuseNet
from training import Trainer

def tuning():
    # Seed all RNGs for reproducibility
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

    # Set number of classes
    num_classes = 3

    # Hyperparameters for this run are supplied by the W&B sweep agent
    wandb.init(project="MHyEEG")
    epochs = wandb.config.epochs
    dropout_rate = wandb.config.dropout_rate
    lr = wandb.config.lr
    max_lr = 10 * lr  # peak learning rate for the one-cycle policy
    min_mom = wandb.config.min_mom
    max_mom = wandb.config.max_mom

    train_loader, eval_loader, sample_weights = MyDataLoader(train_file=args.train_file_path,
                                                             test_file=args.test_file_path,
                                                             batch_size=args.batch_size,
                                                             num_workers=n_workers)
    net = HyperFuseNet(n=args.n, dropout_rate=dropout_rate)
    wandb.config.update({"max_lr": max_lr, 'sample_weights': sample_weights.tolist()})
    wandb.config.update(args)  # also log the CLI arguments
    wandb.watch(net)

    # Count trainable parameters
    params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print(f'Number of parameters: {params}')
    print()
    # Initialize optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=args.weight_decay, eps=1e-7)

    # Train/evaluate model
    trainer = Trainer(net, optimizer, epochs=epochs,
                      use_cuda=args.cuda, gpu_num=args.gpu_num,
                      checkpoint_folder=args.checkpoint_folder,
                      max_lr=max_lr, min_mom=min_mom,
                      max_mom=max_mom, l1_reg=args.l1_reg,
                      num_classes=num_classes,
                      sample_weights=sample_weights)
    trainer.train(train_loader, eval_loader)
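

# The Trainer is expected to implement the one-cycle policy from max_lr,
# min_mom, and max_mom. As a minimal sketch (an assumption about Trainer's
# internals, not code from this repo), the equivalent schedule with PyTorch's
# built-in scheduler would look like:
#
#   scheduler = torch.optim.lr_scheduler.OneCycleLR(
#       optimizer, max_lr=max_lr,
#       epochs=epochs, steps_per_epoch=len(train_loader),
#       base_momentum=min_mom, max_momentum=max_mom)  # with Adam, cycles beta1
#   ...
#   scheduler.step()  # called after each optimizer.step()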

# Arguments
def str2bool(value):
    # argparse's type=bool treats any non-empty string (including 'False') as
    # True, so parse boolean flags explicitly
    return str(value).lower() in ('1', 'true', 'yes')

parser = argparse.ArgumentParser()
parser.add_argument('--train_file_path', type=str, help='Path to training .pt file')
parser.add_argument('--test_file_path', type=str, help='Path to test .pt file')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--num_workers', default=1, help="Number of workers, 'max' for maximum number")
parser.add_argument('--cuda', type=str2bool, default=True)
parser.add_argument('--gpu_num', type=int, default=0)
parser.add_argument('--n', type=int, default=4, help="n parameter for PHM layers")
parser.add_argument('--l1_reg', type=str2bool, default=False)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--weight_decay', type=float, default=0)
parser.add_argument('--checkpoint_folder', type=str, default='checkpoints')
parser.add_argument('--label_kind', type=str, default='Arsl', help="Choose valence (Vlnc) or arousal (Arsl) label")
args = parser.parse_args()

seed = args.seed
n_workers = args.num_workers
if n_workers == 'max':
    n_workers = cpu_count()  # number of CPUs in the system
else:
    n_workers = int(n_workers)  # argparse leaves this value a string when passed on the CLI

sweep_configuration = {
    'method': 'bayes',
    'metric': {'goal': 'minimize', 'name': 'val_loss'},
    'parameters':
    {
        'dropout_rate': {'min': 0.125, 'max': 0.4},
        'epochs': {'values': [50, 60, 70]},
        'lr': {'min': 0.001, 'max': 0.008},
        'min_mom': {'min': 0.75, 'max': 0.89},  # min momentum in one-cycle policy; for Adam this is beta1
        'max_mom': {'min': 0.90, 'max': 0.99},  # max momentum in one-cycle policy; for Adam this is beta1
        'label': {'value': args.label_kind}
    }
}

sweep_id = wandb.sweep(sweep=sweep_configuration, project='MHyEEG')
wandb.agent(sweep_id, function=tuning, count=3, project='MHyEEG')
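
# Example invocation (a minimal sketch; the .pt paths are hypothetical
# placeholders, not files shipped with this repo):
#
#   python sweep.py --train_file_path data/train.pt \
#                   --test_file_path data/test.pt \
#                   --num_workers max --label_kind Arsl
#
# The agent then runs tuning() count=3 times, each with a fresh hyperparameter
# sample drawn by the Bayesian sweep, logging val_loss back to W&B.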