-
Notifications
You must be signed in to change notification settings - Fork 8
/
run_HP.py
73 lines (64 loc) · 1.65 KB
/
run_HP.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import json
import os
import sys
# Search space for the greedy hyper-parameter sweep: each key maps to the
# candidate values tried (one parameter at a time) further down in the script.
hparams = {
    "lr": [0.01, 0.001, 0.0001],
    # "weight_decay": [0, 1e-4, 1e-3, 1e-2],  # earlier, coarser grid
    "weight_decay": [0, 1e-5, 5e-5, 1e-4],
    "dropout": [0.1, 0.2, 0.5, 0.7],
    "epochs": [30, 50, 70],
    "dim_hidden": [128, 256, 512],
    "num_layers": [2, 4, 8],
    # "batch_size": [1000, 2000, 5000],  # excluded from the sweep
}
# Positional CLI arguments: <gpu index> <model name> <dataset name>.
gpu = int(sys.argv[1])
model = sys.argv[2]
dataset = sys.argv[3]

# The two large datasets get more mini-batch steps per epoch; the rest use 5.
num_steps = 30 if dataset in ("AmazonProducts", "Products") else 5

# Baseline run configuration; the sweep below overrides one key at a time.
def_config = {
    "cuda_num": gpu,
    "type_model": model,
    "dataset": dataset,
    "lr": 0.01,
    "weight_decay": 0.0,
    "dropout": 0.2,
    "epochs": 50,
    "dim_hidden": 128,
    "num_layers": 4,
    "batch_size": 5000,
    "N_exp": 3,  # presumably repetitions per configuration — confirm in main.py
    "num_steps": num_steps,
}
def make_command_line(exp_name, **kwargs):
    """Build the shell command for one training run.

    Starts from ``def_config``, applies the per-run overrides in ``kwargs``,
    and renders every entry as a ``--key value`` flag appended to the
    ``python main.py --resume --exp_name <exp_name>`` invocation.
    """
    merged = {**def_config, **kwargs}
    flags = " ".join(f"--{key} {val}" for key, val in merged.items())
    return f"python main.py --resume --exp_name {exp_name} {flags}"
# Parameters tuned by the greedy sweep, in order; "batch_size" is excluded.
keys = ["lr", "weight_decay", "dropout", "epochs", "dim_hidden", "num_layers"]

# Best value found so far for each parameter (filled in greedily below).
optim_config = {}
# Greedy coordinate search: tune one hyper-parameter at a time, keeping the
# best values found for the earlier ones fixed via optim_config.
for param in keys:
    best_choice, best_score = -1, -1
    for i, value in enumerate(hparams[param]):
        print(f"Running {model} {param}={value}")
        exp_name = f"{model}_{param}_{value}"
        run_args = dict(optim_config)
        run_args[param] = value
        command = make_command_line(exp_name, **run_args)
        print(command)
        os.system(command)
        # Read back the metrics main.py wrote for this run.
        result_path = os.path.join("./logs", dataset, exp_name + ".json")
        with open(result_path, "rb") as fh:
            metrics = json.load(fh)
        # Strict '<' keeps the earliest candidate on ties.
        if best_score < metrics["mean_test_acc"]:
            best_score = metrics["mean_test_acc"]
            best_choice = i
    optim_config[param] = hparams[param][best_choice]