
Commit 0c919be

more_changes
1 parent b771f5d commit 0c919be

19 files changed: +3170 -1703 lines

README.md

Lines changed: 7 additions & 3 deletions
@@ -4,13 +4,17 @@
 - Faster research and experimentation
 - Based on [fastai](https://github.com/fastai)
 - Not tooo high level and meant only for the "bit" advanced researcher
+- Many many convenience functions that one would need
 - (This library is not stable right now. Everything works of course, but they will be changed and rearranged eventually)
 
+# Features
+- Check features.md for a full list
+
 ## Documentation
-- Check demo.ipynb for initial example
-- There will be blogs on the topics that were required and [more here](https://www.subhadityamukherjee.me/)
+- Check the syntax folder for documentation
+- Check demos folder
 - [Function documentation](https://subhadityamukherjee.github.io/sprintdl/)
-- More will be added soon along with proper documentation
+- There will be blogs on the topics that were required and [more here](https://www.subhadityamukherjee.me/)
 
 ## Why
 - I loved fastai, but in many things didnt agree with the way they went. I also needed some extra features that might have not ever been approved
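
(For orientation, the demos the README now points to all follow roughly the same pattern. The sketch below is condensed from demos/basicTrain.py, added later in this commit; the dataset path, batch size and class count are placeholders taken from that demo, and the learning-rate/momentum schedulers and CUDA callback used in the full demo are omitted for brevity. Treat it as a reading aid, not a canonical API reference.)

from sprintdl.main import *
from sprintdl.nets import *

# Placeholder dataset root and transforms, as in the demo
fpath = Path("/media/hdd/Datasets/imagenette2-160/")
tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]

# Data pipeline: list images, split into train/valid, label, bundle into a databunch
il = ImageList.from_files(fpath, tfms=tfms)
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())
data = ll.to_databunch(256, c_in=3, c_out=10)

# Model, loss, optimiser and a minimal callback set, then train
learn = Learner(xresnet18(c_out=10), data, LabelSmoothingCrossEntropy(), lr=1e-2,
                cb_funcs=[partial(AvgStatsCallback, accuracy), ProgressCallback, Recorder],
                opt_func=adam_opt(mom=0.9, mom_sqr=0.99, eps=1e-6, wd=1e-2))
learn.fit(1)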

demos/.ipynb_checkpoints/ArtClass-checkpoint.ipynb

Lines changed: 0 additions & 845 deletions
This file was deleted.

demos/.ipynb_checkpoints/ArtClassification-checkpoint.ipynb

Lines changed: 1229 additions & 0 deletions
Large diffs are not rendered by default.

demos/ArtClass.ipynb

Lines changed: 0 additions & 845 deletions
This file was deleted.

demos/ArtClassification.ipynb

Lines changed: 1229 additions & 0 deletions
Large diffs are not rendered by default.

demos/ArtClassification.py

Lines changed: 150 additions & 0 deletions
@@ -0,0 +1,150 @@
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.10.1
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

# %matplotlib inline

import os
os.environ['TORCH_HOME'] = "/media/hdd/Datasets/"
import sys
sys.path.append("../")
# -

from sprintdl.main import *
from sprintdl.nets import *

device = torch.device('cuda', 0)
from torch.nn import init
import torch
import math

# # Define required

# +
fpath = Path("/media/hdd/Datasets/ArtClass/")

tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
bs = 256
# -

# # Actual process

il = ImageList.from_files(fpath, tfms=tfms)

il

tm = Path("/media/hdd/Datasets/ArtClass/Unpopular/mimang.art/69030963_140928767119437_3621699865915593113_n.jpg")

str(tm).split("/")[-3]

sd = SplitData.split_by_func(il, partial(random_splitter, p_valid=.2))
ll = label_by_func(sd, lambda x: str(x).split("/")[-3], proc_y=CategoryProcessor())

n_classes = len(set(ll.train.y.items))

data = ll.to_databunch(bs, c_in=3, c_out=2)

show_batch(data, 4)

# +
lr = .001
pct_start = 0.5
phases = create_phases(pct_start)
sched_lr = combine_scheds(phases, cos_1cycle_anneal(lr/10., lr, lr/1e5))
sched_mom = combine_scheds(phases, cos_1cycle_anneal(0.95, 0.85, 0.95))

cbfs = [
    partial(AvgStatsCallback, accuracy),
    partial(ParamScheduler, 'lr', sched_lr),
    partial(ParamScheduler, 'mom', sched_mom),
    partial(BatchTransformXCallback, norm_imagenette),
    ProgressCallback,
    Recorder,
    # MixUp,
    partial(CudaCallback, device)]

loss_func = LabelSmoothingCrossEntropy()
# arch = partial(xresnet34, n_classes)
arch = get_vision_model("resnet34", n_classes=n_classes, pretrained=True)

# opt_func = partial(sgd_mom_opt, wd=0.01)
opt_func = adam_opt(mom=0.9, mom_sqr=0.99, eps=1e-6, wd=1e-2)
# opt_func = lamb
# -

# # Training

clear_memory()

# learn = get_learner(nfs, data, lr, conv_layer, cb_funcs=cbfs)
learn = Learner(arch, data, loss_func, lr=lr, cb_funcs=cbfs, opt_func=opt_func)

# +
# model_summary(learn, data)
# -

learn.fit(1)

save_model(learn, "m1", fpath)

# +
temp = Path('/media/hdd/Datasets/ArtClass/Popular/artgerm/10004370_1657536534486515_1883801324_n.jpg')

get_class_pred(temp, learn, ll, 128)
# -

temp = Path('/home/eragon/Downloads/Telegram Desktop/IMG_1800.PNG')

get_class_pred(temp, learn, ll, 128)

temp = Path('/home/eragon/Downloads/Telegram Desktop/IMG_20210106_180731.jpg')

get_class_pred(temp, learn, ll, 128)

# # Digging in

# +
# classification_report(learn, n_classes, device)
# -

learn.recorder.plot_lr()

learn.recorder.plot_loss()

# # Model vis

run_with_act_vis(1, learn)

# # Multiple runs with model saving

dict_runner = {
    "xres18": [1, partial(xresnet18, c_out=n_classes)(), data, loss_func, .001, cbfs, opt_func],
    "xres34": [1, partial(xresnet34, c_out=n_classes)(), data, loss_func, .001, cbfs, opt_func],
    "xres50": [1, partial(xresnet50, c_out=n_classes)(), data, loss_func, .001, cbfs, opt_func],
}

# arch is the already-instantiated pretrained resnet34 from above, so it is passed directly
learn = Learner(arch, data, loss_func, lr=lr, cb_funcs=cbfs, opt_func=opt_func)

multiple_runner(dict_runner, fpath)
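
A note on the multiple-runs block above: each dict_runner entry appears to map a run name to a list of [n_epochs, model, data, loss_func, lr, callbacks, opt_func] that multiple_runner then trains and saves in turn. On that reading, comparing one more configuration would just be one more key; the entry below is a hypothetical illustration and is not part of this commit.

# Hypothetical extra run, assuming the [epochs, model, data, loss, lr, cbfs, opt] layout above
dict_runner["xres34_lowlr"] = [1, partial(xresnet34, c_out=n_classes)(), data, loss_func, .0001, cbfs, opt_func]
multiple_runner(dict_runner, fpath)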

demos/basicTrain.py

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.10.1
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

# %matplotlib inline

import sys
sys.path.append("../")
# -

from sprintdl.main import *
from sprintdl.nets import *

device = torch.device('cuda', 0)
from torch.nn import init
import torch
import math

# # Define required

# +
# x_train,y_train,x_valid,y_valid = get_mnist("/media/hdd/Datasets/imagenette2-160.tgz")

# +
fpath = Path("/media/hdd/Datasets/imagenette2-160/")

tfms = [make_rgb, ResizeFixed(128), to_byte_tensor, to_float_tensor]
bs = 256
# -

# # Define model and data

# +
def prev_pow_2(x): return 2**math.floor(math.log2(x))

def get_cnn_layers(data, nfs, layer, **kwargs):
    def f(ni, nf, stride=2): return layer(ni, nf, 3, stride=stride, **kwargs)
    l1 = data.c_in
    l2 = prev_pow_2(l1*3*3)
    layers = [f(l1, l2, stride=1),
              f(l2, l2*2, stride=2),
              f(l2*2, l2*4, stride=2)]
    nfs = [l2*4] + nfs
    layers += [f(nfs[i], nfs[i+1]) for i in range(len(nfs)-1)]
    layers += [nn.AdaptiveAvgPool2d(1), Lambda(flatten),
               nn.Linear(nfs[-1], data.c_out)]
    return layers

def get_cnn_model(data, nfs, layer, **kwargs):
    return nn.Sequential(*get_cnn_layers(data, nfs, layer, **kwargs))

def get_learner(nfs, data, lr, layer, loss_func=F.cross_entropy,
                cb_funcs=None, opt_func=sgd_opt, **kwargs):
    model = get_cnn_model(data, nfs, layer, **kwargs)
    init_cnn(model)
    return Learner(model, data, loss_func, lr=lr, cb_funcs=cb_funcs, opt_func=opt_func)


# +
nfs = [64, 64, 128, 256]
# sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
# sched = combine_scheds([0.3, 0.7], [sched_cos(.1,.3), sched_cos(.3, 0.05)])
# mnist_view = view_tfm(1,28,28)

lr = 1e-2
pct_start = 0.5
phases = create_phases(pct_start)
sched_lr = combine_scheds(phases, cos_1cycle_anneal(lr/10., lr, lr/1e5))
sched_mom = combine_scheds(phases, cos_1cycle_anneal(0.95, 0.85, 0.95))

cbfs = [
    partial(AvgStatsCallback, accuracy),
    partial(ParamScheduler, 'lr', sched_lr),
    partial(ParamScheduler, 'mom', sched_mom),
    partial(BatchTransformXCallback, norm_imagenette),
    ProgressCallback,
    Recorder,
    # MixUp,
    partial(CudaCallback, device)]

loss_func = LabelSmoothingCrossEntropy()
arch = partial(xresnet18, c_out=10)
epochs = 5
lr = .4
# opt_func = partial(sgd_mom_opt, wd=0.01)
opt_func = adam_opt(mom=0.9, mom_sqr=0.99, eps=1e-6, wd=1e-2)
# opt_func = lamb
# -

# # Actual process

il = ImageList.from_files(fpath, tfms=tfms)

il

sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='val'))
ll = label_by_func(sd, parent_labeler, proc_y=CategoryProcessor())

ll

data = ll.to_databunch(bs, c_in=3, c_out=10)

# # Training

# learn = get_learner(nfs, data, lr, conv_layer, cb_funcs=cbfs)
learn = Learner(arch(), data, loss_func, lr=lr, cb_funcs=cbfs, opt_func=opt_func)

# +
# model_summary(learn, data)
# -

learn.fit(epochs)

# +
# run_with_act_vis(epochs,learn)
# -

# # Digging in

learn.avg_stats.valid_stats.avg_stats

learn.recorder.plot_lr()

learn.recorder.plot_loss()
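
The custom-CNN helpers this demo defines (prev_pow_2, get_cnn_layers, get_cnn_model, get_learner) are never exercised; only the commented-out call under "# # Training" hints at how they would be wired up. A hedged sketch of that path is below, assuming conv_layer is the conv/activation layer factory implied by the layer(ni, nf, 3, stride=stride) call inside get_cnn_layers; it is not used anywhere in this commit.

# Hypothetical: train the small custom CNN instead of xresnet18, following the
# commented-out call in the demo; conv_layer is assumed to exist in sprintdl
learn = get_learner(nfs, data, lr, conv_layer, cb_funcs=cbfs)
learn.fit(epochs)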
