Commit 26e0783

init

0 parents  commit 26e0783
File tree

13 files changed (+704, -0 lines)

README.md

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
# PyTorch Material Classification


## Setup

### Prerequisites

- Ubuntu 18.04
- PyTorch 1.1.0

### Getting Started

- Clone this repo:
  ```bash
  git clone git@github.com:jiaxue1993/pytorch-material-classification.git
  cd pytorch-material-classification/
  ```
- Install [Pytorch-Encoding](https://github.com/zhanghang1989/PyTorch-Encoding)
- Download the [database](https://drive.google.com/file/d/1Hd1G7aKhsPPMbNrk4zHNJAzoXvUzWJ9M/view?usp=sharing) and extract it into the `dataset` folder
- Run the code with `bash run.sh` (a consolidated sketch of these steps follows below)
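
Taken together, a hedged sketch of the steps above; the archive file name is a placeholder, and `dataset/gtos-mobile` is the path hard-coded in `dataloader/gtos.py`:

```bash
git clone git@github.com:jiaxue1993/pytorch-material-classification.git
cd pytorch-material-classification/
# Download the database from the Google Drive link above, then extract it so
# that e.g. dataset/gtos-mobile/{train,test}/ exists.
unzip ~/Downloads/<database-archive>.zip -d dataset/
# run.sh location is not shown in this commit excerpt
bash run.sh
```
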
## Datasets

You can find the database [here](http://jiaxueweb.com/).

## Acknowledgement

This work was supported by National Science Foundation award IIS-1421134. A GPU used for this research was donated by the NVIDIA Corporation.

dataloader/__init__.py

Whitespace-only changes.

dataloader/dtd.py

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
1+
import torch
2+
import torch.utils.data as data
3+
import torchvision
4+
from torchvision import transforms, datasets
5+
6+
from PIL import Image
7+
import os
8+
import os.path
9+
10+
11+
def find_classes(dir):
12+
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
13+
classes.sort()
14+
class_to_idx = {classes[i]: i for i in range(len(classes))}
15+
return classes, class_to_idx
16+
17+
18+
class Dataloder():
19+
def __init__(self, config):
20+
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
21+
std=[0.229, 0.224, 0.225])
22+
transform_train = transforms.Compose([
23+
transforms.Resize(256),
24+
transforms.RandomResizedCrop(224),
25+
transforms.RandomHorizontalFlip(),
26+
transforms.ToTensor(),
27+
normalize,
28+
])
29+
transform_test = transforms.Compose([
30+
transforms.Resize(256),
31+
transforms.CenterCrop(224),
32+
transforms.ToTensor(),
33+
normalize,
34+
])
35+
36+
trainset = datasets.ImageFolder(os.path.join(config.dataset_path, 'train'), transform_train)
37+
testset = datasets.ImageFolder(os.path.join(config.dataset_path, 'test'), transform_test)
38+
39+
kwargs = {'num_workers': 8, 'pin_memory': True}
40+
trainloader = torch.utils.data.DataLoader(trainset, batch_size=
41+
config.batch_size, shuffle=True, **kwargs)
42+
testloader = torch.utils.data.DataLoader(testset, batch_size=
43+
config.batch_size, shuffle=False, **kwargs)
44+
self.classes = trainset.classes
45+
self.trainloader = trainloader
46+
self.testloader = testloader
47+
48+
def getloader(self):
49+
return self.trainloader, self.testloader
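
A minimal usage sketch for this loader. The `config` object below is a stand-in (the experiment configs under `experiments/` provide `dataset_path` and `batch_size`), and the `dataset/dtd` path is illustrative, not confirmed by this commit:

```python
from types import SimpleNamespace

from dataloader.dtd import Dataloder

# Stand-in config for illustration; real experiments build this in config.py.
config = SimpleNamespace(dataset_path='dataset/dtd', batch_size=32)

trainloader, testloader = Dataloder(config).getloader()
images, labels = next(iter(trainloader))
print(images.shape, labels.shape)   # e.g. torch.Size([32, 3, 224, 224]) torch.Size([32])
```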

dataloader/gtos.py

Lines changed: 86 additions & 0 deletions
@@ -0,0 +1,86 @@
1+
import torch
2+
import torch.utils.data as data
3+
import torchvision
4+
from torchvision import datasets, models, transforms
5+
6+
from PIL import Image
7+
import os
8+
import os.path
9+
10+
_imagenet_pca = {
11+
'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]),
12+
'eigvec': torch.Tensor([
13+
[-0.5675, 0.7192, 0.4009],
14+
[-0.5808, -0.0045, -0.8140],
15+
[-0.5836, -0.6948, 0.4203],
16+
])
17+
}
18+
19+
20+
class Dataloder():
21+
def __init__(self, config):
22+
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
23+
std=[0.229, 0.224, 0.225])
24+
transform_train = transforms.Compose([
25+
transforms.Resize(256),
26+
transforms.RandomResizedCrop(224),
27+
transforms.RandomHorizontalFlip(),
28+
transforms.ColorJitter(0.4,0.4,0.4),
29+
transforms.ToTensor(),
30+
Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec']),
31+
normalize,
32+
])
33+
transform_test = transforms.Compose([
34+
transforms.Resize(256),
35+
transforms.CenterCrop(224),
36+
transforms.ToTensor(),
37+
normalize,
38+
])
39+
40+
data_dir = 'dataset/gtos-mobile'
41+
trainset = datasets.ImageFolder(os.path.join(data_dir, 'train'), transform_train)
42+
testset = datasets.ImageFolder(os.path.join(data_dir, 'test'), transform_test)
43+
44+
45+
kwargs = {'num_workers': 8, 'pin_memory': True} if config.cuda else {}
46+
trainloader = torch.utils.data.DataLoader(trainset, batch_size=
47+
config.batch_size, shuffle=True, **kwargs)
48+
testloader = torch.utils.data.DataLoader(testset, batch_size=
49+
config.batch_size, shuffle=False, **kwargs)
50+
51+
52+
self.trainloader = trainloader
53+
self.testloader = testloader
54+
self.classes = trainset.classes
55+
56+
def getloader(self):
57+
return self.classes, self.trainloader, self.testloader
58+
59+
60+
class Lighting(object):
61+
"""Lighting noise(AlexNet - style PCA - based noise)"""
62+
63+
def __init__(self, alphastd, eigval, eigvec):
64+
self.alphastd = alphastd
65+
self.eigval = eigval
66+
self.eigvec = eigvec
67+
68+
def __call__(self, img):
69+
if self.alphastd == 0:
70+
return img
71+
72+
alpha = img.new().resize_(3).normal_(0, self.alphastd)
73+
rgb = self.eigvec.type_as(img).clone()\
74+
.mul(alpha.view(1, 3).expand(3, 3))\
75+
.mul(self.eigval.view(1, 3).expand(3, 3))\
76+
.sum(1).squeeze()
77+
78+
return img.add(rgb.view(3, 1, 1).expand_as(img))
79+
80+
81+
if __name__ == "__main__":
82+
data_dir = 'dataset/gtos-mobile'
83+
trainset = datasets.ImageFolder(os.path.join(data_dir, 'train'))
84+
testset = datasets.ImageFolder(os.path.join(data_dir, 'test'))
85+
print(trainset.classes)
86+
print(len(testset))
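
The `Lighting` transform here is AlexNet-style PCA color augmentation: it draws per-component coefficients `alpha ~ N(0, alphastd)` and adds the eigenvalue-weighted combination of the ImageNet RGB eigenvectors to every pixel. A small standalone sketch of applying it to a tensor image (the input tensor is illustrative):

```python
import torch

from dataloader.gtos import Lighting, _imagenet_pca

lighting = Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec'])

img = torch.rand(3, 224, 224)            # a ToTensor()-style image in [0, 1]
jittered = lighting(img)                 # same shape; every pixel shifted by one
print((jittered - img).abs().max())      # small PCA-weighted RGB offset
```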

dataloader/minc.py

Lines changed: 88 additions & 0 deletions
@@ -0,0 +1,88 @@
1+
import torch
2+
import torch.utils.data as data
3+
import torchvision
4+
from torchvision import transforms
5+
6+
from PIL import Image
7+
import os
8+
import os.path
9+
10+
11+
def find_classes(dir):
12+
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
13+
classes.sort()
14+
class_to_idx = {classes[i]: i for i in range(len(classes))}
15+
return classes, class_to_idx
16+
17+
18+
def make_dataset(filename, datadir, class_to_idx):
19+
images = []
20+
labels = []
21+
with open(os.path.join(filename), "r") as lines:
22+
for line in lines:
23+
_image = os.path.join(datadir, line.rstrip('\n'))
24+
_dirname = os.path.split(os.path.dirname(_image))[1]
25+
assert os.path.isfile(_image)
26+
label = class_to_idx[_dirname]
27+
images.append(_image)
28+
labels.append(label)
29+
30+
return images, labels
31+
32+
33+
class MINCDataloder(data.Dataset):
34+
def __init__(self, root, filename, transform=None):
35+
classes, class_to_idx = find_classes(root + '/images')
36+
self.classes = classes
37+
self.class_to_idx = class_to_idx
38+
self.transform = transform
39+
self.images, self.labels = make_dataset(filename, root,
40+
class_to_idx)
41+
assert (len(self.images) == len(self.labels))
42+
43+
def __getitem__(self, index):
44+
_img = Image.open(self.images[index]).convert('RGB')
45+
_label = self.labels[index]
46+
if self.transform is not None:
47+
_img = self.transform(_img)
48+
49+
return _img, _label
50+
51+
def __len__(self):
52+
return len(self.images)
53+
54+
55+
class Dataloder():
56+
def __init__(self, config):
57+
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
58+
std=[0.229, 0.224, 0.225])
59+
transform_train = transforms.Compose([
60+
transforms.Resize(256),
61+
transforms.RandomResizedCrop(224),
62+
transforms.RandomHorizontalFlip(),
63+
transforms.ToTensor(),
64+
normalize,
65+
])
66+
transform_test = transforms.Compose([
67+
transforms.Resize(256),
68+
transforms.CenterCrop(224),
69+
transforms.ToTensor(),
70+
normalize,
71+
])
72+
73+
trainset = MINCDataloder(config.dataset_path,
74+
config.train_source, transform=transform_train)
75+
testset = MINCDataloder(config.dataset_path,
76+
config.eval_source, transform=transform_test)
77+
78+
kwargs = {'num_workers': 0, 'pin_memory': True}
79+
trainloader = torch.utils.data.DataLoader(trainset, batch_size=
80+
config.batch_size, shuffle=True, **kwargs)
81+
testloader = torch.utils.data.DataLoader(testset, batch_size=
82+
config.batch_size, shuffle=False, **kwargs)
83+
self.classes = trainset.classes
84+
self.trainloader = trainloader
85+
self.testloader = testloader
86+
87+
def getloader(self):
88+
return self.trainloader, self.testloader
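
`make_dataset` expects `config.train_source` / `config.eval_source` to be plain-text split files listing one image path per line, relative to the dataset root, with the class label taken from the image's parent directory. A hedged usage sketch (the `minc-2500` paths and the `labels/train1.txt` name follow the usual MINC-2500 layout and are illustrative, not confirmed by this commit):

```python
from dataloader.minc import MINCDataloder

# Illustrative paths; the split file is expected to contain lines such as
#   images/brick/brick_001234.jpg
# relative to the dataset root, with the parent folder giving the class label.
trainset = MINCDataloder(root='dataset/minc-2500',
                         filename='dataset/minc-2500/labels/train1.txt')

img, label = trainset[0]            # PIL image and integer label (no transform here)
print(trainset.classes[label], img.size)
```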

dataset/.gitkeep

Whitespace-only changes.

experiments/gtos.dep.R18/config.py

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
1+
import os
2+
import os.path as osp
3+
import sys
4+
import time
5+
import numpy as np
6+
from easydict import EasyDict as edict
7+
import argparse
8+
9+
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
10+
11+
C = edict()
12+
config = C
13+
14+
C.seed = 0
15+
16+
"""please config ROOT_dir and user when u first using"""
17+
C.repo_name = 'Deep-Encoding-Pooling-Network-DEP-'
18+
C.dataset = 'GTOS'
19+
C.model = 'DEP'
20+
C.abs_dir = osp.realpath(".")
21+
C.this_dir = C.abs_dir.split(osp.sep)[-1]
22+
C.root_dir = C.abs_dir[:C.abs_dir.index(C.repo_name) + len(C.repo_name)]
23+
C.log_dir = osp.abspath(osp.join(C.root_dir, 'log', C.this_dir))
24+
C.snapshot_dir = osp.abspath(osp.join(C.log_dir, "snapshot"))
25+
26+
exp_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
27+
C.log_file = C.log_dir + '/log_' + exp_time + '.log'
28+
C.link_log_file = C.log_file + '/log_last.log'
29+
C.val_log_file = C.log_dir + '/val_' + exp_time + '.log'
30+
C.link_val_log_file = C.log_dir + '/val_last.log'
31+
32+
"""Data Dir and Weight Dir"""
33+
C.dataset_path = "/home/jia/Downloads/database/minc-2500/"
34+
C.img_root_folder = C.dataset_path
35+
C.gt_root_folder = C.dataset_path
36+
37+
38+
"""Path Config"""
39+
40+
def add_path(path):
41+
if path not in sys.path:
42+
sys.path.insert(0, path)
43+
44+
45+
add_path(C.root_dir)
46+
47+
"""Train Config"""
48+
C.lr = 1e-2
49+
C.lr_decay = 40
50+
C.momentum = 0.9
51+
C.weight_decay = 1e-4
52+
C.batch_size = 64
53+
C.start_epoch = 1
54+
C.nepochs = 100
55+
56+
C.no_cuda = False
57+
C.resume = False
58+
C.momentum = 0.9
59+
C.weight_decay = 1e-4
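
This commit does not include the training entry point, so the following is only a sketch of how the pieces are presumably wired together when run from `experiments/gtos.dep.R18/`; the `config.cuda` line is an assumption (`config.py` defines `no_cuda`, while `dataloader/gtos.py` reads `cuda`):

```python
# Hedged sketch; the actual train.py / run.sh are not part of this excerpt.
import torch

# Importing the experiment config also calls add_path(root_dir), which puts the
# repo root on sys.path so the dataloader package below can be imported.
# (root_dir detection requires the checkout directory name to contain
# C.repo_name, i.e. 'Deep-Encoding-Pooling-Network-DEP-'.)
from config import config
from dataloader.gtos import Dataloder

# gtos.py reads config.cuda, while config.py only defines no_cuda, so a real
# entry point presumably derives it along these lines:
config.cuda = (not config.no_cuda) and torch.cuda.is_available()

classes, trainloader, testloader = Dataloder(config).getloader()
print(len(classes), 'classes,', len(trainloader), 'train batches')
```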
