"""
data loader taking python shelve DB
NOTE: only works for training
"""
import gzip
import json
import math
import random
import shelve
import torch
from torch.utils.data import DataLoader, Sampler, Dataset
from torch.nn.utils.rnn import pad_sequence
from prepro_v4 import InputFeatures
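

# InputFeatures (from prepro_v4) is assumed to be a plain container whose
# fields mirror the per-example dicts stored in the DB: input_ids,
# position_ids, token_type_ids, lm_labels and input_len.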
class BucketSampler(Sampler):
    """Yields batches of indices grouped into length-sorted buckets."""

    def __init__(self, lens, bucket_size, batch_size,
                 droplast=False, shuffle=True):
        self._lens = lens
        self._batch_size = batch_size
        self._bucket_size = bucket_size
        self._droplast = droplast
        self._shuf = shuffle

    def __iter__(self):
        ids = list(range(len(self._lens)))
        if self._shuf:
            random.shuffle(ids)
        # sort each bucket by example length so that a batch holds
        # similarly sized sequences and padding stays small
        buckets = [sorted(ids[i:i+self._bucket_size],
                          key=lambda idx: self._lens[idx], reverse=True)
                   for i in range(0, len(ids), self._bucket_size)]
        batches = [bucket[i:i+self._batch_size]
                   for bucket in buckets
                   for i in range(0, len(bucket), self._batch_size)]
        if self._droplast:
            batches = [batch for batch in batches
                       if len(batch) == self._batch_size]
        if self._shuf:
            random.shuffle(batches)
        return iter(batches)

    def __len__(self):
        bucket_sizes = ([self._bucket_size]
                        * (len(self._lens) // self._bucket_size)
                        + [len(self._lens) % self._bucket_size])
        if self._droplast:
            return sum(s // self._batch_size for s in bucket_sizes)
        else:
            return sum(math.ceil(s / self._batch_size) for s in bucket_sizes)
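

# A minimal sketch (toy numbers, not from the original file) of how
# BucketSampler batches: indices fall into buckets of bucket_size,
# each bucket is sorted by length descending, then cut into batches:
#
#     sampler = BucketSampler([5, 3, 9, 2, 7, 4], bucket_size=4,
#                             batch_size=2, droplast=False, shuffle=False)
#     list(sampler)  # -> [[2, 0], [1, 3], [4, 5]]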


class GPT2FeatureDataset(Dataset):
    def __init__(self, features, max_len=None):
        self.features = features
        self.max_len = max_len  # when set, over-long examples are truncated

    def __getitem__(self, i):
        feat_dict = self.features[i]
        if self.max_len is not None and feat_dict['input_len'] > self.max_len:
            # truncate on the left side (context)
            feat_dict['input_ids'] = feat_dict['input_ids'][-self.max_len:]
            feat_dict['position_ids'] = feat_dict['position_ids'][
                -self.max_len:]  # FIXME what is the correct behavior here?
            feat_dict['token_type_ids'] = feat_dict['token_type_ids'][
                -self.max_len:]
            feat_dict['lm_labels'] = feat_dict['lm_labels'][-self.max_len:]
        feat = InputFeatures(**feat_dict)
        return feat

    def __len__(self):
        return len(self.features)

    @staticmethod
    def collate(features):
        input_ids = pad_sequence([torch.tensor(f.input_ids, dtype=torch.long)
                                  for f in features],
                                 batch_first=True, padding_value=0)
        position_ids = pad_sequence([torch.tensor(f.position_ids,
                                                  dtype=torch.long)
                                     for f in features],
                                    batch_first=True, padding_value=0)
        token_type_ids = pad_sequence([torch.tensor(f.token_type_ids,
                                                    dtype=torch.long)
                                       for f in features],
                                      batch_first=True, padding_value=0)
        labels = pad_sequence([torch.tensor(f.lm_labels, dtype=torch.long)
                               for f in features],
                              batch_first=True, padding_value=-1)
        return (input_ids, position_ids, token_type_ids, labels)
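

# Note on padding values: inputs are padded with 0, while lm_labels are
# padded with -1, presumably so the training loss can skip padded
# positions (e.g. CrossEntropyLoss(ignore_index=-1)).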


class BucketingDataLoader(object):
    """Iterates over shelve DB chunks and yields length-bucketed batches."""

    def __init__(self, db_name, batch_size, max_seq_len,
                 bucket=100, shuffle=True):
        self.db = shelve.open(f'{db_name}/db', 'r')
        self.batch_size = batch_size
        self.max_len = max_seq_len
        self.bucket_size = bucket * batch_size
        self.shuffle = shuffle

    def _get_keys(self):
        keys = list(self.db.keys())
        return keys

    def __iter__(self):
        keys = self._get_keys()
        if self.shuffle:
            random.shuffle(keys)
        for key in keys:
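            # each DB value is assumed to hold a gzip-compressed JSON
            # list of feature dicts (one chunk of examples)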
            chunk = json.loads(gzip.decompress(self.db[key]).decode('utf-8'))
            # discard examples longer than max_len
            trunc_chunk = []
            lens = []
            for feat in chunk:
                if feat['input_len'] > self.max_len:
                    continue
                trunc_chunk.append(feat)
                lens.append(feat['input_len'])

            dataset = GPT2FeatureDataset(trunc_chunk, self.max_len)
            sampler = BucketSampler(lens, self.bucket_size, self.batch_size,
                                    droplast=True, shuffle=self.shuffle)
            loader = DataLoader(dataset, batch_sampler=sampler,
                                num_workers=0,  # raise to test multi-worker loading
                                collate_fn=GPT2FeatureDataset.collate)
            yield from loader

    def __len__(self):
        raise NotImplementedError()

    def __del__(self):
        # guard against shelve.open having failed in __init__
        if hasattr(self, 'db'):
            self.db.close()


class DistributedBucketingDataLoader(BucketingDataLoader):
    """Assigns a disjoint, strided subset of DB keys to each replica."""

    def __init__(self, rank, num_replica, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.rank = rank
        self.num_replica = num_replica

    def _get_keys(self):
        keys = list(self.db.keys())[self.rank::self.num_replica]
        return keys
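

# For example, with 8 keys and num_replica=4, rank 1 sees keys 1 and 5;
# the self.rank::self.num_replica stride keeps the shards disjoint.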


def test(db_path):
    import ipdb
    loader = BucketingDataLoader(db_path, 32, 256)
    for batch in loader:
        ipdb.set_trace()


if __name__ == '__main__':
    import sys
    db_path = sys.argv[1]
    test(db_path)