# -*- coding: utf-8 -*-
import random
from collections import Counter

import click
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from sklearn.utils.class_weight import compute_class_weight

from utils.tokenizer import DefaultTokenizer

_tokenizer = DefaultTokenizer()


def build_dataset(dataset, page_db, entity_db, category, text_len, min_text_len,
                  entity_len, min_entity_len, abstract_only, random_seed,
                  min_word_count, min_entity_count, in_links, out_links,
                  dev_size, test_size, **kwargs):
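    """Build a train/dev/test dataset for the page attribute classifier.

    Converts knowledge-base (title, type) pairs into padded word-ID and
    entity-ID sequences plus one-hot labels, and returns them together with
    the vocabularies, class weights, and feature options used to build them.
    """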
    random.seed(random_seed)
    np.random.seed(random_seed)

    if category == 'pro':
        kb_data = dataset.profession_kb
        click.echo('Category: Profession')
    elif category == 'nat':
        kb_data = dataset.nationality_kb
        click.echo('Category: Nationality')
    else:
        raise RuntimeError('Unsupported category: %s' % category)
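
    # Keep only unambiguous pages, i.e. those with exactly one type label.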
    target_data = [(title, types[0]) for (title, types) in kb_data
                   if len(types) == 1]
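
    # First pass over the page DB: count lowercased words and linked entities
    # so that rare items can be pruned from the vocabularies below.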
    word_counter = Counter()
    entity_counter = Counter()
    click.echo('Building vocabulary...')
    with click.progressbar(page_db.keys()) as bar:
        for key in bar:
            title = key.decode('utf-8')
            title = entity_db.resolve_redirect(title)
            words = _tokenizer.tokenize(page_db.get_text(title, abstract_only))
            words = words[:text_len]
            entities = {link.title for link in page_db.get_links(
                title, in_links, out_links, abstract_only)}
            word_counter.update(w.lower() for w in words)
            entity_counter.update(entities)
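
    # Map each sufficiently frequent word/entity to an integer ID. IDs start
    # at 1 so that 0 stays free as the padding value used by pad_sequences.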
    word_dic = {w: n for (n, w) in enumerate(
        (w for (w, c) in word_counter.items() if c >= min_word_count), 1)}
    entity_dic = {e: n for (n, e) in enumerate(
        (e for (e, c) in entity_counter.items() if c >= min_entity_count), 1)}

    click.echo('Building dataset...')
    # sorted() keeps the label order stable across runs; plain set iteration
    # order depends on string hash randomization.
    type_list = sorted({t for (_, t) in target_data})
    type_index = {t: n for (n, t) in enumerate(type_list)}

    words_arr = []
    entities_arr = []
    label_names = []
    titles = []
    feature_options = dict(in_links=in_links, out_links=out_links,
                           abstract_only=abstract_only)
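
    # Second pass: generate ID-sequence features for each target page, and
    # skip pages whose text or link features are too short to be useful.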
    with click.progressbar(target_data) as bar:
        for (title, type_name) in bar:
            title = entity_db.resolve_redirect(title)
            ret = generate_features(title, page_db, word_dic, entity_dic,
                                    **feature_options)
            if ret is None:
                continue

            (word_f, entity_f) = ret
            if len(word_f) <= min_text_len:
                continue
            if len(entity_f) <= min_entity_len:
                continue

            words_arr.append(word_f)
            entities_arr.append(entity_f)
            label_names.append(type_name)
            titles.append(title)
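
    # Convert everything to fixed-shape arrays: pad (or truncate) the ID
    # sequences and one-hot encode the labels.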
    titles = np.array(titles)
    words_arr = pad_sequences(words_arr, maxlen=text_len, dtype='int32')
    entities_arr = pad_sequences(entities_arr, maxlen=entity_len, dtype='int32')

    labels = np.zeros((len(label_names), len(type_list)), dtype='int32')
    for (n, label_name) in enumerate(label_names):
        labels[n, type_index[label_name]] = 1
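
    # Randomly carve dev and test subsets out of the index set; whatever
    # remains becomes the training split.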
    dev_count = int(len(words_arr) * dev_size)
    click.echo('dev size: %d' % dev_count)
    test_count = int(len(words_arr) * test_size)
    click.echo('test size: %d' % test_count)

    indices = set(range(len(words_arr)))
    data = {}
    titles_data = {}

    if dev_count > 0:
        # sorted() makes sampling reproducible and keeps random.sample()
        # working on Python 3.11+, which no longer accepts sets.
        dev_indices = random.sample(sorted(indices), dev_count)
        data['dev'] = (words_arr[dev_indices], entities_arr[dev_indices],
                       labels[dev_indices])
        titles_data['dev'] = titles[dev_indices]
        indices -= set(dev_indices)

    if test_count > 0:
        test_indices = random.sample(sorted(indices), test_count)
        data['test'] = (words_arr[test_indices], entities_arr[test_indices],
                        labels[test_indices])
        titles_data['test'] = titles[test_indices]
        indices -= set(test_indices)

    train_indices = sorted(indices)
    data['train'] = (words_arr[train_indices], entities_arr[train_indices],
                     labels[train_indices])
    titles_data['train'] = titles[train_indices]
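
    # Balanced class weights counteract label imbalance; note that they are
    # computed over all target pairs, before the length filtering above.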
    # Recent scikit-learn versions require keyword arguments here.
    class_weight = compute_class_weight(class_weight='balanced',
                                        classes=np.array(type_list),
                                        y=[t for (_, t) in target_data])
    class_weight = {i: w for (i, w) in enumerate(class_weight)}

    return dict(data=data, titles=titles_data, type_list=type_list,
                word_dic=word_dic, entity_dic=entity_dic, category=category,
                class_weight=class_weight, feature_options=feature_options)


def generate_features(title, page_db, word_dic, entity_dic, in_links=True,
                      out_links=True, abstract_only=False):
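    """Return (word ID list, entity ID list) for a page, or None if the page
    is missing from the page DB."""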
    try:
        text = page_db.get_text(title, abstract_only)
        links = {link.title for link in page_db.get_links(
            title, in_links, out_links, abstract_only)}
    except KeyError:
        return None

    word_f = [word_dic[w.lower()] for w in _tokenizer.tokenize(text)
              if w.lower() in word_dic]
    entity_f = [entity_dic[e] for e in links if e in entity_dic]
    return (word_f, entity_f)
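

# Minimal usage sketch (illustrative only: the `dataset`, `page_db`, and
# `entity_db` objects and the parameter values below are assumptions, not
# part of this module):
#
#     built = build_dataset(dataset, page_db, entity_db, category='pro',
#                           text_len=1000, min_text_len=10,
#                           entity_len=100, min_entity_len=3,
#                           abstract_only=False, random_seed=1,
#                           min_word_count=5, min_entity_count=3,
#                           in_links=True, out_links=True,
#                           dev_size=0.1, test_size=0.1)
#     (train_words, train_entities, train_labels) = built['data']['train']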