pseudo_label_stats.py
import argparse
import json

from sklearn.metrics import confusion_matrix, f1_score

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input-file", help="Input file")
    args = parser.parse_args()

    with open(args.input_file) as rf:
        data = json.load(rf)

    # Truncate class names to 9 characters so they fit the fixed-width
    # columns of the confusion-matrix printout below.
    classes = [c[:9] for c in data["classes"]]
    print(f"Data size: {len(data['data'])}")

    # F1 scores over the subset of samples whose predicted-class confidence
    # clears each threshold; "N/A" when no sample survives the filter.
    for confidence_threshold in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
        labels = [
            sample["label"]
            for sample in data["data"]
            if sample["confidence"][sample["prediction"]] >= confidence_threshold
        ]
        preds = [
            sample["prediction"]
            for sample in data["data"]
            if sample["confidence"][sample["prediction"]] >= confidence_threshold
        ]
        if not labels or not preds:
            macro_f1 = "N/A"
            micro_f1 = "N/A"
        else:
            macro_f1 = f1_score(labels, preds, average="macro")
            micro_f1 = f1_score(labels, preds, average="micro")
        print(
            f"[Confidence {confidence_threshold}] Micro f1: {micro_f1}, Macro f1: {macro_f1}"
        )

    # Confusion matrix over all samples, regardless of confidence. Rows and
    # columns follow sklearn's sorted label order, which lines up with
    # `classes` only if labels are 0-based class indices and every class
    # occurs at least once in the data.
    labels = [sample["label"] for sample in data["data"]]
    preds = [sample["prediction"] for sample in data["data"]]
    matrix = confusion_matrix(labels, preds)
    name_string = "".join(f"{name:10s}" for name in classes)
    print(f"{' ' * 10}{name_string}")
    for name, row in zip(classes, matrix):
        row_string = "".join(f"{str(v):10s}" for v in row)
        print(f"{name:10s}{row_string}")
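The script assumes the input JSON carries a `classes` list of names and a `data` list of samples, where each sample has a gold `label`, a predicted class index `prediction`, and a per-class `confidence` vector, so that `sample["confidence"][sample["prediction"]]` is the confidence of the predicted class. A minimal sketch of a compatible input follows; the file name `toy_predictions.json` and the specific class names and values are illustrative assumptions, not taken from the original.

import json

# Hypothetical toy input: `label` and `prediction` are 0-based indices into
# `classes`; `confidence` is a per-class probability vector for each sample.
toy = {
    "classes": ["positive", "negative", "neutral"],
    "data": [
        {"label": 0, "prediction": 0, "confidence": [0.92, 0.05, 0.03]},
        {"label": 1, "prediction": 2, "confidence": [0.10, 0.35, 0.55]},
        {"label": 2, "prediction": 2, "confidence": [0.20, 0.15, 0.65]},
    ],
}

with open("toy_predictions.json", "w") as wf:
    json.dump(toy, wf)

# Then run the script against it:
#   python pseudo_label_stats.py -i toy_predictions.json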