-
Notifications
You must be signed in to change notification settings - Fork 0
/
DTAnalysis.py
60 lines (50 loc) · 2.4 KB
/
DTAnalysis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import pandas as pd
from joblib import load
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from treeinterpreter import treeinterpreter as ti
import numpy as np
# --- Load data --------------------------------------------------------------
print("Loading data...")
X = pd.read_csv("MIArr.csv", header=None, dtype=int).to_numpy(dtype=int)
Y = pd.read_csv("MILabels.csv", header=None, dtype=int).to_numpy(dtype=int)
feats = pd.read_csv("FinalFeatureSetNN_RF.csv", header=None, dtype=str).to_numpy(dtype=str)
classes = ["Ramnit ","Lollipop","Kelihos.v3","Vundo","Simda","Tracur","Kelihos.v1","Obfuscator.ACY","Gatak"]
print("Data Loaded...")

# Load the trained random-forest model.
clf = load('RF_CLF.joblib')

# Sample-of-interest index; corresponds to sample 4WM7aZDLCmlosUBiqKOx in MIArr.csv.
sampInd = 1557

# True label of the sample of interest.
# NOTE(review): the original indexed Y[sampInd-1] while X was indexed with
# sampInd — an off-by-one that scored each tree against the *previous*
# sample's label. Fixed to use the same index as X.
y_true = int(Y[sampInd, 0])
# predict_proba columns follow clf.classes_ order, which need not equal the
# raw label value, so map the label to its column explicitly.
true_col = int(np.searchsorted(clf.classes_, y_true))

# Find the sub-tree with the highest predicted probability that X[sampInd]
# belongs to its true class.
best_prob = -1.0  # renamed from `max` (shadowed builtin); -1 guarantees Mdt is set
Mdt = None
for dt in clf.estimators_:
    prob = dt.predict_proba(X[sampInd:sampInd + 1])[0, true_col]
    if prob > best_prob:
        Mdt = dt
        best_prob = prob

# Evaluate the sample of interest with the decision-tree sub-classifier that
# predicts the highest probability of the true class for this sample.
prediction, bias, contributions = ti.predict(Mdt, X[sampInd:sampInd + 1])
y_samp = Y[sampInd:sampInd + 1][0][0]
print("Interpretation of sample: " + str(sampInd) + "(class = " + str(y_samp) + ")")
print("Predicted probabilities: " + str(prediction[0]))
y_pred = np.argmax(prediction)
print("Predicted class = " + str(y_pred + 1))

# Print the contributions of features to the sample prediction according to
# treeinterpreter; show only the major ones. The threshold is 0.05 in
# absolute probability (the original banner wrongly read "0.05%").
print("Major contributions to prediction(≥ 0.05):")
print("Bias:\t" + str(round(bias[0][y_pred], 4)))
explained = round(bias[0][y_pred], 4)  # renamed from `sum` (shadowed builtin)
for i in range(len(feats)):
    contrib = contributions[0][i][y_pred]
    if contrib >= 0.05:
        print(str(feats[i]) + ":\t" + str(round(contrib, 4)))
        explained += round(contrib, 4)
# Fraction of the predicted probability accounted for by bias + major
# contributions. NOTE(review): the original printed 1-(prediction-sum),
# which is not a fraction of the prediction; the ratio below is.
print("features above responsible for: ", explained / prediction[0][y_pred], "of predicted probability\n\n")

# Optionally dump the full contribution list.
ans = input("See full feature contribution list?[Y/N]:")
if ans.strip().upper() == "Y":  # also accept a lowercase "y"
    print("All contributions to prediction:")
    print("Bias:\t" + str(bias[0][y_pred]))
    for i in range(len(feats)):
        if contributions[0][i][y_pred] > 0:
            print(str(feats[i]) + ":\t" + str(contributions[0][i][y_pred]))
    print("All other features have 0 importance")