import numpy as np
from transformers import AutoTokenizer, AutoModelForCausalLM
import shap
import torch
tokenizer = AutoTokenizer.from_pretrained("gpt2", use_fast=True)
model = AutoModelForCausalLM.from_pretrained("gpt2").cuda()
# set model decoder to true
model.config.is_decoder=True
# set text-generation params under task_specific_params
model.config.task_specific_params["text-generation"] = {
"do_sample": True,
"max_length": 50,
"temperature": 0.7,
"top_k": 50,
"no_repeat_ngram_size": 2
}
# Define initial text
s = ['I enjoy walking with my cute dog']
# Create an explainer object
explainer = shap.Explainer(model, tokenizer)
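Presumably the missing step after this is to call the explainer on s; something like the following (the shap.plots.text call is just one way to view the result):

# Compute SHAP values for the input text and visualize them
shap_values = explainer(s)
shap.plots.text(shap_values)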
That is for a plain transformers model. When I try the same thing with a Tez model, it gives an error.
# imports needed for the Tez model (sklearn for metrics, transformers for AdamW and the scheduler)
import tez
import torch
import torch.nn as nn
import transformers
from sklearn import metrics
from transformers import AdamW, get_linear_schedule_with_warmup

class Classifier(tez.Model):
    def __init__(self, num_train_steps, num_classes):
        super().__init__()
        self.bert = transformers.SqueezeBertModel.from_pretrained("squeezebert/squeezebert-uncased")
        self.bert_drop = nn.Dropout(0.3)
        self.out = nn.Linear(768, num_classes)
        self.num_train_steps = num_train_steps
        self.step_scheduler_after = "batch"

    def fetch_optimizer(self):
        param_optimizer = list(self.named_parameters())
        no_decay = ["bias", "LayerNorm.bias"]
        optimizer_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": 0.001,
            },
            {
                "params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        opt = AdamW(optimizer_parameters, lr=3e-5)
        return opt

    def fetch_scheduler(self):
        sch = get_linear_schedule_with_warmup(
            self.optimizer, num_warmup_steps=0, num_training_steps=self.num_train_steps
        )
        return sch

    def loss(self, outputs, targets):
        if targets is None:
            return None
        return nn.BCEWithLogitsLoss()(outputs, targets.float())

    def monitor_metrics(self, outputs, targets):
        if targets is None:
            return {}
        outputs = torch.sigmoid(outputs)
        outputs = outputs.cpu().detach().numpy()
        targets = targets.cpu().detach().numpy()
        fpr_micro, tpr_micro, _ = metrics.roc_curve(targets.ravel(), outputs.ravel())
        auc_micro = metrics.auc(fpr_micro, tpr_micro)
        return {"auc": auc_micro}

    def forward(self, ids, mask, targets=None):
        o_2 = self.bert(ids, attention_mask=mask)[1]
        b_o = self.bert_drop(o_2)
        output = self.out(b_o)
        loss = self.loss(output, targets)
        acc = self.monitor_metrics(output, targets)
        return output, loss, acc
n_train_steps = 13565
model = Classifier(n_train_steps, 28)
optimizer = model.fetch_optimizer()
checkpoint = torch.load('C:/Users/Jay/Downloads/model (1).bin', map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
explainer = shap.Explainer(model, tokenizer)
shap_values = explainer('The waiter shook his head in horror and left.')
Error: TypeError: forward() missing 1 required positional argument: 'mask'
Can you please let me know how we can resolve this?
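My guess is that shap.Explainer calls the model on the tokenized ids alone, so forward() never receives its mask argument. One workaround I am considering (just a sketch, not something confirmed by the SHAP docs for Tez; the predict function name and the tokenizer choice are my own) is to wrap the model in a plain text-to-scores function and pass a text masker:

# Sketch: wrap the Tez model so SHAP never calls forward(ids, mask) directly.
# The tokenizer here should presumably be the SqueezeBERT one, e.g.
# AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased"), not the GPT-2 tokenizer.
model.eval()

def predict(texts):
    enc = tokenizer(list(texts), padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        logits, _, _ = model(ids=enc["input_ids"], mask=enc["attention_mask"])
    return torch.sigmoid(logits).cpu().numpy()

masker = shap.maskers.Text(tokenizer)
explainer = shap.Explainer(predict, masker)
shap_values = explainer(['The waiter shook his head in horror and left.'])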