Skip to content

Commit

Permalink
Fix eval_f1 and QA error (#21)
Browse files · Browse the repository at this point in the history
  • Loading branch information
xin3he committed Apr 7, 2022
1 parent afb84c5 commit d6e5219
Showing 1 changed file with 2 additions and 1 deletion.
3 changes: 2 additions & 1 deletion nlp_toolkit/optimization/trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1004,6 +1004,7 @@ def autodistillation(self, teacher_model, model_builder=None, model_cls=None,
train_func=None, eval_func=None):
assert hasattr(self, "autodistillation_config"), "Must specify" + \
"Trainer.autodistillation_config before calling autodistillation."
self.evaluation_loop = self.auto_distil_evaluation_loop
if model_builder is None:
assert model_cls is not None, "Must specify model_cls to use the built-in " + \
"model_builder, e.g. model_cls=AutoModelForPreTraining, or you can use " + \
Expand Down Expand Up @@ -1111,7 +1112,7 @@ def model_builder_builtin(self, arch_paras=None, model_cls=None):
config.__setattr__('true_hidden_size', arch_paras[k])
return model_cls.from_config(config)

def evaluation_loop(
def auto_distil_evaluation_loop(
self,
dataloader: DataLoader,
description: str,
Expand Down

0 comments on commit d6e5219

Please sign in to comment.