Skip to content

Commit

Permalink
Mixed Precision to False
Browse files Browse the repository at this point in the history
  • Loading branch information
pascal-pfeiffer committed May 14, 2024
1 parent 0d2df5e commit cc8bce4
Show file tree
Hide file tree
Showing 3 changed files with 17 additions and 0 deletions.
1 change: 1 addition & 0 deletions tests/ui/llm_studio.feature
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ Feature: LLM Studio
Scenario: Create experiment
When I create experiment test-experiment
And I update LLM Backbone to h2oai/llama2-0b-unit-test
And I set Mixed Precision to false
And I tweak data sampling to 0.5
And I tweak max length prompt to 128
And I tweak max length answer to 128
Expand Down
11 changes: 11 additions & 0 deletions tests/ui/llm_studio_page.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ class LLMStudioPage(BasePage):
# data-test-id keys for the experiment-configuration controls in the UI.
# Each string is passed to get_by_test_id() to locate the widget.
MAX_LENGTH_ANSWER = "experiment/start/cfg/max_length_answer"
MAX_LENGTH = "experiment/start/cfg/max_length"
MAX_LENGTH_INFERENCE = "experiment/start/cfg/max_length_inference"
# Toggle for the Mixed Precision setting (aria-checked "true"/"false").
MIXED_PRECISION = "experiment/start/cfg/mixed_precision"
# Refresh button on the experiment list page.
EXPERIMENT_REFRESH_SELECTOR = "experiment/list/refresh"
# "Proceed" control shown with the GPU warning dialog.
GPU_WARNING_SELECTOR = "experiment/start/error/proceed"

Expand Down Expand Up @@ -173,6 +174,16 @@ def experiment_name(self, name: str):
def llm_backbone(self, value: str):
    """Type the given backbone name into the "LLM Backbone" combobox."""
    combobox = self.page.get_by_role("combobox", name="LLM Backbone")
    combobox.fill(value)

def mixed_precision(self, value: bool):
    """Set the Mixed Precision toggle to the desired state.

    Accepts either a real bool or the strings "true"/"false" (the BDD
    step parser delivers a string), normalizes it to the "true"/"false"
    form used by the toggle's aria-checked attribute, and clicks the
    toggle only when its current state differs from the requested one.
    """
    # Normalize: the original compared a (possibly bool) value directly
    # against aria-checked strings, which can never match for a bool.
    if isinstance(value, bool):
        desired = "true" if value else "false"
    else:
        desired = str(value).strip().lower()
    assert desired in ["true", "false"]

    # Look the toggle up once instead of twice.
    toggle = self.get_by_test_id(self.MIXED_PRECISION)
    current = toggle.get_attribute("aria-checked")
    assert current in ["true", "false"]

    # Click only if the state actually needs to change (it's a toggle).
    if current != desired:
        toggle.click()

def data_sample(self, value):
    # Set the data-sampling slider via the shared slider helper.
    # value is forwarded as-is (presumably a fraction such as 0.5 —
    # confirm against the slider helper's contract).
    self.slider(self.DATA_SAMPLING, value)

Expand Down
5 changes: 5 additions & 0 deletions tests/ui/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,11 @@ def update_llm_backbone(llm_studio: LLMStudioPage, llm_backbone: str):
llm_studio.llm_backbone(llm_backbone)


@when(parsers.parse("I set Mixed Precision to {value}"))
def update_mixed_precision(llm_studio: LLMStudioPage, value: str):
    """BDD step: set the Mixed Precision toggle to *value*.

    NOTE: parsers.parse always captures {value} as a string (e.g.
    "false"), so the parameter is annotated str — the original bool
    annotation was incorrect. The page object is responsible for
    interpreting the string.
    """
    llm_studio.mixed_precision(value)


@when(parsers.parse("I tweak max length prompt to {value}"))
def tweak_max_length_prompt(llm_studio: LLMStudioPage, value: str):
    # BDD step: forward the parsed string value to the page object,
    # which fills the "max length prompt" field.
    llm_studio.max_length_prompt(value)
Expand Down

0 comments on commit cc8bce4

Please sign in to comment.