Merge pull request #15 from haseeb-heaven/feat/support-gemma-groq
Feat/support gemma groq
haseeb-heaven committed Mar 17, 2024
2 parents 553ee1e + 0735433 commit 2e3b4fa
Showing 4 changed files with 27 additions and 5 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -298,6 +298,7 @@ If you're interested in contributing to **Code-Interpreter**, we'd love to have
- **v2.0.1** - Added AnthropicAI Claude-2, Instant models.

🔥 **v2.1** - Added AnthropicAI Claude-3 powerful _Opus, Sonnet, Haiku_ models.
+- **v2.1.1** - Added **Groq-AI** Model _Gemma-7B_ with **700 Tokens/Sec**.

## 📜 **License**

17 changes: 17 additions & 0 deletions configs/groq-gemma.config
@@ -0,0 +1,17 @@
+# The temperature parameter controls the randomness of the model's output. Lower values make the output more deterministic.
+temperature = 0.1
+
+# The maximum number of new tokens that the model can generate.
+max_tokens = 8192
+
+# The start separator for the generated code.
+start_sep = ```
+
+# The end separator for the generated code.
+end_sep = ```
+
+# If True, the first line of the generated text will be skipped.
+skip_first_line = True
+
+# The model used for generating the code.
+HF_MODEL = groq-gemma
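
For reference, a config in this `key = value` format can be read with a few lines of Python. The project's actual loader is not part of this diff, so `load_config` below is a hypothetical stand-in:

```python
# Hypothetical reader for the key = value config format above;
# the repository's real loader may differ.
def load_config(path: str) -> dict:
    values = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            key, _, value = line.partition("=")
            values[key.strip()] = value.strip()
    return values

config = load_config("configs/groq-gemma.config")
print(config["HF_MODEL"])  # -> groq-gemma
```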
2 changes: 1 addition & 1 deletion interpreter.py
@@ -24,7 +24,7 @@
from libs.utility_manager import UtilityManager

# The main version of the interpreter.
INTERPRETER_VERSION = "2.1"
INTERPRETER_VERSION = "2.1.1"

def main():
parser = argparse.ArgumentParser(description='Code - Interpreter')
12 changes: 8 additions & 4 deletions libs/interpreter_lib.py
@@ -53,7 +53,7 @@ def initialize(self):
self.DISPLAY_CODE = self.args.display_code
self.INTERPRETER_MODEL = self.args.model if self.args.model else None
self.logger.info(f"Interpreter args model selected is '{self.args.model}")
self.logger.info(f"Interpreter model selected is '{self.INTERPRETER_MODEL}")
self.logger.info(f"Interpreter model selected is '{self.INTERPRETER_MODEL}'")
self.system_message = ""
self.INTERPRETER_MODE = 'code'

@@ -91,7 +91,7 @@ def initialize_client(self):
load_dotenv()
self.logger.info("Initializing Client")

self.logger.info(f"Interpreter model selected is '{self.INTERPRETER_MODEL}")
self.logger.info(f"Interpreter model selected is '{self.INTERPRETER_MODEL}'")
if self.INTERPRETER_MODEL is None or self.INTERPRETER_MODEL == "":
    self.logger.info("HF_MODEL is not provided, using default model.")
    config_file_name = f"configs/gpt-3.5-turbo.config" # Setting default model to GPT 3.5 Turbo.
@@ -225,7 +225,8 @@ def execute_last_code(self,os_name):

def generate_content(self,message, chat_history: list[tuple[str, str]], temperature=0.1, max_tokens=1024,config_values=None,image_file=None):
self.logger.info(f"Generating content with args: message={message}, chat_history={chat_history}, temperature={temperature}, max_tokens={max_tokens}, config_values={config_values}, image_file={image_file}")

self.logger.info(f"Interpreter model selected is '{self.INTERPRETER_MODEL}'")

# Use the values from the config file if they are provided
if config_values:
    temperature = float(config_values.get('temperature', temperature))
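
The hunk above lets per-model config values override the defaults passed to generate_content. A minimal sketch of that pattern, using values from the groq-gemma config shown earlier (the max_tokens override is elided in the diff and assumed here):

```python
# Config values, when present, take precedence over the call-site defaults.
temperature, max_tokens = 0.1, 1024  # defaults from the signature above
config_values = {"temperature": "0.1", "max_tokens": "8192"}
if config_values:
    temperature = float(config_values.get("temperature", temperature))
    max_tokens = int(config_values.get("max_tokens", max_tokens))
print(temperature, max_tokens)  # -> 0.1 8192
```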
@@ -300,6 +301,9 @@ def generate_content(self,message, chat_history: list[tuple[str, str]], temperat
elif 'groq-mixtral' in self.INTERPRETER_MODEL:
    self.logger.info("Model is Groq/Mixtral.")
    self.INTERPRETER_MODEL = "groq/mixtral-8x7b-32768"
+elif 'groq-gemma' in self.INTERPRETER_MODEL:
+    self.logger.info("Model is Groq/Gemma.")
+    self.INTERPRETER_MODEL = "groq/gemma-7b-it"

response = litellm.completion(self.INTERPRETER_MODEL, messages=messages,temperature=temperature,max_tokens=max_tokens)
self.logger.info("Response received from completion function.")
@@ -697,7 +701,7 @@ def interpreter_main(self,version):

# Check if prompt contains any file uploaded by user.
extracted_file_name = self.utility_manager.extract_file_name(prompt)
self.logger.info(f"Input prompt extracted_name: '{extracted_file_name}'")
self.logger.info(f"Input prompt file name: '{extracted_file_name}'")

if extracted_file_name is not None:
    full_path = self.utility_manager.get_full_file_path(extracted_file_name)
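
For context, extract_file_name pulls a file name out of the user's prompt. Its implementation is not part of this diff, so the regex-based sketch below is only an assumption of how such a helper might behave:

```python
import re

# Hypothetical sketch of a prompt file-name extractor; the real
# UtilityManager.extract_file_name may use different rules.
def extract_file_name(prompt: str):
    match = re.search(r"\b[\w./\\-]+\.[A-Za-z0-9]{1,5}\b", prompt)
    return match.group(0) if match else None

print(extract_file_name("plot the data in sales.csv"))  # -> sales.csv
```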
Expand Down
