Added Claude-3 Models #9

Merged
merged 1 commit on Mar 4, 2024
6 changes: 5 additions & 1 deletion README.md
@@ -36,7 +36,8 @@ The distinguishing feature of this interpreter, as compared to others, is its **
- 💻 Support for more **Operating Systems**.
- 📝 Support for **Multi-Modal** for _Text_ and _Vision_.
- 📊 Support for **Google** and **OpenAI** Vision Models.
- Support for **Local** models via **LLM Studio**.
- 💻 ~~Support for **Local** models via **LLM Studio**.~~
- 🔗 Support for **Multi-Modal** models from Anthropic AI.

## **Table of Contents**
- [Features](#🌟-features)
@@ -165,6 +166,7 @@ To use Code-Interpreter, use the following command options:
- `gemini-pro` - Generates code using the Gemini Pro model.
- `palm-2` - Generates code using the PALM 2 model.
- `claude-2` - Generates code using the AnthropicAI Claude-2 model.
- `claude-3` - Generates code using the AnthropicAI Claude-3 model.
- `groq-mixtral` - Generates code using the Groq Mixtral model.
- `groq-llama2` - Generates code using the Groq Llama2 model.
- `code-llama` - Generates code using the Code-llama model.
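With this PR, a Claude-3 model can then be selected via the `--model`/`-m` flag defined in `interpreter.py` further down this diff, e.g. `python interpreter.py --model claude-3-opus --mode code` (an assumed invocation pieced together from the argparse definitions below).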
@@ -290,6 +292,8 @@ If you're interested in contributing to **Code-Interpreter**, we'd love to have

- **v2.0.1** - Added AnthropicAI Claude-2, Instant models.

🔥 **v2.1** - Added AnthropicAI Claude-3 models: the powerful _Opus_, _Sonnet_, and _Haiku_.

## 📜 **License**

This project is licensed under the **MIT License**. For more details, please refer to the LICENSE file.
17 changes: 17 additions & 0 deletions configs/claude-3-opus.config
@@ -0,0 +1,17 @@
# The temperature parameter controls the randomness of the model's output. Lower values make the output more deterministic.
temperature = 0.1

# The maximum number of new tokens that the model can generate.
max_tokens = 1024

# The start separator for the generated code.
start_sep = ```

# The end separator for the generated code.
end_sep = ```

# If True, the first line of the generated text will be skipped.
skip_first_line = True

# The model used for generating the code.
HF_MODEL = claude-3-opus
17 changes: 17 additions & 0 deletions configs/claude-3-sonnet.config
@@ -0,0 +1,17 @@
# The temperature parameter controls the randomness of the model's output. Lower values make the output more deterministic.
temperature = 0.1

# The maximum number of new tokens that the model can generate.
max_tokens = 1024

# The start separator for the generated code.
start_sep = ```

# The end separator for the generated code.
end_sep = ```

# If True, the first line of the generated text will be skipped.
skip_first_line = True

# The model used for generating the code.
HF_MODEL = claude-3-sonnet
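Both `.config` files above follow the same `key = value` layout with `#` comment lines. For reference, a minimal reader for this layout might look like the sketch below (an illustrative parser written for this note, not the repository's actual config loader; the key names are taken from the files above):

```python
# Illustrative parser for the `key = value` .config layout shown above.
# NOTE: an assumption for this note; the repository's real loader may differ.
from pathlib import Path

def read_config(path: str) -> dict:
    config = {}
    for raw in Path(path).read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#"):
            continue  # skip blank lines and comments
        key, _, value = line.partition("=")
        config[key.strip()] = value.strip()
    return config

cfg = read_config("configs/claude-3-opus.config")
print(cfg["HF_MODEL"])         # claude-3-opus
print(int(cfg["max_tokens"]))  # 1024 -- values stay strings until cast
```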
2 changes: 1 addition & 1 deletion interpreter
@@ -29,7 +29,7 @@ def main():
    parser.add_argument('--save_code', '-s', action='store_true', default=False, help='Save the generated code')
    parser.add_argument('--mode', '-md', choices=['code', 'script', 'command', 'vision', 'chat'], help='Select the mode (`code` for generating code, `script` for generating shell scripts, `command` for generating single-line commands, `vision` for generating text from images)')
    parser.add_argument('--model', '-m', type=str, default='code-llama', help='Set the model for code generation. (Defaults to code-llama)')
    parser.add_argument('--version', '-v', action='version', version='%(prog)s 2.0')
    parser.add_argument('--version', '-v', action='version', version='%(prog)s 2.1')
    parser.add_argument('--lang', '-l', type=str, default='python', help='Set the interpreter language. (Defaults to Python)')
    parser.add_argument('--display_code', '-dc', action='store_true', default=False, help='Display the code in output')
    parser.add_argument('--history', '-hi', action='store_true', default=False, help='Use history as memory')
2 changes: 1 addition & 1 deletion interpreter.py
@@ -28,7 +28,7 @@ def main():
    parser.add_argument('--save_code', '-s', action='store_true', default=False, help='Save the generated code')
    parser.add_argument('--mode', '-md', choices=['code', 'script', 'command', 'vision', 'chat'], help='Select the mode (`code` for generating code, `script` for generating shell scripts, `command` for generating single-line commands, `vision` for generating text from images)')
    parser.add_argument('--model', '-m', type=str, default='code-llama', help='Set the model for code generation. (Defaults to code-llama)')
    parser.add_argument('--version', '-v', action='version', version='%(prog)s 2.0.1')
    parser.add_argument('--version', '-v', action='version', version='%(prog)s 2.1')
    parser.add_argument('--lang', '-l', type=str, default='python', help='Set the interpreter language. (Defaults to Python)')
    parser.add_argument('--display_code', '-dc', action='store_true', default=False, help='Display the code in output')
    parser.add_argument('--history', '-hi', action='store_true', default=False, help='Use history as memory')
41 changes: 35 additions & 6 deletions libs/interpreter_lib.py
@@ -28,7 +28,7 @@
class Interpreter:
    logger = None
    client = None
    interpreter_version = "2.0.1"
    interpreter_version = "2.1"

    def __init__(self, args):
        self.args = args
@@ -157,11 +157,29 @@ def get_prompt(self,message: str, chat_history: List[dict]) -> str:
        if chat_history:
            system_message += "\n\n" + "\n\n" + "This is user chat history for this task and make sure to use this as reference to generate the answer if user asks for 'History' or 'Chat History'.\n\n" + "\n\n" + str(chat_history) + "\n\n"

        messages = [
            {"role": "system", "content": system_message},
            {"role": "assistant", "content": "Please generate code wrapped inside triple backticks known as codeblock."},
            {"role": "user", "content": message}
        ]
        # Use the Messages API format from Anthropic for Claude-3 models.
        if 'claude-3' in self.INTERPRETER_MODEL:
            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": message
                        }
                    ]
                }
            ]

        # Use the OpenAI-style chat-completions format for all other models.
        else:
            messages = [
                {"role": "system", "content": system_message},
                {"role": "assistant", "content": "Please generate code wrapped inside triple backticks known as codeblock."},
                {"role": "user", "content": message}
            ]

        return messages

    def execute_last_code(self, os_name):
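Note that in the hunk above, the Claude-3 branch builds only a user turn, so `system_message` and the assistant priming are never sent when a Claude-3 model is selected. A sketch of one way to keep the system prompt, assuming litellm's translation of a `system`-role message into the Anthropic Messages API's top-level `system` parameter (illustrative, not part of this PR):

```python
# Illustrative sketch (not part of this PR): carry the system prompt for
# Claude-3 runs. Assumes litellm maps a "system"-role message onto the
# Anthropic Messages API's top-level `system` parameter.
def build_claude3_messages(system_message: str, message: str) -> list:
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": [{"type": "text", "text": message}]},
    ]

print(build_claude3_messages("You are a coding assistant.", "Reverse a string in Python."))
```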
@@ -278,6 +296,17 @@ def generate_content(self,message, chat_history: list[tuple[str, str]], temperat
        elif 'claude-2.1' in self.INTERPRETER_MODEL:
            self.logger.info("Model is claude-2.1.")
            self.INTERPRETER_MODEL = "claude-2.1"

        # Support for Claude-3 models.
        elif 'claude-3' in self.INTERPRETER_MODEL:

            if 'claude-3-sonnet' in self.INTERPRETER_MODEL:
                self.logger.info("Model is claude-3-sonnet.")
                self.INTERPRETER_MODEL = "claude-3-sonnet-20240229"

            elif 'claude-3-opus' in self.INTERPRETER_MODEL:
                self.logger.info("Model is claude-3-opus.")
                self.INTERPRETER_MODEL = "claude-3-opus-20240229"

        response = litellm.completion(self.INTERPRETER_MODEL, messages=messages, temperature=temperature, max_tokens=max_tokens)
        self.logger.info("Response received from completion function.")
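The `elif` chain above resolves model aliases to dated Anthropic snapshot IDs branch by branch, with the result passed straight to `litellm.completion` at the end of the hunk. As more snapshots are added, a lookup table is one way this could scale (an illustrative sketch for this note, not part of the PR; the snapshot IDs are those used in the diff):

```python
# Illustrative sketch (not part of this PR): table-driven alias resolution
# using the snapshot IDs from the diff above.
CLAUDE_3_SNAPSHOTS = {
    "claude-3-opus": "claude-3-opus-20240229",
    "claude-3-sonnet": "claude-3-sonnet-20240229",
}

def resolve_model(name: str) -> str:
    """Return the dated snapshot for a known alias, else the name unchanged."""
    return CLAUDE_3_SNAPSHOTS.get(name, name)

assert resolve_model("claude-3-opus") == "claude-3-opus-20240229"
assert resolve_model("claude-2.1") == "claude-2.1"
```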
10 changes: 10 additions & 0 deletions tests/test_interpreter.py
@@ -62,6 +62,16 @@ def test_interpreter_claude_2_model(self):
        args = Namespace(exec=True, save_code=True, mode='code', model='claude-2', display_code=True, lang='python')
        interpreter = Interpreter(args)
        self.assertEqual(interpreter.args.model, 'claude-2')

    def test_interpreter_claude_3_opus_model(self):
        args = Namespace(exec=True, save_code=True, mode='code', model='claude-3-opus', display_code=True, lang='python')
        interpreter = Interpreter(args)
        self.assertEqual(interpreter.args.model, 'claude-3-opus')

    def test_interpreter_claude_3_sonnet_model(self):
        args = Namespace(exec=True, save_code=True, mode='code', model='claude-3-sonnet', display_code=True, lang='python')
        interpreter = Interpreter(args)
        self.assertEqual(interpreter.args.model, 'claude-3-sonnet')
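The new tests mirror the existing Claude-2 test: they verify only that the `model` argument survives `Interpreter` construction, and appear to make no API calls, so presumably they run without an `ANTHROPIC_API_KEY` set (e.g. via `python -m unittest`).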

if __name__ == '__main__':
    unittest.main()