Skip to content

Commit

Permalink
Add GPT-3.5-Turbo and GPT-4 support.
Browse files Browse the repository at this point in the history
  • Branch information
lgrammel committed Mar 17, 2023
1 parent 6296a33 commit b6e11de
Show file tree
Hide file tree
Showing 4 changed files with 59 additions and 18 deletions.
10 changes: 10 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,15 @@
# Changelog

## 1.14.0 - 2023-03-17

### Added

- GPT-3.5-Turbo and GPT-4 support. GPT-3.5-Turbo is the default. You can change to GPT-4 in the settings (you need to be in the OpenAI GPT-4 beta for it to work).

### Removed

- text-davinci-003 support.

## 1.13.0 - 2023-03-10

### Added
Expand Down
16 changes: 15 additions & 1 deletion app/vscode/asset/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"publisher": "rubberduck",
"name": "rubberduck-vscode",
"version": "1.13.0",
"version": "1.14.0",
"displayName": "Rubberduck - ChatGPT for Visual Studio Code",
"description": "Generate code, edit code, explain code, generate tests, find bugs, diagnose errors, and even create your own conversation templates.",
"keywords": [
Expand Down Expand Up @@ -217,6 +217,20 @@
"default": "https://api.openai.com/v1/",
"markdownDescription": "Specify the URL to the OpenAI API. If you are using a proxy, you can set it here.",
"scope": "application"
},
"rubberduck.model": {
"type": "string",
"default": "gpt-3.5-turbo",
"enum": [
"gpt-3.5-turbo",
"gpt-4"
],
"enumDescriptions": [
"Faster, less expensive model. Less accurate.",
"Expensive, slow model. More accurate. Requires beta access (OpenAI)."
],
"markdownDescription": "Select the OpenAI model that you want to use.",
"scope": "application"
}
}
},
Expand Down
16 changes: 14 additions & 2 deletions lib/extension/src/conversation/Conversation.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import { webviewApi } from "@rubberduck/common";
import Handlebars from "handlebars";
import * as vscode from "vscode";
import zod from "zod";
import { DiffEditor } from "../diff/DiffEditor";
import { DiffEditorManager } from "../diff/DiffEditorManager";
import { OpenAIClient } from "../openai/OpenAIClient";
Expand Down Expand Up @@ -171,8 +172,19 @@ export class Conversation {
});
}

const completion = await this.openAIClient.generateCompletion({
prompt: await this.evaluateTemplate(prompt.template, variables),
// retrieve vscode setting rubberduck.model
const model = zod
.enum(["gpt-4", "gpt-3.5-turbo"])
.parse(vscode.workspace.getConfiguration("rubberduck").get("model"));

const completion = await this.openAIClient.generateChatCompletion({
messages: [
{
role: "user",
content: await this.evaluateTemplate(prompt.template, variables),
},
],
model,
maxTokens: prompt.maxTokens,
stop: prompt.stop,
temperature: prompt.temperature,
Expand Down
35 changes: 20 additions & 15 deletions lib/extension/src/openai/OpenAIClient.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,19 @@ export function getVSCodeOpenAIBaseUrl(): string {
.get("baseUrl", "https://api.openai.com/v1/");
}

const completionStreamSchema = zod.object({
const chatCompletionStreamSchema = zod.object({
id: zod.string(),
object: zod.literal("text_completion"),
object: zod.literal("chat.completion.chunk"),
created: zod.number(),
model: zod.string(),
choices: zod
.array(
zod.object({
text: zod.string(),
delta: zod.object({
role: zod.literal("assistant").optional(),
content: zod.string().optional(),
}),
index: zod.number(),
logprobs: zod.nullable(zod.any()),
finish_reason: zod.nullable(zod.string()),
})
)
Expand Down Expand Up @@ -76,14 +78,19 @@ export class OpenAIClient {
this.openAIBaseUrl = openAIBaseUrl.replace(/\/$/, "");
}

async generateCompletion({
prompt,
async generateChatCompletion({
messages,
maxTokens,
stop,
model,
temperature = 0,
streamHandler,
}: {
prompt: string;
messages: Array<{
role: "assistant" | "user" | "system";
content: string;
}>;
model: "gpt-4" | "gpt-3.5-turbo";
maxTokens: number;
stop?: string[] | undefined;
temperature?: number | undefined;
Expand All @@ -100,7 +107,7 @@ export class OpenAIClient {
> {
this.logger.log([
"--- Start OpenAI prompt ---",
prompt,
JSON.stringify(messages),
"--- End OpenAI prompt ---",
]);

Expand All @@ -123,15 +130,13 @@ export class OpenAIClient {
]);

const response = await axios.post(
`${this.openAIBaseUrl}/completions`,
`${this.openAIBaseUrl}/chat/completions`,
{
model: "text-davinci-003",
prompt,
model,
messages,
max_tokens: maxTokens,
stop,
temperature,
// top_p is excluded because temperature is set
best_of: 1,
frequency_penalty: 0,
presence_penalty: 0,
stream: true,
Expand Down Expand Up @@ -178,11 +183,11 @@ export class OpenAIClient {
}

this.logger.debug("Process next line of chunk");
const result = completionStreamSchema.parse(
const result = chatCompletionStreamSchema.parse(
secureJSON.parse(line.substring("data: ".length))
);

responseUntilNow += result.choices[0]?.text ?? "";
responseUntilNow += result.choices[0]?.delta.content ?? "";

streamHandler(responseUntilNow);
}
Expand Down

0 comments on commit b6e11de

Please sign in to comment.