Skip to content

Commit

Permalink
Bump version to v0.2.33 (#2717)
Browse files — browse the repository at this point in the history
  • Loading branch information
merrymercy committed Nov 22, 2023
1 parent 0bbeddc commit 0a5ad3e
Show file tree
Hide file tree
Showing 4 changed files with 24 additions and 9 deletions.
2 changes: 1 addition & 1 deletion fastchat/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.2.32"
__version__ = "0.2.33"
4 changes: 2 additions & 2 deletions fastchat/serve/gradio_web_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -610,7 +610,7 @@ def build_single_model_ui(models, add_promotion_links=False):
# 🏔️ Chat with Open Large Language Models
{promotion}
## 👉 Choose any model to chat
## Choose any model to chat
"""

state = gr.State()
Expand All @@ -635,7 +635,7 @@ def build_single_model_ui(models, add_promotion_links=False):
with gr.Column(scale=20):
textbox = gr.Textbox(
show_label=False,
placeholder="Enter your prompt here and press ENTER",
placeholder="👉 Enter your prompt and press ENTER",
container=False,
elem_id="input_box",
)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "fschat"
version = "0.2.32"
version = "0.2.33"
description = "An open platform for training, serving, and evaluating large language model based chatbots."
readme = "README.md"
requires-python = ">=3.8"
Expand Down
25 changes: 20 additions & 5 deletions tests/test_openai_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,28 @@ def test_list_models():
def test_completion(model, logprob):
prompt = "Once upon a time"
completion = openai.Completion.create(
model=model, prompt=prompt, logprobs=logprob, max_tokens=64
model=model,
prompt=prompt,
logprobs=logprob,
max_tokens=64,
temperature=0,
)
print(f"full text: {prompt + completion.choices[0].text}", flush=True)
if completion.choices[0].logprobs is not None:
print(f"logprobs: {completion.choices[0].logprobs.token_logprobs}", flush=True)
print(
f"logprobs: {completion.choices[0].logprobs.token_logprobs[:10]}",
flush=True,
)


def test_completion_stream(model):
prompt = "Once upon a time"
res = openai.Completion.create(
model=model, prompt=prompt, max_tokens=64, stream=True
model=model,
prompt=prompt,
max_tokens=64,
stream=True,
temperature=0,
)
print(prompt, end="")
for chunk in res:
Expand All @@ -49,14 +60,18 @@ def test_embedding(model):

def test_chat_completion(model):
completion = openai.ChatCompletion.create(
model=model, messages=[{"role": "user", "content": "Hello! What is your name?"}]
model=model,
messages=[{"role": "user", "content": "Hello! What is your name?"}],
temperature=0,
)
print(completion.choices[0].message.content)


def test_chat_completion_stream(model):
messages = [{"role": "user", "content": "Hello! What is your name?"}]
res = openai.ChatCompletion.create(model=model, messages=messages, stream=True)
res = openai.ChatCompletion.create(
model=model, messages=messages, stream=True, temperature=0
)
for chunk in res:
content = chunk["choices"][0]["delta"].get("content", "")
print(content, end="", flush=True)
Expand Down

0 comments on commit 0a5ad3e

Please sign in to comment.