Skip to content

Commit

Permalink
Merge pull request #75 from cubenlp/rex/dev
Browse files Browse the repository at this point in the history
update api of valid models
  • Loading branch information
RexWzh committed Apr 4, 2024
2 parents 8a2aabd + a0fbc6b commit c02bfe1
Show file tree
Hide file tree
Showing 4 changed files with 16 additions and 14 deletions.
2 changes: 1 addition & 1 deletion chattool/__init__.py
Expand Up @@ -2,7 +2,7 @@

__author__ = """Rex Wang"""
__email__ = '1073853456@qq.com'
__version__ = '3.1.3'
__version__ = '3.1.4'

import os, sys, requests
from .chattype import Chat, Resp
Expand Down
24 changes: 14 additions & 10 deletions chattool/asynctool.py
Expand Up @@ -54,6 +54,7 @@ async def async_process_msgs( chatlogs:List[List[Dict]]
, nproc:int=1
, timeout:int=0
, timeinterval:int=0
, showcost:bool=False
, **options
)->List[bool]:
"""Process messages asynchronously
Expand Down Expand Up @@ -100,7 +101,7 @@ async def chat_complete(ind, locker, chat_log, chkpoint, **options):
chat = Chat(chat_log)
async with locker: # locker | not necessary for normal IO
chat.save(chkpoint, index=ind)
return ind, resp.cost()
return ind, resp.cost() if showcost else 0

async with sem, aiohttp.ClientSession() as session:
tasks = []
Expand All @@ -124,19 +125,20 @@ async def chat_complete(ind, locker, chat_log, chkpoint, **options):

def async_chat_completion( msgs:Union[List[List[Dict]], str]
, chkpoint:str
, model:str='gpt-3.5-turbo'
, api_key:Union[str, None]=None
, chat_url:Union[str, None]=None
, max_tries:int=1
, nproc:int=1
, timeout:int=0
, timeinterval:int=0
, clearfile:bool=False
, notrun:bool=False
, msg2log:Union[Callable, None]=None
, wait:bool=False
, showcost:bool=False
, data2chat:Union[Callable, None]=None
, msg2log:Union[Callable, None]=None
, max_requests:int=-1
, ncoroutines:int=1
, notrun:bool=False
, **options
):
"""Asynchronous chat completion
Expand All @@ -151,24 +153,24 @@ def async_chat_completion( msgs:Union[List[List[Dict]], str]
timeout (int, optional): timeout for the API call. Defaults to 0(no timeout).
timeinterval (int, optional): time interval between two API calls. Defaults to 0.
clearfile (bool, optional): whether to clear the checkpoint file. Defaults to False.
notrun (bool, optional): whether to run the async process. It should be True
when use in Jupyter Notebook. Defaults to False.
wait (bool, optional): wait for the `await` command. Defaults to False.
msg2log (Union[Callable, None], optional): function to convert message to chat log.
Defaults to None.
data2chat (Union[Callable, None], optional): function to convert data to Chat object.
Defaults to None.
        notrun (bool, optional): (Deprecated) same as `wait`; if True, return the
            coroutine instead of running it. Defaults to False.
        max_requests (int, optional): (Deprecated) maximum number of requests to make. Defaults to -1.
        ncoroutines (int, optional): (Deprecated) number of coroutines. Defaults to 1.
Returns:
List[Dict]: list of responses
"""
# convert chatlogs
# convert msg to chatlogs
if data2chat is not None:
msg2log = lambda data: data2chat(data).chat_log
elif msg2log is None: # By default, use method from the Chat object
msg2log = lambda data: Chat(data).chat_log
# use nproc instead of ncoroutines
# number of coroutines
nproc = max(nproc, ncoroutines)
chatlogs = [msg2log(log) for log in msgs]
if clearfile and os.path.exists(chkpoint):
Expand All @@ -184,6 +186,8 @@ def async_chat_completion( msgs:Union[List[List[Dict]], str]
else:
raise Exception("chat_url is not provided!")
chat_url = chattool.request.normalize_url(chat_url)
if 'model' not in options:
options['model'] = chattool.model if chattool.model else "gpt-3.5-turbo"
# run async process
assert nproc > 0, "nproc must be greater than 0!"
max_tries = max(max_tries, max_requests)
Expand All @@ -193,13 +197,13 @@ def async_chat_completion( msgs:Union[List[List[Dict]], str]
"api_key": api_key,
"chat_url": chat_url,
"max_tries": max_tries,
"showcost": showcost,
"nproc": nproc,
"timeout": timeout,
"timeinterval": timeinterval,
"model": model,
**options
}
if notrun: # when use in Jupyter Notebook
if notrun or wait: # when use in Jupyter Notebook
return async_process_msgs(**args) # return the async object
else:
return asyncio.run(async_process_msgs(**args))
2 changes: 0 additions & 2 deletions chattool/request.py
Expand Up @@ -95,7 +95,6 @@ def valid_models(api_key:str, model_url:str, gpt_only:bool=True):
"""
headers = {
"Authorization": "Bearer " + api_key,
"Content-Type": "application/json"
}
model_response = requests.get(normalize_url(model_url), headers=headers)
if model_response.status_code == 200:
Expand Down Expand Up @@ -132,7 +131,6 @@ def filecontent(api_key:str, base_url:str, fileid:str):
"""Returns the contents of the specified file"""
headers = {
"Authorization": "Bearer " + api_key,
"Content-Type": "application/json"
}
fileurl = normalize_url(os.path.join(base_url, "v1/files", fileid, "content"))
resp = requests.get(fileurl, headers=headers)
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Expand Up @@ -7,7 +7,7 @@
with open('README.md') as readme_file:
readme = readme_file.read()

VERSION = '3.1.3'
VERSION = '3.1.4'

requirements = [
'Click>=7.0', 'requests>=2.20', "responses>=0.23", 'aiohttp>=3.8',
Expand Down

0 comments on commit c02bfe1

Please sign in to comment.