You signed in with another tab or window. Reload to refresh your session. You signed out in another tab or window. Reload to refresh your session. You switched accounts on another tab or window. Reload to refresh your session. Dismiss alert
total vram = 96869.25
required vram(full=13858, 8bit=8254, 4bit=5140)
determined model type: alpaca
Traceback (most recent call last):
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/routes.py", line 437, in run_predict
output = await app.get_blocks().process_api(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/blocks.py", line 1352, in process_api
result = await self.call_function(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/blocks.py", line 1077, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/app.py", line 291, in download_completed
global_vars.initialize_globals(tmp_args)
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/global_vars.py", line 176, in initialize_globals
model, tokenizer = load_model(
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/models/alpaca.py", line 17, in load_model
tokenizer = LlamaTokenizer.from_pretrained(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1830, in from_pretrained
raise EnvironmentError(
OSError: Can't load tokenizer for 'elinas/llama-7b-hf-transformers-4.29'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'elinas/llama-7b-hf-transformers-4.29' is the correct path to a directory containing all relevant files for a LlamaTokenizer tokenizer.
Hi, I'm running into this issue right now: the tokenizer for 'elinas/llama-7b-hf-transformers-4.29' fails to load with the OSError shown above.
Can anyone tell me how to fix it?
The text was updated successfully, but these errors were encountered:
total vram = 96869.25
required vram(full=13858, 8bit=8254, 4bit=5140)
determined model type: alpaca
Traceback (most recent call last):
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/routes.py", line 437, in run_predict
output = await app.get_blocks().process_api(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/blocks.py", line 1352, in process_api
result = await self.call_function(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/gradio/blocks.py", line 1077, in call_function
prediction = await anyio.to_thread.run_sync(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/app.py", line 291, in download_completed
global_vars.initialize_globals(tmp_args)
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/global_vars.py", line 176, in initialize_globals
model, tokenizer = load_model(
File "/home/16tb_hdd/xxc/LLM-As-Chatbot/models/alpaca.py", line 17, in load_model
tokenizer = LlamaTokenizer.from_pretrained(
File "/home/xiaoxingchen/.conda/envs/llm-serve/lib/python3.9/site-packages/transformers/tokenization_utils_base.py", line 1830, in from_pretrained
raise EnvironmentError(
OSError: Can't load tokenizer for 'elinas/llama-7b-hf-transformers-4.29'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'elinas/llama-7b-hf-transformers-4.29' is the correct path to a directory containing all relevant files for a LlamaTokenizer tokenizer.
Hi, I'm having this issue right now.
Can anyone tell me how to fix it?
The text was updated successfully, but these errors were encountered: