(chatglm3-demo) root@ubuntu:/chat/ChatGLM3/openai_api_demo# python openai_api.py
Loading checkpoint shards: 100%|███████████████████████████████████| 7/7 [00:11<00:00, 1.70s/it]
INFO: Started server process [298293]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
2023-12-22 11:31:31.972 | DEBUG | __main__:create_chat_completion:157 - ==== request ====
{'messages': [ChatMessage(role='user', content='波士顿天气如何?', name=None, function_call=None)], 'temperature': 0.8, 'top_p': 0.8, 'max_tokens': 100, 'echo': False, 'stream': True, 'repetition_penalty': 1.1, 'functions': [{'name': 'get_current_weather', 'description': 'Get the current weather in a given location.', 'parameters': {'type': 'object', 'properties': {'location': {'type': 'string', 'description': 'The city and state, e.g. Beijing'}, 'unit': {'type': 'string', 'enum': ['celsius', 'fahrenheit']}}, 'required': ['location']}}]}
INFO: 127.0.0.1:54764 - "POST /v1/chat/completions HTTP/1.1" 200 OK
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/uvicorn/protocols/http/h11_impl.py", line 408, in run_asgi
result = await app( # type: ignore[func-returns-value]
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/uvicorn/middleware/proxy_headers.py", line 84, in __call__
return await self.app(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/fastapi/applications.py", line 1106, in __call__
await super().__call__(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/applications.py", line 122, in __call__
await self.middleware_stack(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/middleware/errors.py", line 184, in __call__
raise exc
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/middleware/errors.py", line 162, in __call__
await self.app(scope, receive, _send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/middleware/cors.py", line 83, in __call__
await self.app(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 79, in __call__
raise exc
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/middleware/exceptions.py", line 68, in __call__
await self.app(scope, receive, sender)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 20, in __call__
raise e
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/fastapi/middleware/asyncexitstack.py", line 17, in __call__
await self.app(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/routing.py", line 718, in __call__
await route.handle(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/routing.py", line 276, in handle
await self.app(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/routing.py", line 69, in app
await response(scope, receive, send)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/sse_starlette/sse.py", line 255, in __call__
async with anyio.create_task_group() as task_group:
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 597, in __aexit__
raise exceptions[0]
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/sse_starlette/sse.py", line 258, in wrap
await func()
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/sse_starlette/sse.py", line 245, in stream_response
async for data in self.body_iterator:
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/concurrency.py", line 63, in iterate_in_threadpool
yield await anyio.to_thread.run_sync(_next, iterator)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/anyio/to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/anyio/_backends/_asyncio.py", line 807, in run
result = context.run(func, *args)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/starlette/concurrency.py", line 53, in _next
return next(iterator)
File "/media/ubuntu/chat/ChatGLM3/openai_api_demo/openai_api.py", line 321, in predict_stream
for new_response in generate_stream_chatglm3(model, tokenizer, gen_params):
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 56, in generator_context
response = gen.send(request)
File "/media/ubuntu/chat/ChatGLM3/openai_api_demo/utils.py", line 89, in generate_stream_chatglm3
response = tokenizer.decode(output_ids)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3750, in decode
return self._decode(
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 1001, in _decode
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 982, in convert_ids_to_tokens
tokens.append(self._convert_id_to_token(index))
File "/root/.cache/huggingface/modules/transformers_modules/chatglm3-6b/tokenization_chatglm.py", line 157, in _convert_id_to_token
return self.tokenizer.convert_id_to_token(index)
File "/root/.cache/huggingface/modules/transformers_modules/chatglm3-6b/tokenization_chatglm.py", line 88, in convert_id_to_token
return self.sp_model.IdToPiece(index)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1045, in _batched_func
return _func(self, arg)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1038, in _func
raise IndexError('piece id is out of range.')
IndexError: piece id is out of range.
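The IndexError above means the model generated a token id that the SentencePiece model cannot map back to a piece. ChatGLM3 places its special tokens (for example the tool-call markers used by function calling) above the sentencepiece vocabulary, so an outdated tokenization_chatglm.py in the model directory, which forwards every id straight to sp_model.IdToPiece, crashes on them; re-downloading the current chatglm3-6b model files is the usual remedy. As a stopgap, the decode call in utils.py (line 89 in the traceback) can be guarded. This is a minimal illustrative sketch, not the upstream fix, and safe_decode is a hypothetical helper:

def safe_decode(tokenizer, output_ids):
    # Try the normal path first; on failure, decode id by id and skip
    # any id the tokenizer cannot map instead of killing the SSE stream.
    try:
        return tokenizer.decode(output_ids)
    except IndexError:
        pieces = []
        for token_id in output_ids:
            try:
                pieces.append(tokenizer.decode([token_id]))
            except IndexError:
                continue  # id outside the sp_model vocabulary; drop it
        return "".join(pieces)

Note that dropping unmappable ids also drops the function-call markers, so this only keeps the server alive while the tokenizer mismatch is actually fixed.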
(chatglm3-demo) root@ubuntu:/chat/ChatGLM3/openai_api_demo# python openai_api_request.py
鲜明的 pupp
취
screened
save
Traceback (most recent call last):
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 710, in _error_catcher
yield
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 1073, in read_chunked
self._update_chunk_length()
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 1008, in _update_chunk_length
raise InvalidChunkLength(self, line) from None
urllib3.exceptions.InvalidChunkLength: InvalidChunkLength(got length b'', 0 bytes read)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/requests/models.py", line 816, in generate
yield from self.raw.stream(chunk_size, decode_content=True)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 933, in stream
yield from self.read_chunked(amt, decode_content=decode_content)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 1061, in read_chunked
with self._error_catcher():
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/contextlib.py", line 153, in __exit__
self.gen.throw(typ, value, traceback)
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/urllib3/response.py", line 727, in _error_catcher
raise ProtocolError(f"Connection broken: {e!r}", e) from e
urllib3.exceptions.ProtocolError: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/media/ubuntu/chat/ChatGLM3/openai_api_demo/openai_api_request.py", line 99, in <module>
function_chat(use_stream=True)
File "/media/ubuntu/chat/ChatGLM3/openai_api_demo/openai_api_request.py", line 80, in function_chat
create_chat_completion("chatglm3-6b", messages=chat_messages, functions=functions, use_stream=use_stream)
File "/media/ubuntu/chat/ChatGLM3/openai_api_demo/openai_api_request.py", line 28, in create_chat_completion
for line in response.iter_lines():
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/requests/models.py", line 865, in iter_lines
for chunk in self.iter_content(
File "/root/anaconda3/envs/chatglm3-demo/lib/python3.10/site-packages/requests/models.py", line 818, in generate
raise ChunkedEncodingError(e)
requests.exceptions.ChunkedEncodingError: ("Connection broken: InvalidChunkLength(got length b'', 0 bytes read)", InvalidChunkLength(got length b'', 0 bytes read))
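The client-side ChunkedEncodingError is just the downstream symptom of the server crash: the SSE generator died mid-response, so the chunked transfer ended without a terminating chunk and requests gave up. Fixing the tokenizer error makes it disappear, but the streaming loop in openai_api_request.py can also be hardened so a server abort does not surface as a raw traceback. A minimal sketch, assuming the same requests-based streaming style as the demo script; the URL, payload handling, and printing are illustrative:

import requests

def stream_chat(payload, url="http://127.0.0.1:8000/v1/chat/completions"):
    # Read the SSE stream line by line and tolerate a mid-stream abort.
    with requests.post(url, json=payload, stream=True, timeout=60) as response:
        try:
            for line in response.iter_lines():
                if line:
                    print(line.decode("utf-8"))
        except requests.exceptions.ChunkedEncodingError:
            # The server closed the connection mid-chunk (e.g. it crashed).
            print("[stream aborted by server]")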
(chatglm3-demo) root@ubuntu:/chat/ChatGLM3/openai_api_demo#
Steps to reproduce:

- Pull the latest code with git and install the latest dependencies with pip.
- Change the model path in openai_api.py (or set it via the environment; see the note after these steps):
  MODEL_PATH = os.environ.get('MODEL_PATH', '/chat/ChatGLM3/chatglm3-6b')
- Run python openai_api.py.
- Then, in another terminal, run python openai_api_request.py.

The error logs from openai_api.py and from openai_api_request.py are shown above.
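Since openai_api.py falls back to the hard-coded default only when MODEL_PATH is absent from the environment, the path can also be supplied without editing the file, for example:

MODEL_PATH=/chat/ChatGLM3/chatglm3-6b python openai_api.py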