import base64

import requests


def chat_with_model(messages, image_path=None, *,
                    url="http://localhost:8000/v1/chat/completions",
                    timeout=60):
    """Send a chat-completion request to a locally served GLM-4V-9B model.

    Args:
        messages: List of OpenAI-style message dicts
            ({"role": ..., "content": ...}).
        image_path: Optional path to an image file. When given, the file is
            read, base64-encoded and sent in the payload's "image" field;
            when omitted, the server's "-1" sentinel is sent instead
            (meaning "no image attached").
        url: Endpoint of the FastAPI server (keyword-only; defaults to the
            original hard-coded localhost address, so existing callers are
            unaffected).
        timeout: Request timeout in seconds (keyword-only). Without it,
            requests would wait indefinitely if the server stalls.

    Returns:
        The decoded JSON response body as a dict.

    Raises:
        requests.HTTPError: If the server answers with a 4xx/5xx status.
        OSError: If image_path cannot be opened or read.
    """
    image_data = "-1"  # server-side sentinel for "no image attached"
    if image_path:
        with open(image_path, "rb") as image_file:
            image_data = base64.b64encode(image_file.read()).decode("utf-8")

    payload = {
        "model": "glm-4v-9b",
        "messages": messages,
        "temperature": 0.6,
        "top_p": 0.8,
        "max_tokens": 1024,
        "image": image_data,
    }

    # timeout= keeps the client from hanging forever on a stalled server.
    response = requests.post(url, json=payload, timeout=timeout)
    # Surface HTTP errors explicitly instead of silently decoding an error
    # body as if it were a successful completion.
    response.raise_for_status()
    return response.json()


if __name__ == "__main__":
    # Example usage — guarded so importing this module has no side effects.
    messages = [
        {"role": "user", "content": "Hello, how are you?"},
        {"role": "user", "content": "提取文本原样输出,无需解释"},
    ]

    # Chat with model without image
    print(chat_with_model(messages))

    # Chat with model with image
    print(chat_with_model(messages, image_path="/home/image001.png"))