diff --git a/asr/funasr/ASR_client.py b/asr/funasr/ASR_client.py
index cf5bdb7..9cbf51e 100644
--- a/asr/funasr/ASR_client.py
+++ b/asr/funasr/ASR_client.py
@@ -6,7 +6,7 @@ import argparse
import json
parser = argparse.ArgumentParser()
-parser.add_argument("--host", type=str, default="127.0.0.1", required=False, help="host ip, localhost, 0.0.0.0")
+parser.add_argument("--host", type=str, default="192.168.1.111", required=False, help="host ip, localhost, 0.0.0.0")
parser.add_argument("--port", type=int, default=10197, required=False, help="grpc server port")
parser.add_argument("--chunk_size", type=int, default=160, help="ms")
parser.add_argument("--vad_needed", type=bool, default=True)
diff --git a/asr/funasr/funasr_client_api.py b/asr/funasr/funasr_client_api.py
index 6daebd5..f6768c9 100644
--- a/asr/funasr/funasr_client_api.py
+++ b/asr/funasr/funasr_client_api.py
@@ -27,13 +27,13 @@ class Funasr_websocket_recognizer():
'''
parser = argparse.ArgumentParser()
- parser.add_argument("--host", type=str, default="127.0.0.1", required=False, help="host ip, localhost, 0.0.0.0")
+ parser.add_argument("--host", type=str, default="192.168.1.111", required=False, help="host ip, localhost, 0.0.0.0")
parser.add_argument("--port", type=int, default=10194, required=False, help="grpc server port")
parser.add_argument("--chunk_size", type=int, default=160, help="ms")
parser.add_argument("--vad_needed", type=bool, default=True)
args = parser.parse_args()
- def __init__(self, host="127.0.0.1",
+ def __init__(self, host="192.168.1.111",
port="10197",
is_ssl=True,
chunk_size="0, 10, 5",
diff --git a/cache_data/input.wav b/cache_data/input.wav
index f990e01..ea8733a 100644
Binary files a/cache_data/input.wav and b/cache_data/input.wav differ
diff --git a/config.json b/config.json
index 9e1d5d0..655331a 100644
--- a/config.json
+++ b/config.json
@@ -27,7 +27,7 @@
"items": [],
"source": {
"automatic_player_status": true,
- "automatic_player_url": "http://127.0.0.1:6000",
+ "automatic_player_url": "http://192.168.1.111:6000",
"liveRoom": {
"enabled": true,
"url": ""
diff --git a/core/socket_bridge_service.py b/core/socket_bridge_service.py
index 685f0cc..f775cb6 100644
--- a/core/socket_bridge_service.py
+++ b/core/socket_bridge_service.py
@@ -51,7 +51,7 @@ class SocketBridgeService:
async def create_socket_client(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
- sock.connect(('127.0.0.1', 10001))
+ sock.connect(('192.168.1.111', 10001))
sock.setblocking(True) # set to blocking mode
return sock
except Exception as e:
diff --git a/gui/static/js/index.js b/gui/static/js/index.js
index 6e97143..874878f 100644
--- a/gui/static/js/index.js
+++ b/gui/static/js/index.js
@@ -234,7 +234,7 @@ class FayInterface {
panelMsg: '',
panelReply: '',
robot:'static/images/Normal.gif',
- base_url: 'http://127.0.0.1:5000',
+ base_url: 'http://192.168.1.111:5000',
play_sound_enabled: false,
source_record_enabled: false
};
@@ -246,7 +246,7 @@ class FayInterface {
},
methods: {
initFayService() {
- this.fayService = new FayInterface('ws://127.0.0.1:10003', this.base_url, this);
+ this.fayService = new FayInterface('ws://192.168.1.111:10003', this.base_url, this);
this.fayService.connectWebSocket();
this.fayService.websocket.addEventListener('open', () => {
this.loadUserList();
diff --git a/gui/static/js/setting.js b/gui/static/js/setting.js
index e1533e6..b004527 100644
--- a/gui/static/js/setting.js
+++ b/gui/static/js/setting.js
@@ -170,7 +170,7 @@ new Vue({
}],
automatic_player_status: false,
automatic_player_url: "",
- host_url: "http://127.0.0.1:5000"
+ host_url: "http://192.168.1.111:5000"
};
},
created() {
@@ -179,7 +179,7 @@ new Vue({
},
methods: {
initFayService() {
- this.fayService = new FayInterface('ws://127.0.0.1:10003', this.host_url, this);
+ this.fayService = new FayInterface('ws://192.168.1.111:10003', this.host_url, this);
this.fayService.connectWebSocket();
},
getData() {
diff --git a/gui/templates/setting.html b/gui/templates/setting.html
index e5ebcdc..8517c6d 100644
--- a/gui/templates/setting.html
+++ b/gui/templates/setting.html
@@ -106,7 +106,7 @@
active-color="#13ce66"
inactive-color="#ff4949">
自动播放:
-
+
diff --git a/gui/window.py b/gui/window.py
index 76a9d6c..dc775c9 100644
--- a/gui/window.py
+++ b/gui/window.py
@@ -27,7 +27,7 @@ class MainWindow(QMainWindow):
#clear the cache
profile = QWebEngineProfile.defaultProfile()
profile.clearHttpCache()
- self.browser.load(QUrl('http://127.0.0.1:5000'))
+ self.browser.load(QUrl('http://192.168.1.111:5000'))
self.setCentralWidget(self.browser)
MyThread(target=self.runnable).start()
diff --git a/llm/VllmGPT.py b/llm/VllmGPT.py
index 45b75f1..1bd6119 100644
--- a/llm/VllmGPT.py
+++ b/llm/VllmGPT.py
@@ -4,7 +4,7 @@ import requests
class VllmGPT:
- def __init__(self, host="127.0.0.1",
+ def __init__(self, host="192.168.1.111",
port="8000",
model="THUDM/chatglm3-6b",
max_tokens="1024"):
@@ -17,7 +17,7 @@ class VllmGPT:
def question(self,cont):
chat_list = []
- url = "http://127.0.0.1:8101/v1/completions"
+ url = "http://192.168.1.111:8101/v1/completions"
req = json.dumps({
"model": "THUDM/chatglm3-6b",
"prompt": cont,
@@ -51,6 +51,6 @@ class VllmGPT:
return res['choices'][0]['message']['content']
if __name__ == "__main__":
- vllm = VllmGPT('127.0.0.1','8101','Qwen-7B-Chat')
+ vllm = VllmGPT('192.168.1.111','8101','Qwen-7B-Chat')
req = vllm.question2("你叫什么名字啊今年多大了")
print(req)
diff --git a/llm/agent/tools/SendWX.py b/llm/agent/tools/SendWX.py
index ccf9c5d..30dc15f 100644
--- a/llm/agent/tools/SendWX.py
+++ b/llm/agent/tools/SendWX.py
@@ -4,7 +4,7 @@ from langchain.tools import BaseTool
import requests
import json
-url = "http://127.0.0.1:4008/send"
+url = "http://192.168.1.111:4008/send"
headers = {'Content-Type': 'application/json'}
data = {
"message": "你好",
diff --git a/llm/nlp_ChatGLM3.py b/llm/nlp_ChatGLM3.py
index c0c25a5..ed892b9 100644
--- a/llm/nlp_ChatGLM3.py
+++ b/llm/nlp_ChatGLM3.py
@@ -25,7 +25,7 @@ def question(cont, uid=0, observation=""):
content = {
"prompt":"请简单回复我。" + cont,
"history":chat_list}
- url = "http://127.0.0.1:8000/v1/completions"
+ url = "http://192.168.1.111:8000/v1/completions"
req = json.dumps(content)
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req)
diff --git a/llm/nlp_VisualGLM.py b/llm/nlp_VisualGLM.py
index 3efcc3f..c5131e0 100644
--- a/llm/nlp_VisualGLM.py
+++ b/llm/nlp_VisualGLM.py
@@ -25,7 +25,7 @@ def question(cont, uid=0, observation=""):
filepath = os.path.join(current_working_directory, "data", filename)
cv2.imwrite(filepath, img)
content["image"] = filepath
- url = "http://127.0.0.1:8080"
+ url = "http://192.168.1.111:8080"
print(content)
req = json.dumps(content)
headers = {'content-type': 'application/json'}
diff --git a/llm/nlp_privategpt.py b/llm/nlp_privategpt.py
index 04dcc47..38caa40 100644
--- a/llm/nlp_privategpt.py
+++ b/llm/nlp_privategpt.py
@@ -2,7 +2,7 @@ import hashlib
import os
from pgpt_python.client import PrivateGPTApi
-client = PrivateGPTApi(base_url="http://127.0.0.1:8001")
+client = PrivateGPTApi(base_url="http://192.168.1.111:8001")
index_name = "knowledge_data"
folder_path = "llm/privategpt/knowledge_base"
diff --git a/main.py b/main.py
index 94e8fe9..96aa67f 100644
--- a/main.py
+++ b/main.py
@@ -120,7 +120,7 @@ if __name__ == '__main__':
contentdb.init_db()
#IP replacement
- if config_util.fay_url != "127.0.0.1":
+ if config_util.fay_url != "192.168.1.111":
replace_ip_in_file("gui/static/js/index.js", config_util.fay_url)
replace_ip_in_file("gui/static/js/setting.js", config_util.fay_url)
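The main.py hunk above gates two calls to the project's replace_ip_in_file helper on config_util.fay_url differing from the new baked-in default. The helper's body does not appear in this diff; what follows is a minimal sketch of what it could look like, assuming a plain textual substitution (the signature and the old_ip default are assumptions, not code from the repository):

    # Hypothetical sketch of replace_ip_in_file (not part of this diff):
    # rewrite a text file, swapping the baked-in default host for the
    # host configured as fay_url. The old_ip default is an assumption.
    def replace_ip_in_file(file_path, new_ip, old_ip="192.168.1.111"):
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(content.replace(old_ip, new_ip))

Run once at startup, a helper along these lines would keep the hosts hardcoded in gui/static/js/index.js and gui/static/js/setting.js in sync with fay_url.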
diff --git a/system.conf b/system.conf
index c1fbc84..80eb004 100644
--- a/system.conf
+++ b/system.conf
@@ -4,7 +4,7 @@
#funasr is recommended; start it following the instructions in asr/funasr/README.md
ASR_mode = funasr
#ASR option (one of two; requires running the fay/test/funasr service); integrates the DAMO Academy ASR project. Thanks to Zhang Congcong, algorithm engineer at 中科大脑, for providing the integration code
-local_asr_ip=127.0.0.1
+local_asr_ip=192.168.1.111
local_asr_port=10197
# ASR option (one of two; recommended for the first run, free for 3 months): Aliyun realtime speech recognition service key (required) https://ai.aliyun.com/nls/trans
@@ -58,7 +58,7 @@ gpt_base_url=https://api.zyai.online/v1
#gpt model engine, e.g. gpt-3.5-turbo, moonshot-v1-8k
gpt_model_engine=gpt-3.5-turbo
-#gpt(fastgpt) proxy (may be left empty, e.g. 127.0.0.1:7890)
+#gpt(fastgpt) proxy (may be left empty, e.g. 192.168.1.111:7890)
proxy_config=
#Tongyi Xingchen service key (one of several NLP options) https://xingchen.aliyun.com/
@@ -66,7 +66,7 @@ xingchen_api_key=
xingchen_characterid=
#ollama service (one of several NLP options)
-#ollama_ip may be set to 127.0.0.1 or localhost
+#ollama_ip may be set to 192.168.1.111 or localhost
ollama_ip = localhost
#ollama_model may be set to qwen:latest, llama2:latest, yi:latest, pi:latest, gemma:latest
ollama_model = gemma:latest
@@ -75,8 +75,8 @@ ollama_model = gemma:latest
coze_bot_id=
coze_api_key=
-#start mode: common or web (on a server or in docker, use web mode, controlled via http://127.0.0.1:5000)
+#start mode: common or web (on a server or in docker, use web mode, controlled via http://192.168.1.111:5000)
start_mode=web
#server's advertised address
-fay_url = 127.0.0.1
\ No newline at end of file
+fay_url = 192.168.1.111
\ No newline at end of file
diff --git a/test/ollama/README.md b/test/ollama/README.md
index bea28f8..9e2ac89 100644
--- a/test/ollama/README.md
+++ b/test/ollama/README.md
ollama pull qwen:latest #Alibaba open-source model
```
chat_module=ollama_api
-ollama_ip = 127.0.0.1
+ollama_ip = 192.168.1.111
ollama_model = phi:latest # llama2:latest , yi:latest , pi:latest , gemma:latest (choose one of the open-source LLMs)
```
Service configuration
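Aside on the ollama settings above: with ollama_ip pointing at a LAN host instead of loopback, it is worth confirming the Ollama server is actually reachable there before wiring it into Fay. A minimal sketch, assuming Ollama's standard REST endpoint /api/tags on its default port 11434, and assuming the server at 192.168.1.111 was started listening on that interface:

    # Hypothetical reachability check for the Ollama host configured above.
    # GET /api/tags lists the models available on an Ollama server.
    import requests

    resp = requests.get("http://192.168.1.111:11434/api/tags", timeout=5)
    resp.raise_for_status()
    print([m["name"] for m in resp.json().get("models", [])])

Note that Ollama binds to 127.0.0.1 by default; to serve a LAN address like this one it has to be started with OLLAMA_HOST set accordingly (for example OLLAMA_HOST=0.0.0.0 ollama serve).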
diff --git a/test/ovr_lipsync/ffmpeg/doc/ffmpeg-all.html b/test/ovr_lipsync/ffmpeg/doc/ffmpeg-all.html
index 2d58c04..a72711a 100644
--- a/test/ovr_lipsync/ffmpeg/doc/ffmpeg-all.html
+++ b/test/ovr_lipsync/ffmpeg/doc/ffmpeg-all.html
@@ -23099,12 +23099,12 @@ an external server.
Example:
Create a localhost stream on port 5555:
-ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
+ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
Multiple clients may connect to the stream using:
-ffplay zmq:tcp://127.0.0.1:5555
+ffplay zmq:tcp://192.168.1.111:5555
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
diff --git a/test/ovr_lipsync/ffmpeg/doc/ffmpeg-protocols.html b/test/ovr_lipsync/ffmpeg/doc/ffmpeg-protocols.html
index b892318..cba6377 100644
--- a/test/ovr_lipsync/ffmpeg/doc/ffmpeg-protocols.html
+++ b/test/ovr_lipsync/ffmpeg/doc/ffmpeg-protocols.html
@@ -2622,12 +2622,12 @@ an external server.
Example:
Create a localhost stream on port 5555:
-ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
+ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
Multiple clients may connect to the stream using:
-ffplay zmq:tcp://127.0.0.1:5555
+ffplay zmq:tcp://192.168.1.111:5555
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
diff --git a/test/ovr_lipsync/ffmpeg/doc/ffplay-all.html b/test/ovr_lipsync/ffmpeg/doc/ffplay-all.html
index 2c57444..70765d1 100644
--- a/test/ovr_lipsync/ffmpeg/doc/ffplay-all.html
+++ b/test/ovr_lipsync/ffmpeg/doc/ffplay-all.html
@@ -11528,12 +11528,12 @@ an external server.
Example:
Create a localhost stream on port 5555:
-ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
+ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
Multiple clients may connect to the stream using:
-ffplay zmq:tcp://127.0.0.1:5555
+ffplay zmq:tcp://192.168.1.111:5555
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
diff --git a/test/ovr_lipsync/ffmpeg/doc/ffprobe-all.html b/test/ovr_lipsync/ffmpeg/doc/ffprobe-all.html
index 528af1d..7a2d64c 100644
--- a/test/ovr_lipsync/ffmpeg/doc/ffprobe-all.html
+++ b/test/ovr_lipsync/ffmpeg/doc/ffprobe-all.html
@@ -11892,12 +11892,12 @@ an external server.
Example:
Create a localhost stream on port 5555:
-ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
+ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
Multiple clients may connect to the stream using:
-ffplay zmq:tcp://127.0.0.1:5555
+ffplay zmq:tcp://192.168.1.111:5555
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
diff --git a/test/python_connector_demo/remote_audio.py b/test/python_connector_demo/remote_audio.py
index 8229b22..1ba3d12 100644
--- a/test/python_connector_demo/remote_audio.py
+++ b/test/python_connector_demo/remote_audio.py
@@ -58,7 +58,7 @@ def receive_audio(client):
if __name__ == "__main__":
client = socket.socket()
- client.connect(("127.0.0.1", 10001))
+ client.connect(("192.168.1.111", 10001))
# client.send(b"User ")#specify the username
# client.send(b"False")#do not send audio back (the audio http path and local path can be received via the websocket 10003 digital-human interface)
time.sleep(1)
diff --git a/test/rasa/actions/actions.py b/test/rasa/actions/actions.py
index 99f8ab0..8a32a58 100644
--- a/test/rasa/actions/actions.py
+++ b/test/rasa/actions/actions.py
@@ -62,7 +62,7 @@ class ActionGPTResponse(Action):
current_chat={'role': 'user', 'content': tracker.latest_message.get("text")}
history.append(current_chat)
- url = "http://127.0.0.1:8101/v1/chat/completions"
+ url = "http://192.168.1.111:8101/v1/chat/completions"
req = json.dumps({
"model": "THUDM/chatglm3-6b",
"messages": history,
@@ -168,7 +168,7 @@ class ActionAskProblem(Action):
current_chat={'role': 'user', 'content': tracker.latest_message.get("text")}
history.append(current_chat)
- url = "http://127.0.0.1:8101/v1/chat/completions"
+ url = "http://192.168.1.111:8101/v1/chat/completions"
req = json.dumps({
"model": "THUDM/chatglm3-6b",
"messages": history,
diff --git a/test/test_human_ws_api.py b/test/test_human_ws_api.py
index 1d7434e..d4c2ebd 100644
--- a/test/test_human_ws_api.py
+++ b/test/test_human_ws_api.py
@@ -10,7 +10,7 @@ import queue
# configuration items
config = {
"enable_auto_get": True, # whether to actively fetch playback items
- "url" : "127.0.0.1" #server URL
+ "url" : "192.168.1.111" #server URL
}
audio_queue = queue.Queue()
diff --git a/test/test_nlp.py b/test/test_nlp.py
index e5bbd06..8e3cb3f 100644
--- a/test/test_nlp.py
+++ b/test/test_nlp.py
@@ -2,7 +2,7 @@ import requests
import json
def test_gpt(prompt):
- url = 'http://127.0.0.1:5000/v1/chat/completions' # replace with your endpoint address
+ url = 'http://192.168.1.111:5000/v1/chat/completions' # replace with your endpoint address
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer YOUR_API_KEY', # if your endpoint requires authentication
diff --git a/tts/gptsovits.py b/tts/gptsovits.py
index ef15ed4..57ed857 100644
--- a/tts/gptsovits.py
+++ b/tts/gptsovits.py
@@ -12,7 +12,7 @@ class Speech:
pass
def to_sample(self, text, style) :
- url = "http://127.0.0.1:9880"
+ url = "http://192.168.1.111:9880"
data = {
"text": text,
"text_language": "zh",
diff --git a/tts/gptsovits_v3.py b/tts/gptsovits_v3.py
index 9125eef..5467bfd 100644
--- a/tts/gptsovits_v3.py
+++ b/tts/gptsovits_v3.py
@@ -15,7 +15,7 @@ class Speech:
pass
def to_sample(self, text, style) :
- url = "http://127.0.0.1:9880/tts"
+ url = "http://192.168.1.111:9880/tts"
data = {
"text": text, # str.(required) text to be synthesized
"text_lang": "zh", # str.(required) language of the text to be synthesized
diff --git a/utils/openai_api/langchain_openai_api.py b/utils/openai_api/langchain_openai_api.py
index 55be7e1..3728150 100644
--- a/utils/openai_api/langchain_openai_api.py
+++ b/utils/openai_api/langchain_openai_api.py
@@ -17,7 +17,7 @@ def initialize_llm_chain(messages: list):
template = "{input}"
prompt = PromptTemplate.from_template(template)
- endpoint_url = "http://127.0.0.1:8000/v1/chat/completions"
+ endpoint_url = "http://192.168.1.111:8000/v1/chat/completions"
llm = ChatGLM3(
endpoint_url=endpoint_url,
max_tokens=8096,
diff --git a/utils/openai_api/openai_api.py b/utils/openai_api/openai_api.py
index 8e5d544..2067175 100644
--- a/utils/openai_api/openai_api.py
+++ b/utils/openai_api/openai_api.py
@@ -3,7 +3,7 @@ import openai
# set the OpenAI API key
# openai.api_key = os.getenv("OPENAI_API_KEY")
-openai.base_url = "http://127.0.0.1:8000/v1/chat/completions"
+openai.base_url = "http://192.168.1.111:8000/v1/chat/completions"
# define the API request payload
data = {
"model": "chatglm3-6b",
diff --git a/utils/openai_api/openai_api_request.py b/utils/openai_api/openai_api_request.py
index 1372d9b..7cfdcde 100644
--- a/utils/openai_api/openai_api_request.py
+++ b/utils/openai_api/openai_api_request.py
@@ -12,7 +12,7 @@ and handle responses.
from openai import OpenAI
-base_url = "http://127.0.0.1:8000/v1/"
+base_url = "http://192.168.1.111:8000/v1/"
client = OpenAI(api_key="EMPTY", base_url=base_url)
@@ -197,25 +197,25 @@ if __name__ == "__main__":
# embedding()
# function_chat()
-# curl -X POST "http://127.0.0.1:8000/v1/chat/completions" \
+# curl -X POST "http://192.168.1.111:8000/v1/chat/completions" \
# -H "Content-Type: application/json" \
# -d "{\"model\": \"chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}"
-# curl -X POST "http://127.0.0.1:8000/v1/completions" \
+# curl -X POST "http://192.168.1.111:8000/v1/completions" \
# -H 'Content-Type: application/json' \
# -d '{"prompt": "请用20字内回复我.你今年多大了", "history": []}'
-# curl -X POST "http://127.0.0.1:8000/v1/completions" \
+# curl -X POST "http://192.168.1.111:8000/v1/completions" \
# -H 'Content-Type: application/json' \
# -d '{"prompt": "请用20字内回复我.你今年多大了", "history": [{"你好","你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。"}]}'
-# curl -X POST "http://127.0.0.1:8000/v1/completions" \
+# curl -X POST "http://192.168.1.111:8000/v1/completions" \
# -H 'Content-Type: application/json' \
# -d '{"prompt": "请用20字内回复我.你今年多大了", "history": [["你好","你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。"]]}'
-# curl -X POST "http://127.0.0.1:8000/v1/completions" \
+# curl -X POST "http://192.168.1.111:8000/v1/completions" \
# -H 'Content-Type: application/json' \
# -d '{"prompt": "请用20字内回复我.你今年多大了", "history": ["你好"]}'
diff --git a/utils/openai_api/openai_request.py b/utils/openai_api/openai_request.py
index 76c4a5e..1ea02ba 100644
--- a/utils/openai_api/openai_request.py
+++ b/utils/openai_api/openai_request.py
@@ -22,7 +22,7 @@ def question(cont):
content = {
"prompt":"请简单回复我。" + cont,
"history":chat_list}
- url = "http://127.0.0.1:8000/v1/completions"
+ url = "http://192.168.1.111:8000/v1/completions"
req = json.dumps(content)
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req)
diff --git a/utils/openai_api/readme.txt b/utils/openai_api/readme.txt
index a39bda5..61b9116 100644
--- a/utils/openai_api/readme.txt
+++ b/utils/openai_api/readme.txt
@@ -7,10 +7,10 @@ conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=
Start
python -m vllm.entrypoints.openai.api_server --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
-python -m vllm.entrypoints.openai.api_server --host 127.0.0.1 --port 8101 --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
+python -m vllm.entrypoints.openai.api_server --host 192.168.1.111 --port 8101 --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
Test
-curl http://127.0.0.1:8101/v1/completions \
+curl http://192.168.1.111:8101/v1/completions \
-H "Content-Type: application/json" \
-d '{
"model": "THUDM/chatglm3-6b",
@@ -20,12 +20,12 @@ curl http://127.0.0.1:8101/v1/completions \
}'
Multi-turn dialogue
-curl -X POST "http://127.0.0.1:8101/v1/completions" \
+curl -X POST "http://192.168.1.111:8101/v1/completions" \
-H "Content-Type: application/json" \
-d "{\"model\": \"THUDM/chatglm3-6b\",\"prompt\": \"你叫什么名字\", \"history\": [{\"role\": \"user\", \"content\": \"你出生在哪里.\"}, {\"role\": \"assistant\", \"content\": \"出生在北京\"}]}"
Multi-turn dialogue
-curl -X POST "http://127.0.0.1:8101/v1/chat/completions" \
+curl -X POST "http://192.168.1.111:8101/v1/chat/completions" \
-H "Content-Type: application/json" \
-d "{\"model\": \"THUDM/chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}"
diff --git a/utils/openai_api/zhipu_api_request.py b/utils/openai_api/zhipu_api_request.py
index 537d48f..5987a36 100644
--- a/utils/openai_api/zhipu_api_request.py
+++ b/utils/openai_api/zhipu_api_request.py
@@ -14,7 +14,7 @@ variable format as xxx.xxx (just for a check, not a real key).
from zhipuai import ZhipuAI
-base_url = "http://127.0.0.1:8000/v1/"
+base_url = "http://192.168.1.111:8000/v1/"
client = ZhipuAI(api_key="EMP.TY", base_url=base_url)