Localize configuration: replace the hard-coded LAN address 192.168.1.111 with 127.0.0.1
parent 6710e31ec0
commit 8a6e754fe0
@@ -6,7 +6,7 @@ import argparse
 import json
 
 parser = argparse.ArgumentParser()
-parser.add_argument("--host", type=str, default="192.168.1.111", required=False, help="host ip, localhost, 0.0.0.0")
+parser.add_argument("--host", type=str, default="127.0.0.1", required=False, help="host ip, localhost, 0.0.0.0")
 parser.add_argument("--port", type=int, default=10197, required=False, help="grpc server port")
 parser.add_argument("--chunk_size", type=int, default=160, help="ms")
 parser.add_argument("--vad_needed", type=bool, default=True)
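A small sketch of one way to keep the loopback default while still allowing overrides without editing code: read the default from an environment variable. FAY_ASR_HOST is a hypothetical name, not part of this commit.

import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str,
                    default=os.environ.get("FAY_ASR_HOST", "127.0.0.1"),  # hypothetical env override
                    required=False, help="host ip, localhost, 0.0.0.0")
parser.add_argument("--port", type=int, default=10197, required=False, help="grpc server port")
args = parser.parse_args()
print(f"ASR endpoint: {args.host}:{args.port}")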
@@ -27,13 +27,13 @@ class Funasr_websocket_recognizer():
 '''
 
 parser = argparse.ArgumentParser()
-parser.add_argument("--host", type=str, default="192.168.1.111", required=False, help="host ip, localhost, 0.0.0.0")
+parser.add_argument("--host", type=str, default="127.0.0.1", required=False, help="host ip, localhost, 0.0.0.0")
 parser.add_argument("--port", type=int, default=10194, required=False, help="grpc server port")
 parser.add_argument("--chunk_size", type=int, default=160, help="ms")
 parser.add_argument("--vad_needed", type=bool, default=True)
 args = parser.parse_args()
 
-def __init__(self, host="192.168.1.111",
+def __init__(self, host="127.0.0.1",
 port="10197",
 is_ssl=True,
 chunk_size="0, 10, 5",
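Before launching the recognizer it can help to confirm that the funasr service is actually listening on the loopback port. A minimal check, assumption: it only opens a plain TCP connection and does not speak the websocket protocol.

import socket

def port_open(host: str, port: int, timeout: float = 2.0) -> bool:
    # True if a TCP connection can be opened; False on refusal or timeout.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(port_open("127.0.0.1", 10197))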
Binary file not shown.
@@ -27,9 +27,9 @@
 "items": [],
 "source": {
 "automatic_player_status": true,
-"automatic_player_url": "http://192.168.1.111:6000",
+"automatic_player_url": "http://127.0.0.1:6000",
 "liveRoom": {
-"enabled": true,
+"enabled": false,
 "url": ""
 },
 "record": {
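The same two fields could also be patched programmatically; a hedged sketch in which the file name config.json is an assumption and only the keys visible in the hunk are touched.

import json

with open("config.json", "r", encoding="utf-8") as f:
    cfg = json.load(f)

cfg["source"]["automatic_player_url"] = "http://127.0.0.1:6000"  # point the player at loopback
cfg["source"]["liveRoom"]["enabled"] = False                     # matches the change above

with open("config.json", "w", encoding="utf-8") as f:
    json.dump(cfg, f, ensure_ascii=False, indent=2)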
@@ -51,7 +51,7 @@ class SocketBridgeService:
 async def create_socket_client(self):
 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
 try:
-sock.connect(('192.168.1.111', 10001))
+sock.connect(('127.0.0.1', 10001))
 sock.setblocking(True) # 设置为阻塞模式
 return sock
 except Exception as e:
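The hunk cuts off at the except clause. A hedged completion, written as a standalone function for brevity; the error-handling body is an assumption, not taken from the repo.

import socket

async def create_socket_client():
    # Open a blocking TCP connection to the local bridge port, or return None on failure.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(('127.0.0.1', 10001))
        sock.setblocking(True)
        return sock
    except Exception as e:
        print(f"socket bridge connection failed: {e}")  # assumed handling
        sock.close()
        return None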
@@ -234,7 +234,7 @@ class FayInterface {
 panelMsg: '',
 panelReply: '',
 robot:'static/images/Normal.gif',
-base_url: 'http://192.168.1.111:5000',
+base_url: 'http://127.0.0.1:5000',
 play_sound_enabled: false,
 source_record_enabled: false
 };
@@ -246,7 +246,7 @@ class FayInterface {
 },
 methods: {
 initFayService() {
-this.fayService = new FayInterface('ws://192.168.1.111:10003', this.base_url, this);
+this.fayService = new FayInterface('ws://127.0.0.1:10003', this.base_url, this);
 this.fayService.connectWebSocket();
 this.fayService.websocket.addEventListener('open', () => {
 this.loadUserList();
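The UI above connects from the browser; to verify the relocated websocket endpoint from Python instead, a small sketch using the third-party websockets package. The payload Fay sends is treated as opaque text here.

import asyncio
import websockets

async def peek():
    # Connect to the local panel websocket and print the first message received.
    async with websockets.connect("ws://127.0.0.1:10003") as ws:
        print(await ws.recv())

asyncio.run(peek())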
@@ -170,7 +170,7 @@ new Vue({
 }],
 automatic_player_status: false,
 automatic_player_url: "",
-host_url: "http://192.168.1.111:5000"
+host_url: "http://127.0.0.1:5000"
 };
 },
 created() {
@@ -179,7 +179,7 @@ new Vue({
 },
 methods: {
 initFayService() {
-this.fayService = new FayInterface('ws://192.168.1.111:10003', this.host_url, this);
+this.fayService = new FayInterface('ws://127.0.0.1:10003', this.host_url, this);
 this.fayService.connectWebSocket();
 },
 getData() {
@@ -106,7 +106,7 @@
 active-color="#13ce66"
 inactive-color="#ff4949">
 </el-switch><span class="font_name" style="margin-left: 10px;">自动播放:</span>
-<input class="section_2" v-model="automatic_player_url" placeholder="http://192.168.1.111:6000" :disabled="!configEditable" />
+<input class="section_2" v-model="automatic_player_url" placeholder="http://127.0.0.1:6000" :disabled="!configEditable" />
 </div>
 </div>
 <div style="margin-top: 40px; text-align: center;">
@@ -27,7 +27,7 @@ class MainWindow(QMainWindow):
 #清空缓存
 profile = QWebEngineProfile.defaultProfile()
 profile.clearHttpCache()
-self.browser.load(QUrl('http://192.168.1.111:5000'))
+self.browser.load(QUrl('http://127.0.0.1:5000'))
 self.setCentralWidget(self.browser)
 MyThread(target=self.runnable).start()
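A minimal standalone equivalent of what the window above does, assuming PyQt5 with QtWebEngine is installed: open the local panel in an embedded browser.

import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEngineView

app = QApplication(sys.argv)
view = QWebEngineView()
view.load(QUrl("http://127.0.0.1:5000"))  # Fay panel served locally in web mode
view.resize(1024, 768)
view.show()
sys.exit(app.exec_())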
@@ -4,7 +4,7 @@ import requests
 
 class VllmGPT:
 
-def __init__(self, host="192.168.1.111",
+def __init__(self, host="127.0.0.1",
 port="8000",
 model="THUDM/chatglm3-6b",
 max_tokens="1024"):
@@ -17,7 +17,7 @@ class VllmGPT:
 
 def question(self,cont):
 chat_list = []
-url = "http://192.168.1.111:8101/v1/completions"
+url = "http://127.0.0.1:8101/v1/completions"
 req = json.dumps({
 "model": "THUDM/chatglm3-6b",
 "prompt": cont,
@@ -51,6 +51,6 @@ class VllmGPT:
 return res['choices'][0]['message']['content']
 
 if __name__ == "__main__":
-vllm = VllmGPT('192.168.1.111','8101','Qwen-7B-Chat')
+vllm = VllmGPT('127.0.0.1','8101','Qwen-7B-Chat')
 req = vllm.question2("你叫什么名字啊今年多大了")
 print(req)
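The class above wraps a plain HTTP call; a hedged sketch of the underlying request against the local vLLM server. The response is assumed to follow the OpenAI completions schema that vLLM's OpenAI-compatible server exposes; the code in the diff reads a chat-style field instead, so adjust to whatever the running server actually returns.

import json
import requests

url = "http://127.0.0.1:8101/v1/completions"
payload = {"model": "THUDM/chatglm3-6b", "prompt": "你叫什么名字", "max_tokens": 64}
r = requests.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(payload))
print(r.json()["choices"][0]["text"])  # assumes OpenAI-style completions response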
@@ -4,7 +4,7 @@ from langchain.tools import BaseTool
 import requests
 import json
 
-url = "http://192.168.1.111:4008/send"
+url = "http://127.0.0.1:4008/send"
 headers = {'Content-Type': 'application/json'}
 data = {
 "message": "你好",
@@ -25,7 +25,7 @@ def question(cont, uid=0, observation=""):
 content = {
 "prompt":"请简单回复我。" + cont,
 "history":chat_list}
-url = "http://192.168.1.111:8000/v1/completions"
+url = "http://127.0.0.1:8000/v1/completions"
 req = json.dumps(content)
 headers = {'content-type': 'application/json'}
 r = requests.post(url, headers=headers, data=req)
@@ -25,7 +25,7 @@ def question(cont, uid=0, observation=""):
 filepath = os.path.join(current_working_directory, "data", filename)
 cv2.imwrite(filepath, img)
 content["image"] = filepath
-url = "http://192.168.1.111:8080"
+url = "http://127.0.0.1:8080"
 print(content)
 req = json.dumps(content)
 headers = {'content-type': 'application/json'}
@@ -2,7 +2,7 @@ import hashlib
 import os
 from pgpt_python.client import PrivateGPTApi
 
-client = PrivateGPTApi(base_url="http://192.168.1.111:8001")
+client = PrivateGPTApi(base_url="http://127.0.0.1:8001")
 
 index_name = "knowledge_data"
 folder_path = "llm/privategpt/knowledge_base"
main.py
@@ -120,7 +120,7 @@ if __name__ == '__main__':
 contentdb.init_db()
 
 #ip替换
-if config_util.fay_url != "192.168.1.111":
+if config_util.fay_url != "127.0.0.1":
 replace_ip_in_file("gui/static/js/index.js", config_util.fay_url)
 replace_ip_in_file("gui/static/js/setting.js", config_util.fay_url)
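replace_ip_in_file itself is not shown in this diff; a hedged sketch of what such a helper typically does (the real implementation in the repo may differ): rewrite the packaged loopback address to the address configured as fay_url.

def replace_ip_in_file(path: str, new_ip: str, old_ip: str = "127.0.0.1") -> None:
    # Read the file, swap every occurrence of the packaged IP, and write it back.
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    with open(path, "w", encoding="utf-8") as f:
        f.write(text.replace(old_ip, new_ip))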
system.conf
@@ -4,7 +4,7 @@
 #建议使用funasr,请依照asr/funasr/README>md的说明启动
 ASR_mode = funasr
 #ASR二选一(需要运行fay/test/funasr服务)集成达摩院asr项目、感谢中科大脑算法工程师张聪聪提供集成代码
-local_asr_ip=192.168.1.111
+local_asr_ip=127.0.0.1
 local_asr_port=10197
 
 # ASR二选一(第1次运行建议用这个,免费3个月), 阿里云 实时语音识别 服务密钥(必须)https://ai.aliyun.com/nls/trans
@@ -58,7 +58,7 @@ gpt_base_url=https://api.zyai.online/v1
 #gpt model engine 如:gpt-3.5-turbo、moonshot-v1-8k
 gpt_model_engine=gpt-3.5-turbo
 
-#gpt(fastgpt)代理(可为空,填写例子:192.168.1.111:7890)
+#gpt(fastgpt)代理(可为空,填写例子:127.0.0.1:7890)
 proxy_config=
 
 #通义星尘 服务密钥(NLP多选1) https://xingchen.aliyun.com/
@@ -66,7 +66,7 @@ xingchen_api_key=
 xingchen_characterid=
 
 #ollama服务 (NLP多选1)
-#ollama_ip 可以填写 192.168.1.111 或者 localhost
+#ollama_ip 可以填写 127.0.0.1 或者 localhost
 ollama_ip = localhost
 #ollama_model 可以填写 qwen:latest , llama2:latest , yi:lastest , pi:latest , gemma:latest
 ollama_model = gemma:latest
@@ -75,8 +75,8 @@ ollama_model = gemma:latest
 coze_bot_id=
 coze_api_key=
 
-#启动模式common、web(服务器或docker请使用web方式,通过http://192.168.1.111:5000控制)
+#启动模式common、web(服务器或docker请使用web方式,通过http://127.0.0.1:5000控制)
 start_mode=web
 
 #服务器主动地址
-fay_url = 192.168.1.111
+fay_url = 127.0.0.1
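A hedged sketch for reading the keys shown above from system.conf, assuming plain "key = value" lines; if the real file uses INI section headers, configparser would be the better fit.

def read_conf(path: str = "system.conf") -> dict:
    values = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue  # skip comments and blank lines
            key, _, value = line.partition("=")
            values[key.strip()] = value.strip()
    return values

conf = read_conf()
print(conf.get("local_asr_ip"), conf.get("fay_url"), conf.get("start_mode"))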
@@ -19,7 +19,7 @@ ollama pull qwen:latest #阿里开源模型
 ```
 chat_module=ollama_api
 
-ollama_ip = 192.168.1.111
+ollama_ip = 127.0.0.1
 ollama_model = phi:latest # llama2:latest , yi:lastest , pi:latest , gemma:latest (开源大语言模型多选1)
 ```
 服务配置
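To confirm the configured model actually answers locally, one option is to call the Ollama HTTP API directly. Port 11434 is Ollama's default and is an assumption here, since the config above only records the IP.

import json
import requests

resp = requests.post(
    "http://127.0.0.1:11434/api/generate",
    data=json.dumps({"model": "gemma:latest", "prompt": "hello", "stream": False}),
)
print(resp.json().get("response"))  # non-streaming responses carry the text under "response"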
@@ -23099,12 +23099,12 @@ an external server.
 <p>Example:
 Create a localhost stream on port 5555:
 </p><div class="example">
-<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Multiple clients may connect to the stream using:
 </p><div class="example">
-<pre class="example-preformatted">ffplay zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffplay zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
@@ -2622,12 +2622,12 @@ an external server.
 <p>Example:
 Create a localhost stream on port 5555:
 </p><div class="example">
-<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Multiple clients may connect to the stream using:
 </p><div class="example">
-<pre class="example-preformatted">ffplay zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffplay zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
@@ -11528,12 +11528,12 @@ an external server.
 <p>Example:
 Create a localhost stream on port 5555:
 </p><div class="example">
-<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Multiple clients may connect to the stream using:
 </p><div class="example">
-<pre class="example-preformatted">ffplay zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffplay zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
@@ -11892,12 +11892,12 @@ an external server.
 <p>Example:
 Create a localhost stream on port 5555:
 </p><div class="example">
-<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Multiple clients may connect to the stream using:
 </p><div class="example">
-<pre class="example-preformatted">ffplay zmq:tcp://192.168.1.111:5555
+<pre class="example-preformatted">ffplay zmq:tcp://127.0.0.1:5555
 </pre></div>
 
 <p>Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
@@ -58,7 +58,7 @@ def receive_audio(client):
 
 if __name__ == "__main__":
 client = socket.socket()
-client.connect(("192.168.1.111", 10001))
+client.connect(("127.0.0.1", 10001))
 # client.send(b"<username>User</username>")#指定用户名
 # client.send(b"<output>False<output>")#不回传音频(可以通过websocket 10003数字人接口接收音频http路径和本地路径)
 time.sleep(1)
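A hedged expansion of the commented lines above: the username announcement is taken from the comment in the diff, while the dummy payload is purely a placeholder, since the audio frame format the server expects is not shown here.

import socket
import time

client = socket.socket()
client.connect(("127.0.0.1", 10001))
client.send(b"<username>User</username>")  # optional username tag, per the comment above
time.sleep(1)
client.send(b"\x00" * 1024)                # placeholder bytes standing in for an audio frame (assumption)
client.close()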
@@ -62,7 +62,7 @@ class ActionGPTResponse(Action):
 current_chat={'role': 'user', 'content': tracker.latest_message.get("text")}
 history.append(current_chat)
 
-url = "http://192.168.1.111:8101/v1/chat/completions"
+url = "http://127.0.0.1:8101/v1/chat/completions"
 req = json.dumps({
 "model": "THUDM/chatglm3-6b",
 "messages": history,
@@ -168,7 +168,7 @@ class ActionAskProblem(Action):
 current_chat={'role': 'user', 'content': tracker.latest_message.get("text")}
 history.append(current_chat)
 
-url = "http://192.168.1.111:8101/v1/chat/completions"
+url = "http://127.0.0.1:8101/v1/chat/completions"
 req = json.dumps({
 "model": "THUDM/chatglm3-6b",
 "messages": history,
@@ -10,7 +10,7 @@ import queue
 # 配置项
 config = {
 "enable_auto_get": True, # 设置是否启用主动获取播放项
-"url" : "192.168.1.111" #服务端Url
+"url" : "127.0.0.1" #服务端Url
 }
 
 audio_queue = queue.Queue()
@@ -2,7 +2,7 @@ import requests
 import json
 
 def test_gpt(prompt):
-url = 'http://192.168.1.111:5000/v1/chat/completions' # 替换为您的接口地址
+url = 'http://127.0.0.1:5000/v1/chat/completions' # 替换为您的接口地址
 headers = {
 'Content-Type': 'application/json',
 'Authorization': f'Bearer YOUR_API_KEY', # 如果您的接口需要身份验证
@@ -12,7 +12,7 @@ class Speech:
 pass
 
 def to_sample(self, text, style) :
-url = "http://192.168.1.111:9880"
+url = "http://127.0.0.1:9880"
 data = {
 "text": text,
 "text_language": "zh",
@@ -15,7 +15,7 @@ class Speech:
 pass
 
 def to_sample(self, text, style) :
-url = "http://192.168.1.111:9880/tts"
+url = "http://127.0.0.1:9880/tts"
 data = {
 "text": text, # str.(required) text to be synthesized
 "text_lang": "zh", # str.(required) language of the text to be synthesized
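A hedged sketch of the call the class above wraps: post text to the local TTS endpoint and save whatever audio comes back. Only the fields visible in the hunk are sent; if the service requires additional parameters (reference audio, prompt language, and so on) the request would need them as well, and the assumption that the endpoint returns raw audio bytes may not hold for every deployment.

import requests

data = {"text": "你好", "text_lang": "zh"}
resp = requests.post("http://127.0.0.1:9880/tts", json=data)
with open("sample_output.wav", "wb") as f:
    f.write(resp.content)  # assumes the endpoint returns raw audio bytes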
@@ -17,7 +17,7 @@ def initialize_llm_chain(messages: list):
 template = "{input}"
 prompt = PromptTemplate.from_template(template)
 
-endpoint_url = "http://192.168.1.111:8000/v1/chat/completions"
+endpoint_url = "http://127.0.0.1:8000/v1/chat/completions"
 llm = ChatGLM3(
 endpoint_url=endpoint_url,
 max_tokens=8096,
@@ -3,7 +3,7 @@ import openai
 
 # 设置OpenAI API的密钥
 # openai.api_key = os.getenv("OPENAI_API_KEY")
-openai.base_url = "http://192.168.1.111:8000/v1/chat/completions"
+openai.base_url = "http://127.0.0.1:8000/v1/chat/completions"
 # 定义API请求的数据
 data = {
 "model": "chatglm3-6b",
@@ -12,7 +12,7 @@ and handle responses.
 
 from openai import OpenAI
 
-base_url = "http://192.168.1.111:8000/v1/"
+base_url = "http://127.0.0.1:8000/v1/"
 client = OpenAI(api_key="EMPTY", base_url=base_url)
 
 
@@ -197,25 +197,25 @@ if __name__ == "__main__":
 # embedding()
 # function_chat()
 
-# curl -X POST "http://192.168.1.111:8000/v1/chat/completions" \
+# curl -X POST "http://127.0.0.1:8000/v1/chat/completions" \
 # -H "Content-Type: application/json" \
 # -d "{\"model\": \"chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}"
 
-# curl -X POST "http://192.168.1.111:8000/v1/completions" \
+# curl -X POST "http://127.0.0.1:8000/v1/completions" \
 # -H 'Content-Type: application/json' \
 # -d '{"prompt": "请用20字内回复我.你今年多大了", "history": []}'
 
-# curl -X POST "http://192.168.1.111:8000/v1/completions" \
+# curl -X POST "http://127.0.0.1:8000/v1/completions" \
 # -H 'Content-Type: application/json' \
 # -d '{"prompt": "请用20字内回复我.你今年多大了", "history": [{"你好","你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。"}]}'
 
 
-# curl -X POST "http://192.168.1.111:8000/v1/completions" \
+# curl -X POST "http://127.0.0.1:8000/v1/completions" \
 # -H 'Content-Type: application/json' \
 # -d '{"prompt": "请用20字内回复我.你今年多大了", "history": [["你好","你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。"]]}'
 
 
-# curl -X POST "http://192.168.1.111:8000/v1/completions" \
+# curl -X POST "http://127.0.0.1:8000/v1/completions" \
 # -H 'Content-Type: application/json' \
 # -d '{"prompt": "请用20字内回复我.你今年多大了", "history": ["你好"]}'
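The same request as the first commented curl line above, issued through the openai client this demo already imports; the model name and base_url are taken from the hunks.

from openai import OpenAI

client = OpenAI(api_key="EMPTY", base_url="http://127.0.0.1:8000/v1/")
resp = client.chat.completions.create(
    model="chatglm3-6b",
    messages=[{"role": "user", "content": "你好,给我讲一个故事,大概100字"}],
    max_tokens=100,
    temperature=0.8,
)
print(resp.choices[0].message.content)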
@@ -22,7 +22,7 @@ def question(cont):
 content = {
 "prompt":"请简单回复我。" + cont,
 "history":chat_list}
-url = "http://192.168.1.111:8000/v1/completions"
+url = "http://127.0.0.1:8000/v1/completions"
 req = json.dumps(content)
 headers = {'content-type': 'application/json'}
 r = requests.post(url, headers=headers, data=req)
@@ -7,10 +7,10 @@ conda install pytorch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 pytorch-cuda=
 启动
 python -m vllm.entrypoints.openai.api_server --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
 
-python -m vllm.entrypoints.openai.api_server --host 192.168.1.111 --port 8101 --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
+python -m vllm.entrypoints.openai.api_server --host 127.0.0.1 --port 8101 --tensor-parallel-size=1 --trust-remote-code --max-model-len 1024 --model THUDM/chatglm3-6b
 
 测试
-curl http://192.168.1.111:8101/v1/completions \
+curl http://127.0.0.1:8101/v1/completions \
 -H "Content-Type: application/json" \
 -d '{
 "model": "THUDM/chatglm3-6b",
@@ -20,12 +20,12 @@ curl http://192.168.1.111:8101/v1/completions \
 }'
 
 多轮对话
-curl -X POST "http://192.168.1.111:8101/v1/completions" \
+curl -X POST "http://127.0.0.1:8101/v1/completions" \
 -H "Content-Type: application/json" \
 -d "{\"model\": \"THUDM/chatglm3-6b\",\"prompt\": \"你叫什么名字\", \"history\": [{\"role\": \"user\", \"content\": \"你出生在哪里.\"}, {\"role\": \"assistant\", \"content\": \"出生在北京\"}]}"
 
 
 多轮对话
-curl -X POST "http://192.168.1.111:8101/v1/chat/completions" \
+curl -X POST "http://127.0.0.1:8101/v1/chat/completions" \
 -H "Content-Type: application/json" \
 -d "{\"model\": \"THUDM/chatglm3-6b\", \"messages\": [{\"role\": \"system\", \"content\": \"You are ChatGLM3, a large language model trained by Zhipu.AI. Follow the user's instructions carefully. Respond using markdown.\"}, {\"role\": \"user\", \"content\": \"你好,给我讲一个故事,大概100字\"}], \"stream\": false, \"max_tokens\": 100, \"temperature\": 0.8, \"top_p\": 0.8}"
@@ -14,7 +14,7 @@ variable formate as xxx.xxx (just for check, not need a real key).
 
 from zhipuai import ZhipuAI
 
-base_url = "http://192.168.1.111:8000/v1/"
+base_url = "http://127.0.0.1:8000/v1/"
 client = ZhipuAI(api_key="EMP.TY", base_url=base_url)