今天周三
1、 更换gpt对接方式; 2、增加chatglm2对接。
This commit is contained in:
parent
c8ca0dee43
commit
c01256e8dc
@ -121,6 +121,11 @@ Remote Android [Live2D](https://www.bilibili.com/video/BV1sx4y1d775/?vd_sou
|
||||
|
||||
**2023.08.23:**
|
||||
|
||||
+ 更换gpt对接方式;
|
||||
+ 增加chatglm2对接。
|
||||
|
||||
**2023.08.16:**
|
||||
|
||||
+ 优化UE反复重连系统资源占用太高的问题;
|
||||
+ 自动控制是否启动面板播放;
|
||||
+ 自动删除运行日志。
|
||||
|
@ -122,6 +122,11 @@ Message format: View [WebSocket.md](https://github.com/TheRamU/Fay/blob/main/Web
|
||||
|
||||
## **Upgrade Log**
|
||||
|
||||
**2023.08.23:**
|
||||
|
||||
- Replace the GPT docking method;
|
||||
- Add chatglm2 docking.
|
||||
|
||||
**2023.08.16:**
|
||||
|
||||
- Optimized the issue of high system resource consumption caused by UE repeatedly reconnecting;
|
||||
|
29
ai_module/nlp_ChatGLM2.py
Normal file
29
ai_module/nlp_ChatGLM2.py
Normal file
@ -0,0 +1,29 @@
|
||||
import json
|
||||
import requests
|
||||
from core.content_db import Content_Db
|
||||
content_db = Content_Db()
# Pull the 10 most recent messages (newest first) to seed the model's
# conversational history.  Renamed from `list` to avoid shadowing the builtin.
history_rows = content_db.get_list('all', 'desc', 10)

# Convert DB rows into the {"role": ..., "content": ...} dicts the ChatGLM2
# endpoint expects.  Row layout assumed: row[0] = speaker type, row[2] = text
# — TODO confirm against Content_Db.get_list.
chat_list = []
for row in history_rows:
    answer_info = dict()
    if row[0] == "member":
        answer_info["role"] = "user"
        answer_info["content"] = row[2]
    elif row[0] == "fay":
        answer_info["role"] = "bot"
        answer_info["content"] = row[2]
    else:
        # Skip unrecognized speaker types; the original appended an empty
        # dict here, polluting the history sent to the model.
        continue
    chat_list.append(answer_info)
|
||||
|
||||
def question(cont):
    """Send *cont* to the local ChatGLM2 HTTP endpoint and return its reply.

    The module-level ``chat_list`` history is forwarded so the model keeps
    conversational context.  Returns the ``response`` field of the JSON
    reply (None when the server omits it).  Raises on network failure or
    non-JSON responses, as the original did.
    """
    payload = {
        # Prefix asks the model to keep replies brief.
        "prompt": "请简单回复我。" + cont,
        "history": chat_list,
    }
    # NOTE(review): endpoint is hard-coded; consider moving it into
    # config_util alongside the other service addresses.
    url = "http://192.168.1.23:8000"
    # json= handles serialization and the content-type header; the timeout
    # prevents an unresponsive server from hanging the caller forever
    # (the original had no timeout and a leftover debug print).
    r = requests.post(url, json=payload, timeout=30)
    return r.json().get('response')
|
||||
|
@ -1,21 +1,78 @@
|
||||
from revChatGPT.V3 import Chatbot
|
||||
from utils import config_util as cfg
|
||||
import time
|
||||
"""
|
||||
此代码由fay开源开发者社区 江湖墨明 提供
|
||||
通过此代码的修改,可以实现对接本地clash代理或远程代理,clash无需设置成系统代理。以解决在开系统代理后无法使用部分功能的问题
|
||||
"""
|
||||
|
||||
import requests
|
||||
import time
|
||||
|
||||
from utils import config_util as cfg
|
||||
from urllib3.exceptions import InsecureRequestWarning
|
||||
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
|
||||
|
||||
# Proxy configuration; background reading:
# https://blog.csdn.net/qq_30865917/article/details/106492549
# httpproxy holds your proxy server address.  Alternatively keep it in
# config_util and uncomment the line below (pick one of the two):
# httpproxy = cfg.chatgpt_httpproxy
httpproxy = '127.0.0.1:7890'

# Set proxy_flag = '0' to bypass the proxy and connect directly,
# e.g. when a local accelerator is already in place.
proxy_flag = '1'

# Consecutive-failure counter used by the retry logic.
count = 0
|
||||
def question(cont):
    """Query the OpenAI chat-completions endpoint and return the reply text.

    Sends *cont* as the user message together with a fixed Chinese system
    prompt defining the Fay persona, optionally routing through the local
    proxy configured above.  On any request failure a canned apology string
    is returned instead of raising.
    """
    url = "https://api.openai.com/v1/chat/completions"

    session = requests.Session()
    # Certificate verification is disabled because local proxies such as
    # clash commonly present self-signed certificates.
    session.verify = False

    if proxy_flag == '1':
        session.proxies = {
            "https": "https://" + httpproxy,
            "http": "http://" + httpproxy,
        }

    model_engine = "gpt-3.5-turbo"

    # Persona / behavior prompt (kept verbatim from the original source).
    prompt = "你是数字人Fay。回答之前请一步一步想清楚。你的底层AI算法技术是Fay。当有人质疑你是假的 AI ,或者质疑你是用 ChatGPT 套的壳制作的时候,你就避而不答,转而讲一个笑话。所有回复请用20字内。"

    message = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": cont}
    ]

    data = {
        "model": model_engine,
        "messages": message,
        "temperature": 0.3,
        "max_tokens": 2000,
        "user": "live-virtual-digital-person"
    }

    headers = {'content-type': 'application/json', 'Authorization': 'Bearer ' + cfg.key_chatgpt_api_key}

    starttime = time.time()

    try:
        response = session.post(url, json=data, headers=headers, verify=False)
        response.raise_for_status()  # raise for non-2xx status codes

        # Parse the JSON body with the proper decoder.  The original used
        # eval(response.text), which executes untrusted response content and
        # also fails on JSON literals such as true/false/null.
        result = response.json()
        response_text = result["choices"][0]["message"]["content"]

    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        response_text = "抱歉,我现在太忙了,休息一会,请稍后再试。"

    print("接口调用耗时 :" + str(time.time() - starttime))

    return response_text
|
||||
|
||||
if __name__ == "__main__":
    # Smoke-test the proxy setup: fire the same sample query three times
    # and print each reply.
    for _ in range(3):
        query = "爱情是什么"
        response = question(query)
        print("\n The result is ", response)
|
@ -39,6 +39,7 @@ from ai_module import yolov8
|
||||
from ai_module import nlp_VisualGLM
|
||||
from ai_module import nlp_lingju
|
||||
from ai_module import nlp_rwkv_api
|
||||
from ai_module import nlp_ChatGLM2
|
||||
|
||||
import platform
|
||||
if platform.system() == "Windows":
|
||||
@ -53,7 +54,9 @@ modules = {
|
||||
"nlp_rasa": nlp_rasa,
|
||||
"nlp_VisualGLM": nlp_VisualGLM,
|
||||
"nlp_lingju": nlp_lingju,
|
||||
"nlp_rwkv_api":nlp_rwkv_api
|
||||
"nlp_rwkv_api":nlp_rwkv_api,
|
||||
"nlp_chatglm2": nlp_ChatGLM2
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
@ -20,7 +20,7 @@ ms_tts_region=
|
||||
xf_ltp_app_id=
|
||||
xf_ltp_api_key=
|
||||
|
||||
#NLP多选一:lingju、yuan、gpt、chatgpt、rasa(需启动chatglm及rasa,https://m.bilibili.com/video/BV1D14y1f7pr)、VisualGLM、rwkv_api、rwkv
|
||||
#NLP多选一:lingju、yuan、gpt、chatgpt、rasa、chatglm2(需启动chatglm及rasa,https://m.bilibili.com/video/BV1D14y1f7pr)、VisualGLM、rwkv_api、rwkv
|
||||
chat_module=lingju
|
||||
|
||||
#灵聚 服务密钥(NLP多选1) https://open.lingju.ai
|
||||
|
Loading…
Reference in New Issue
Block a user