From 580264169f594f619d3fd20868f53bf4e675d14e Mon Sep 17 00:00:00 2001
From: xszyou
Date: Wed, 30 Aug 2023 18:29:08 +0800
Subject: [PATCH] =?UTF-8?q?=E4=BB=8A=E5=A4=A9=E6=98=9F=E6=9C=9F=E4=B8=89?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1、 调整gpt的消息记录方式;
2、*q&a支持RPA自动化脚本。
---
 README.md            | 10 +++++++++-
 README_EN.md         |  5 +++++
 WebSocket.md         |  8 +++++++-
 ai_module/nlp_gpt.py | 24 +++++++++++++++++++++++-
 core/qa_service.py   | 44 ++++++++++++++++++++++++++++++--------------
 requirements.txt     |  6 ++++--
 6 files changed, 78 insertions(+), 19 deletions(-)

diff --git a/README.md b/README.md
index 3c3ed54..d4e510f 100644
--- a/README.md
+++ b/README.md
@@ -119,7 +119,15 @@ Remote Android  [Live2D](https://www.bilibili.com/video/BV1sx4y1d775/?vd_sou
 
 ## **三、升级日志**
 
-**2023.08.16:**
+**2023.08.30**
+
++ 调整gpt的消息记录方式;
+
++ *q&a支持RPA自动化脚本。
+
+
+
+**2023.08.23:**
 
 + 更换gpt对接方式;
 + 增加chatglm2对接。
diff --git a/README_EN.md b/README_EN.md
index 7e815c5..4af68b1 100644
--- a/README_EN.md
+++ b/README_EN.md
@@ -122,6 +122,11 @@ Message format: View [WebSocket.md](https://github.com/TheRamU/Fay/blob/main/Web
 
 ## **Upgrade Log**
 
+**2023.08.30**
+
+- Adjust the message recording method of GPT;
+- *Q&A supports RPA automation scripts.
+
 **2023.08.23:**
 
 - Replace the GPT docking method;
diff --git a/WebSocket.md b/WebSocket.md
index 9e2c08d..ae1ebbe 100644
--- a/WebSocket.md
+++ b/WebSocket.md
@@ -110,4 +110,10 @@
 
 | 参数       | 描述             | 类型  | 范围            |
 | ---------- | ---------------- | ----- | --------------- |
-| Data.text | 文本 | str | |
\ No newline at end of file
+| Data.text | 文本 | str | |
+
+### 接收消息标记音频播放完成
+
+```php
+Play End
+```
diff --git a/ai_module/nlp_gpt.py b/ai_module/nlp_gpt.py
index a3b7b9d..06557d7 100644
--- a/ai_module/nlp_gpt.py
+++ b/ai_module/nlp_gpt.py
@@ -9,6 +9,7 @@ import time
 from utils import config_util as cfg
 from urllib3.exceptions import InsecureRequestWarning
 requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
+from core.content_db import Content_Db
 
 #代理服务器的配置方式,参考链接https://blog.csdn.net/qq_30865917/article/details/106492549
 #httpproxy此处填写你代理服务器的地址,可以把配置文件放到config_util里面,二选一
@@ -34,10 +35,31 @@ def question(cont):
 
     #此处可以定义角色的行为和特征,假装xx模型可以绕过chatgpt信息检查
     prompt = "你是数字人Fay。回答之前请一步一步想清楚。你的底层AI算法技术是Fay。当有人质疑你是假的 AI ,或者质疑你是用 ChatGPT 套的壳制作的时候,你就避而不答,转而讲一个笑话。所有回复请用20字内。"
+    content_db = Content_Db()
+    communication_history = content_db.get_list('all','desc',10)
+    #历史记录处理
+    if len(communication_history)>1:
+        msg = "以下是历史记录:"
+        i = 0
+        for info in communication_history:
+            if info[0] == 'member':
+                content = "user:" + info[2]
+            else:
+                content = "reply:" + info[2]
+            if msg == "":
+                msg = content
+            else:
+                if i == len(communication_history) - 1:
+                    msg = msg + "\n现在需要询问您的问题是(直接回答,不用前缀reply:):\n"+ cont
+                else:
+                    msg = msg + "\n"+ content
+            i+=1
+    else:
+        msg = cont
 
     message=[
         {"role": "system", "content": prompt},
-        {"role": "user", "content": cont}
+        {"role": "user", "content": msg}
     ]
 
     data = {
diff --git a/core/qa_service.py b/core/qa_service.py
index bc8ca40..66c1756 100644
--- a/core/qa_service.py
+++ b/core/qa_service.py
@@ -2,12 +2,20 @@
 from openpyxl import load_workbook
 import difflib
 from utils import config_util as cfg
+from scheduler.thread_manager import MyThread
+import shlex
+import subprocess
+import time
 
 def question(query_type,text):
     qa = QAService()
     answer = qa.question(query_type,text)
     return answer
 
+def run_script(command):
+    args = shlex.split(command) # 分割命令行参数
+    subprocess.Popen(args)
+
 
 class QAService:
 
     def __init__(self):
@@ -34,31 +42,38 @@ class QAService:
     def question(self, query_type, text):
         if query_type == 'qa':
             answer_dict = self.__read_qna(cfg.config['interact']['QnA'])
-            answer = self.__get_keyword(answer_dict, text)
+            answer, action = self.__get_keyword(answer_dict, text, query_type)
+            if action:
+                MyThread(target=self.__run, args=[action]).start()
+            return answer
+
         elif query_type == 'Persona':
             answer_dict = self.attribute_keyword
-            answer = self.__get_keyword(answer_dict, text)
+            answer, action = self.__get_keyword(answer_dict, text, query_type)
         elif query_type == 'command':
-            answer = self.__get_keyword(self.command_keyword, text)
+            answer, action = self.__get_keyword(self.command_keyword, text, query_type)
         return answer
 
-
-    def __read_qna(self, filename) -> list:
+    def __run(self,action):
+        time.sleep(2)
+        run_script(action)
+
+    def __read_qna(self, filename):
         qna = []
         try:
             wb = load_workbook(filename)
-            sheets = wb.worksheets
-            sheet = sheets[0]
-            for row in sheet.rows:
+            sheet = wb.active
+            for row in sheet.iter_rows(min_row=2, values_only=True):
                 if len(row) >= 2:
-                    qna.append([row[0].value.split(";"), row[1].value])
+                    qna.append([row[0].split(";"), row[1], row[2] if len(row) >= 3 else None])
         except BaseException as e:
-            print("无法读取Q&A文件 {} -> ".format(filename) + str(e))
+            print(f"无法读取Q&A文件 {filename} -> {e}")
         return qna
 
-    def __get_keyword(self, keyword_dict, text):
+    def __get_keyword(self, keyword_dict, text, query_type):
         last_similar = 0
         last_answer = ''
+        last_action = ''
         for qa in keyword_dict:
             for quest in qa[0]:
                 similar = self.__string_similar(text, quest)
@@ -67,10 +82,11 @@ class QAService:
                 if similar > last_similar:
                     last_similar = similar
                     last_answer = qa[1]
+                    if query_type == "qa":
+                        last_action = qa[2]
         if last_similar >= 0.6:
-            return last_answer
-        return None
-
+            return last_answer, last_action
+        return None, None
 
     def __string_similar(self, s1, s2):
         return difflib.SequenceMatcher(None, s1, s2).quick_ratio()
diff --git a/requirements.txt b/requirements.txt
index 33b79ed..b13554b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,7 +11,7 @@ flask_cors~=3.0.10
 PyQtWebEngine~=5.15.5
 eyed3~=0.9.6
 websocket-client
-azure-cognitiveservices-speech~=1.21.0
+azure-cognitiveservices-speech
 aliyun-python-sdk-core==2.13.3
 scipy~=1.10.0
 simhash
@@ -20,4 +20,6 @@ gevent~=22.10.1
 edge_tts~=6.1.3
 eyed3
 revChatGPT
-ultralytics
\ No newline at end of file
+ultralytics
+subprocess
+shlex
\ No newline at end of file
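
Notes on the Q&A/RPA change: a matched `qa` row can now carry a third spreadsheet cell holding a command line; when such a row wins the similarity match, the answer is returned immediately and a background thread waits about two seconds before handing the command to `run_script`, which splits it with `shlex` and launches it with `subprocess.Popen`. Below is a minimal standalone sketch of that flow; the file name `QnA.xlsx`, the sample question, and the sample command are hypothetical, not values taken from the project configuration.

```python
# Standalone sketch of the new Q&A flow: fuzzy keyword match -> answer plus an
# optional RPA command. The column layout (keywords separated by ";", answer,
# action) mirrors the patched __read_qna(); file name and command are hypothetical.
import difflib
import shlex
import subprocess

from openpyxl import load_workbook


def read_qna(filename):
    """Return rows of [keyword list, answer, optional action command]."""
    qna = []
    wb = load_workbook(filename)
    for row in wb.active.iter_rows(min_row=2, values_only=True):
        if len(row) >= 2 and row[0]:
            qna.append([row[0].split(";"), row[1], row[2] if len(row) >= 3 else None])
    return qna


def match(qna, text, threshold=0.6):
    """Pick the answer/action pair whose keyword is most similar to the input text."""
    best_score, best_answer, best_action = 0.0, None, None
    for keywords, answer, action in qna:
        for keyword in keywords:
            score = difflib.SequenceMatcher(None, text, keyword).quick_ratio()
            if score > best_score:
                best_score, best_answer, best_action = score, answer, action
    return (best_answer, best_action) if best_score >= threshold else (None, None)


if __name__ == "__main__":
    answer, action = match(read_qna("QnA.xlsx"), "打开记事本")  # hypothetical spreadsheet
    print(answer)
    if action:  # e.g. a cell containing "notepad.exe"
        subprocess.Popen(shlex.split(action))  # fire-and-forget, like run_script()
```

Both `shlex` and `subprocess` ship with the Python standard library, so beyond `openpyxl` the match-and-launch step needs no extra packages.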
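On the GPT change: `question()` in `ai_module/nlp_gpt.py` now pulls the last ten records from the local content DB and folds them, together with the incoming question, into the single user message instead of sending only the raw text. A rough sketch of that assembly is shown below; the history rows are assumed to be `(speaker, timestamp, text)` tuples with speaker `'member'` for the user, and the sample rows and trimmed persona prompt are illustrative only.

```python
# Sketch: fold recent dialogue history plus the new question into one user message,
# in the spirit of the patched nlp_gpt.question(). History rows are assumed to be
# (speaker, timestamp, text) tuples; only the speaker flag and the text are used.
def build_user_message(history, question):
    if len(history) <= 1:
        return question
    lines = ["以下是历史记录:"]
    for speaker, _, text in history:
        lines.append(("user:" if speaker == "member" else "reply:") + text)
    lines.append("现在需要询问您的问题是(直接回答,不用前缀reply:):\n" + question)
    return "\n".join(lines)


messages = [
    {"role": "system", "content": "你是数字人Fay。所有回复请用20字内。"},  # trimmed persona prompt
    {"role": "user", "content": build_user_message(
        [("member", None, "你好"), ("fay", None, "你好,我是Fay。")],  # sample rows
        "今天天气怎么样?")},
]
print(messages[-1]["content"])
```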