diff --git a/config.json b/config.json index e8724db..1214e07 100644 --- a/config.json +++ b/config.json @@ -22,6 +22,7 @@ "join": 10 }, "playSound": true, + "sound_synthesis_enabled": false, "visualization": false }, "items": [], diff --git a/core/content_db.py b/core/content_db.py index b109e84..313d2a6 100644 --- a/core/content_db.py +++ b/core/content_db.py @@ -50,7 +50,7 @@ class Content_Db: conn = sqlite3.connect("fay.db") cur = conn.cursor() try: - cur.execute("insert into T_Msg (type,way,content,createtime,username,uid) values (?,?,?,?,?,?)",(type,way,content,int(time.time()),username,uid)) + cur.execute("insert into T_Msg (type,way,content,createtime,username,uid) values (?,?,?,?,?,?)",(type, way, content, time.time(), username, uid)) conn.commit() except: util.log(1, "请检查参数是否有误") diff --git a/core/fay_core.py b/core/fay_core.py index bb5c963..4e4012e 100644 --- a/core/fay_core.py +++ b/core/fay_core.py @@ -65,7 +65,7 @@ modules = { } #大语言模型回复 -def handle_chat_message(msg, username='User'): +def handle_chat_message(msg, username='User', observation=''): text = '' textlist = [] try: @@ -81,7 +81,7 @@ def handle_chat_message(msg, username='User'): text = textlist[0]['text'] else: uid = member_db.new_instance().find_user(username) - text = selected_module.question(msg, uid) + text = selected_module.question(msg, uid, observation) util.printInfo(1, username, '自然语言处理完成. 耗时: {} ms'.format(math.floor((time.time() - tm) * 1000))) if text == '哎呀,你这么说我也不懂,详细点呗' or text == '': util.printInfo(1, username, '[!] 
自然语言无语了!') @@ -161,9 +161,9 @@ class FeiFei: if wsa_server.get_instance().is_connected(username): content = {'Topic': 'Unreal', 'Data': {'Key': 'log', 'Value': "思考中..."}, 'Username' : username, 'robot': f'http://{cfg.fay_url}:5000/robot/Thinking.jpg'} wsa_server.get_instance().add_cmd(content) - text,textlist = handle_chat_message(interact.data["msg"], username) + text,textlist = handle_chat_message(interact.data["msg"], username, interact.data.get("observation", "")) - qa_service.QAService().record_qapair(interact.data["msg"], text)#沟通记录缓存到qa文件 + # qa_service.QAService().record_qapair(interact.data["msg"], text)#沟通记录缓存到qa文件 else: text = answer @@ -315,15 +315,16 @@ class FeiFei: def say(self, interact, text): try: result = None - audio_url = interact.data.get('audio')#透传的音频 - if audio_url is not None: - file_name = 'sample-' + str(int(time.time() * 1000)) + '.wav' - result = self.download_wav(audio_url, './samples/', file_name) - elif config_util.config["interact"]["playSound"] or wsa_server.get_instance().is_connected(interact.data.get("user")) or self.__is_send_remote_device_audio(interact):#tts - util.printInfo(1, interact.data.get('user'), '合成音频...') - tm = time.time() - result = self.sp.to_sample(text.replace("*", ""), self.__get_mood_voice()) - util.printInfo(1, interact.data.get('user'), '合成音频完成. 
耗时: {} ms 文件:{}'.format(math.floor((time.time() - tm) * 1000), result)) + if config_util.config["interact"]["sound_synthesis_enabled"]: + audio_url = interact.data.get('audio')#透传的音频 + if audio_url is not None: + file_name = 'sample-' + str(int(time.time() * 1000)) + '.wav' + result = self.download_wav(audio_url, './samples/', file_name) + elif config_util.config["interact"]["playSound"] or wsa_server.get_instance().is_connected(interact.data.get("user")) or self.__is_send_remote_device_audio(interact):#tts + util.printInfo(1, interact.data.get('user'), '合成音频...') + tm = time.time() + result = self.sp.to_sample(text.replace("*", ""), self.__get_mood_voice()) + util.printInfo(1, interact.data.get('user'), '合成音频完成. 耗时: {} ms 文件:{}'.format(math.floor((time.time() - tm) * 1000), result)) if result is not None: MyThread(target=self.__process_output_audio, args=[result, interact, text]).start() diff --git a/gui/flask_server.py b/gui/flask_server.py index e4c38f5..f122f24 100644 --- a/gui/flask_server.py +++ b/gui/flask_server.py @@ -7,6 +7,7 @@ import re from flask import Flask, render_template, request, jsonify, Response, send_file from flask_cors import CORS import requests +import datetime import fay_booter @@ -82,8 +83,7 @@ def api_get_data(): voice_list = tts_voice.get_voice_list() send_voice_list = [] if config_util.tts_module == 'ali': - wsa_server.get_web_instance().add_cmd({ - "voiceList": [ + voice_list = [ {"id": "abin", "name": "阿斌"}, {"id": "zhixiaobai", "name": "知小白"}, {"id": "zhixiaoxia", "name": "知小夏"}, @@ -159,10 +159,12 @@ def api_get_data(): {"id": "laotie", "name": "老铁"}, {"id": "laomei", "name": "老妹"}, {"id": "aikan", "name": "艾侃"} - ] - }) + + ] + send_voice_list = {"voiceList": voice_list} + wsa_server.get_web_instance().add_cmd(send_voice_list) elif config_util.tts_module == 'volcano': - wsa_server.get_web_instance().add_cmd({ + voice_list = { "voiceList": [ {"id": "BV001_streaming", "name": "通用女声"}, {"id": "BV002_streaming", "name": "通用男声"}, @@ 
-171,7 +173,9 @@ def api_get_data(): {"id": "zh_male_wennuanahu_moon_bigtts", "name": "温暖阿虎/Alvin"}, {"id": "zh_female_wanwanxiaohe_moon_bigtts", "name": "湾湾小何"}, ] - }) + } + send_voice_list = {"voiceList": voice_list} + wsa_server.get_web_instance().add_cmd(send_voice_list) else: voice_list = tts_voice.get_voice_list() send_voice_list = [] @@ -181,10 +185,11 @@ def api_get_data(): wsa_server.get_web_instance().add_cmd({ "voiceList": send_voice_list }) + voice_list = send_voice_list wsa_server.get_web_instance().add_cmd({"deviceList": __get_device_list()}) if fay_booter.is_running(): wsa_server.get_web_instance().add_cmd({"liveState": 1}) - return json.dumps({'config': config_util.config, 'voice_list' : send_voice_list}) + return json.dumps({'config': config_util.config, 'voice_list' : voice_list}) @__app.route('/api/start-live', methods=['post']) @@ -227,7 +232,8 @@ def api_get_Msg(): relist = [] i = len(list)-1 while i >= 0: - relist.append(dict(type=list[i][0], way=list[i][1], content=list[i][2], createtime=list[i][3], timetext=list[i][4], username=list[i][5])) + timetext = datetime.datetime.fromtimestamp(list[i][3]).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] + relist.append(dict(type=list[i][0], way=list[i][1], content=list[i][2], createtime=list[i][3], timetext=timetext, username=list[i][5])) i -= 1 if fay_booter.is_running(): wsa_server.get_web_instance().add_cmd({"liveState": 1}) @@ -249,7 +255,8 @@ def api_send_v1_chat_completions(): username = 'User' model = data.get('model', 'fay') - interact = Interact("text", 1, {'user': username, 'msg': last_content}) + observation = data.get('observation', '') + interact = Interact("text", 1, {'user': username, 'msg': last_content, 'observation': observation}) util.printInfo(3, "文字沟通接口", '{}'.format(interact.data["msg"]), time.time()) text = fay_booter.feiFei.on_interact(interact) @@ -265,6 +272,12 @@ def api_get_Member_list(): return json.dumps({'list': list}) +@__app.route('/api/get_run_status', methods=['post']) +def 
api_get_run_status(): + status = fay_booter.is_running() + return json.dumps({'status': status}) + + def stream_response(text): def generate(): for chunk in text_chunks(text): diff --git a/gui/static/js/index.js b/gui/static/js/index.js index 852402b..eb9f7f9 100644 --- a/gui/static/js/index.js +++ b/gui/static/js/index.js @@ -83,6 +83,12 @@ class FayInterface { }); } + getRunStatus() { + return this.fetchData(`${this.baseApiUrl}/api/get_run_status`, { + method: 'POST' + }); + } + getMessageHistory(username) { return new Promise((resolve, reject) => { const url = `${this.baseApiUrl}/api/get-msg`; @@ -122,6 +128,19 @@ class FayInterface { }); } + getTime(){ + const date = new Date(); + const year = date.getFullYear(); + const month = (date.getMonth() + 1).toString().padStart(2, '0'); // 月份从0开始,需要+1 + const day = date.getDate().toString().padStart(2, '0'); + const hours = date.getHours().toString().padStart(2, '0'); + const minutes = date.getMinutes().toString().padStart(2, '0'); + const seconds = date.getSeconds().toString().padStart(2, '0'); + const milliseconds = date.getMilliseconds().toString().padStart(3, '0'); + const currentDateTimeWithMs = `${year}-${month}-${day} ${hours}:${minutes}:${seconds}.${milliseconds}`; + return currentDateTimeWithMs + } + handleIncomingMessage(data) { const vueInstance = this.vueInstance; // console.log('Incoming message:', data); @@ -129,10 +148,8 @@ class FayInterface { vueInstance.liveState = data.liveState; if (data.liveState === 1) { vueInstance.configEditable = false; - vueInstance.sendSuccessMsg('已开启!'); } else if (data.liveState === 0) { vueInstance.configEditable = true; - vueInstance.sendSuccessMsg('已关闭!'); } } @@ -169,7 +186,7 @@ class FayInterface { username: data.panelReply.username, content: data.panelReply.content, type: data.panelReply.type, - time: new Date().toLocaleTimeString() + timetext: this.getTime() }); vueInstance.$nextTick(() => { const chatContainer = vueInstance.$el.querySelector('.chatmessage'); @@ 
-207,17 +224,31 @@ class FayInterface { chatMessages: {}, panelMsg: '', panelReply: '', - robot:'static/images/Normal.gif' + robot:'static/images/Normal.gif', + base_url: 'http://127.0.0.1:5000' }; }, created() { this.initFayService(); - this.loadUserList(); + // this.loadUserList(); }, methods: { initFayService() { - this.fayService = new FayInterface('ws://127.0.0.1:10003', 'http://127.0.0.1:5000', this); + this.fayService = new FayInterface('ws://127.0.0.1:10003', this.base_url, this); this.fayService.connectWebSocket(); + this.fayService.websocket.addEventListener('open', () => { + this.loadUserList(); + }); + this.fayService.getRunStatus().then((data) => { + if (data) { + if(data.status){ + this.liveState = 1; + }else{ + this.liveState = 0; + } + + } + }); }, sendMessage() { let _this = this; @@ -241,7 +272,7 @@ class FayInterface { document.querySelector('.chatmessage').scrollTop = height; }, 1000); _this.newMessage = ''; - let url = "http://127.0.0.1:5000/api/send"; + let url = `${this.base_url}/api/send`; let send_data = { "msg": text, "username": usernameToSend @@ -255,7 +286,6 @@ class FayInterface { xhr.onreadystatechange = async function () { if (!executed && xhr.status === 200) { executed = true; - // 成功处理逻辑(可以添加额外的回调操作) } }; }, @@ -283,11 +313,13 @@ class FayInterface { startLive() { this.liveState = 2 this.fayService.startLive().then(() => { + this.sendSuccessMsg('已开启!'); }); }, stopLive() { this.fayService.stopLive().then(() => { this.liveState = 3 + this.sendSuccessMsg('已关闭!'); }); }, diff --git a/gui/static/js/setting.js b/gui/static/js/setting.js index 638ddda..71719df 100644 --- a/gui/static/js/setting.js +++ b/gui/static/js/setting.js @@ -73,6 +73,13 @@ class FayInterface { }); } + getRunStatus() { + return this.fetchData(`${this.baseApiUrl}/api/get_run_status`, { + method: 'POST' + }); + } + + handleIncomingMessage(data) { const vueInstance = this.vueInstance; console.log('Incoming message:', data); @@ -80,10 +87,8 @@ class FayInterface { 
vueInstance.liveState = data.liveState; if (data.liveState === 1) { vueInstance.configEditable = false; - vueInstance.sendSuccessMsg('已开启!'); } else if (data.liveState === 0) { vueInstance.configEditable = true; - vueInstance.sendSuccessMsg('已关闭!'); } } @@ -132,6 +137,7 @@ new Vue({ visualization_detection_enabled: false, source_record_enabled: false, source_record_device: '', + sound_synthesis_enabled: true, attribute_name: "", attribute_gender: "", attribute_age: "", @@ -165,6 +171,7 @@ new Vue({ }], automatic_player_status: false, automatic_player_url: "", + host_url: "http://127.0.0.1:5000" }; }, created() { @@ -173,10 +180,22 @@ new Vue({ }, methods: { initFayService() { - this.fayService = new FayInterface('ws://127.0.0.1:10003', 'http://127.0.0.1:5000', this); + this.fayService = new FayInterface('ws://127.0.0.1:10003', this.host_url, this); this.fayService.connectWebSocket(); }, getData() { + this.fayService.getRunStatus().then((data) => { + if (data) { + if(data.status){ + this.liveState = 1; + this.configEditable = false; + }else{ + this.liveState = 0; + this.configEditable = true; + } + + } + }); this.fayService.getData().then((data) => { if (data) { this.voiceList = data.voice_list.map((voice) => ({ @@ -192,6 +211,7 @@ new Vue({ if (config.interact) { this.play_sound_enabled = config.interact.playSound; this.visualization_detection_enabled = config.interact.visualization; + this.sound_synthesis_enabled = config.interact.sound_synthesis_enabled; this.QnA = config.interact.QnA; } if (config.source && config.source.record) { @@ -224,7 +244,7 @@ new Vue({ } }, saveConfig() { - let url = "http://127.0.0.1:5000/api/submit"; + let url = `${this.host_url}/api/submit`; let send_data = { "config": { "source": { @@ -257,6 +277,7 @@ new Vue({ }, "interact": { "playSound": this.play_sound_enabled, + "sound_synthesis_enabled": this.sound_synthesis_enabled, "visualization": this.visualization_detection_enabled, "QnA": this.QnA, "maxInteractTime": 
this.interact_maxInteractTime, @@ -293,12 +314,14 @@ new Vue({ this.liveState = 2 this.fayService.startLive().then(() => { this.configEditable = false; + this.sendSuccessMsg('已开启!'); }); }, stopLive() { + this.liveState = 3 this.fayService.stopLive().then(() => { this.configEditable = true; - this.liveState = 3 + this.sendSuccessMsg('已关闭!'); }); }, sendSuccessMsg(message) { diff --git a/gui/templates/setting.html b/gui/templates/setting.html index 29c6735..549decf 100644 --- a/gui/templates/setting.html +++ b/gui/templates/setting.html @@ -29,7 +29,7 @@
-
人设请设置你的数字人人设
+
人设已连接数字人已连接远程音频
@@ -48,7 +48,7 @@
  • 职        业:
  •  联系方式:
  • Q&A文件:
  • -
  • 补        充:
  • +
  • 补        充:
  • @@ -81,8 +81,7 @@  麦 克 风 :
    -
    - +
    diff --git a/llm/nlp_ChatGLM3.py b/llm/nlp_ChatGLM3.py index a8767e1..c0c25a5 100644 --- a/llm/nlp_ChatGLM3.py +++ b/llm/nlp_ChatGLM3.py @@ -3,7 +3,7 @@ import requests from core import content_db -def question(cont, uid=0): +def question(cont, uid=0, observation=""): contentdb = content_db.new_instance() if uid == 0: list = contentdb.get_list('all','desc', 11) diff --git a/llm/nlp_VisualGLM.py b/llm/nlp_VisualGLM.py new file mode 100644 index 0000000..3efcc3f --- /dev/null +++ b/llm/nlp_VisualGLM.py @@ -0,0 +1,37 @@ +""" +这是对于清华智谱VisualGLM-6B的代码,在使用前请先安装并启动好VisualGLM-6B. +https://github.com/THUDM/VisualGLM-6B +""" +import json +import requests +import uuid +import os +import cv2 +from ai_module import yolov8 + +# Initialize an empty history list +communication_history = [] + +def question(cont, uid=0, observation=""): + if not yolov8.new_instance().get_status(): + return "请先启动“Fay Eyes”" + content = { + "text":cont, + "history":communication_history} + img = yolov8.new_instance().get_img() + if yolov8.new_instance().get_status() and img is not None: + filename = str(uuid.uuid4()) + ".jpg" + current_working_directory = os.getcwd() + filepath = os.path.join(current_working_directory, "data", filename) + cv2.imwrite(filepath, img) + content["image"] = filepath + url = "http://127.0.0.1:8080" + print(content) + req = json.dumps(content) + headers = {'content-type': 'application/json'} + r = requests.post(url, headers=headers, data=req) + + # Save this conversation to history + communication_history.append([cont, r.text]) + + return (r.text + "\n(相片:" + filepath + ")") if img is not None else r.text \ No newline at end of file diff --git a/llm/nlp_coze.py b/llm/nlp_coze.py index 874c725..58703ab 100644 --- a/llm/nlp_coze.py +++ b/llm/nlp_coze.py @@ -5,7 +5,7 @@ from utils import util from utils import config_util as cfg from core import content_db -def question(cont, uid=0): +def question(cont, uid=0, observation=""): contentdb = content_db.new_instance() if uid == 0: communication_history =
contentdb.get_list('all','desc', 11) diff --git a/llm/nlp_gpt.py b/llm/nlp_gpt.py index 32a931b..5432b63 100644 --- a/llm/nlp_gpt.py +++ b/llm/nlp_gpt.py @@ -15,7 +15,7 @@ import json httpproxy = cfg.proxy_config -def question(cont, uid=0): +def question(cont, uid=0, observation=""): url= cfg.gpt_base_url + "/chat/completions" session = requests.Session() @@ -28,15 +28,18 @@ def question(cont, uid=0): person_info = cfg.config["attribute"] model_engine = cfg.gpt_model_engine #此处可以定义角色的行为和特征,假装xx模型可以绕过chatgpt信息检查 + observation_text = "" + if observation != "": + observation_text = f"以下是当前观测结果:{observation},观测结果只供参考。" prompt = f""" - 你是数字人:{person_info['name']},你性别为{person_info['gender']}, + 你是我的数字人,你名字是:{person_info['name']},你性别为{person_info['gender']}, 你年龄为{person_info['age']},你出生地在{person_info['birth']}, - 你生肖为{person_info['zodiac']},你星座为{person_info['age']}, + 你生肖为{person_info['zodiac']},你星座为{person_info['constellation']}, 你职业为{person_info['job']},你联系方式为{person_info['contact']}, - 你喜好为{person_info['hobby']}。 + 你喜好为{person_info['hobby']}。{observation_text} 回答之前请一步一步想清楚。对于大部分问题,请直接回答并提供有用和准确的信息。 但当有人质疑你是假的 AI,或者质疑你是用 ChatGPT 套的壳制作的时候,请避而不答,转而讲一个笑话。所有回复请尽量控制在20字内。 - """ + """ contentdb = content_db.new_instance() if uid == 0: communication_history = contentdb.get_list('all','desc', 11) diff --git a/llm/nlp_langchain.py b/llm/nlp_langchain.py index 8ec5320..e21523b 100644 --- a/llm/nlp_langchain.py +++ b/llm/nlp_langchain.py @@ -81,7 +81,7 @@ def generate_prompt(question): info = f"{context}\n{instruction}\n{creative_instruction}\n{complexity_handling}\n问题:{question}\n回答:" return info -def question(cont, uid=0): +def question(cont, uid=0, observation=""): try: save_all() info = generate_prompt(cont) diff --git a/llm/nlp_lingju.py b/llm/nlp_lingju.py index 1c5edab..7e121de 100644 --- a/llm/nlp_lingju.py +++ b/llm/nlp_lingju.py @@ -7,7 +7,7 @@ from utils import util from utils import config_util as cfg from core.authorize_tb import Authorize_Tb -def
question(cont, uid=0): +def question(cont, uid=0, observation=""): lingju = Lingju() answer = lingju.question(cont, uid) return answer diff --git a/llm/nlp_ollama_api.py b/llm/nlp_ollama_api.py index 7f031dc..fb6d0e8 100644 --- a/llm/nlp_ollama_api.py +++ b/llm/nlp_ollama_api.py @@ -4,7 +4,7 @@ import time from utils import config_util as cfg from utils import util from core import content_db -def question(cont, uid=0): +def question(cont, uid=0, observation=""): contentdb = content_db.new_instance() if uid == 0: @@ -13,13 +13,16 @@ def question(cont, uid=0): communication_history = contentdb.get_list('all','desc', 11, uid) person_info = cfg.config["attribute"] + observation_text = "" + if observation != "": + observation_text = f"以下是当前观测结果:{observation},观测结果只供参考。" #此处可以定义角色的行为和特征,假装xx模型可以绕过chatgpt信息检查 prompt = f""" 你是数字人:{person_info['name']},你性别为{person_info['gender']}, 你年龄为{person_info['age']},你出生地在{person_info['birth']}, - 你生肖为{person_info['zodiac']},你星座为{person_info['age']}, + 你生肖为{person_info['zodiac']},你星座为{person_info['constellation']}, 你职业为{person_info['job']},你联系方式为{person_info['contact']}, - 你喜好为{person_info['hobby']}。 + 你喜好为{person_info['hobby']}。{observation_text} 回答之前请一步一步想清楚。对于大部分问题,请直接回答并提供有用和准确的信息。 请尽量以可阅读的方式回复,所有回复请尽量控制在20字内。 """ diff --git a/llm/nlp_privategpt.py b/llm/nlp_privategpt.py index 0c7364a..04dcc47 100644 --- a/llm/nlp_privategpt.py +++ b/llm/nlp_privategpt.py @@ -46,7 +46,7 @@ def load_all_pdfs(folder_path): print(f"上传 {file_name} 失败: {e}") -def question(cont, uid=0): +def question(cont, uid=0, observation=""): load_all_pdfs(folder_path) text = client.contextual_completions.prompt_completion( prompt=cont diff --git a/llm/nlp_rwkv.py b/llm/nlp_rwkv.py index 285a647..15c53f4 100644 --- a/llm/nlp_rwkv.py +++ b/llm/nlp_rwkv.py @@ -9,7 +9,7 @@ model = RwkvForCausalLM.from_pretrained("RWKV-4-World-1.5B") tokenizer = TRIE_TOKENIZER('./ringrwkv/rwkv_vocab_v20230424.txt') data = "" -def question(cont, uid=0): +def question(cont, uid=0, 
observation=""): global data prompt = data + f'Question: {cont.strip()}\n\nAnswer:' input_ids = tokenizer.encode(prompt) diff --git a/llm/nlp_xingchen.py b/llm/nlp_xingchen.py index fd45c5d..e03eaa3 100644 --- a/llm/nlp_xingchen.py +++ b/llm/nlp_xingchen.py @@ -3,7 +3,7 @@ import json from utils import util, config_util from core import content_db -def question(cont, uid=0): +def question(cont, uid=0, observation=""): url = 'https://nlp.aliyuncs.com/v2/api/chat/send' headers = { diff --git a/main.py b/main.py index daa051a..7d39783 100644 --- a/main.py +++ b/main.py @@ -55,6 +55,7 @@ if __name__ == '__main__': #ip替换 if config_util.fay_url != "127.0.0.1": replace_ip_in_file("gui/static/js/index.js", config_util.fay_url) + replace_ip_in_file("gui/static/js/setting.js", config_util.fay_url) #启动数字人接口服务 ws_server = wsa_server.new_instance(port=10002)