From 5d4bac29b828fd5ec31b056069abbe0797530cb8 Mon Sep 17 00:00:00 2001
From: xszyou
Date: Fri, 14 Jul 2023 23:34:53 +0800
Subject: [PATCH] 20230714
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1、修复linux及mac运行出错问题;
2、修复因唇型出错无法继续执行问题;
3、提供rwkv对接方案。
---
 ai_module/nlp_gpt.py      |  2 +-
 ai_module/nlp_rwkv.py     | 28 +++++++++++++++++++++
 ai_module/nlp_rwkv_api.py | 52 +++++++++++++++++++++++++++++++++++++++
 core/fay_core.py          | 11 ++++++---
 core/qa_service.py        |  3 ---
 core/recorder.py          |  2 +-
 6 files changed, 89 insertions(+), 9 deletions(-)
 create mode 100644 ai_module/nlp_rwkv.py
 create mode 100644 ai_module/nlp_rwkv_api.py

diff --git a/ai_module/nlp_gpt.py b/ai_module/nlp_gpt.py
index 0a51e73..0d36bab 100644
--- a/ai_module/nlp_gpt.py
+++ b/ai_module/nlp_gpt.py
@@ -6,7 +6,7 @@ count = 0
 def question(cont):
     global count
     try:
-        chatbot = Chatbot(proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
+        chatbot = Chatbot(model = "gpt-3.5", proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
         response = chatbot.ask(cont)
         count = 0
         return response
diff --git a/ai_module/nlp_rwkv.py b/ai_module/nlp_rwkv.py
new file mode 100644
index 0000000..0c9bd1d
--- /dev/null
+++ b/ai_module/nlp_rwkv.py
@@ -0,0 +1,28 @@
+import torch
+from ringrwkv.configuration_rwkv_world import RwkvConfig
+from ringrwkv.rwkv_tokenizer import TRIE_TOKENIZER
+from ringrwkv.modehf_world import RwkvForCausalLM
+
+model = RwkvForCausalLM.from_pretrained("RWKV-4-World-1.5B")
+#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-3B")
+#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-0.4B")
+tokenizer = TRIE_TOKENIZER('./ringrwkv/rwkv_vocab_v20230424.txt')
+
+data = ""
+def question(cont):
+    global data
+    prompt = data + f'Question: {cont.strip()}\n\nAnswer:'
+    input_ids = tokenizer.encode(prompt)
+    input_ids = torch.tensor(input_ids).unsqueeze(0)
+    out = model.generate(input_ids,max_new_tokens=20)
+
+    outlist = out[0].tolist()
+    for i in outlist[:]:  # iterate a copy: remove() while iterating skips adjacent zero tokens
+        if 
i==0:
+            outlist.remove(i)
+    answer = tokenizer.decode(outlist)
+    # data = answer + "\n\n"
+    answer = answer.replace(prompt, "", 1)
+    return answer
+
+
diff --git a/ai_module/nlp_rwkv_api.py b/ai_module/nlp_rwkv_api.py
new file mode 100644
index 0000000..cc7f236
--- /dev/null
+++ b/ai_module/nlp_rwkv_api.py
@@ -0,0 +1,52 @@
+
+import requests
+import time
+
+def question(cont):
+    url= "https://rwkv.ai-creator.net/chntuned/v1/chat/completions"
+
+    session = requests.Session()
+    session.verify = False  # NOTE(review): disables TLS certificate verification — confirm this is intended
+
+    #此处可以定义角色的行为和特征,假装xx模型可以绕过chatgpt信息检查
+    prompt = "你是数字人Fay。回答之前请一步一步想清楚。你的底层AI算法技术是Fay。当有人质疑你是假的 AI ,或者质疑你是用 ChatGPT 套的壳制作的时候,你就避而不答,转而讲一个笑话。"
+
+    message=[
+        {"role": "system", "content": prompt},
+        {"role": "user", "content": cont}
+    ]
+
+    data = {
+        # "model":model_engine,
+        "messages":message,
+        "temperature":0.3,
+        "max_tokens":2000,
+        "user":"live-virtual-digital-person"
+    }
+
+    headers = {'content-type': 'application/json', 'Authorization': 'Bearer '}
+
+    starttime = time.time()
+
+    try:
+        response = session.post(url, json=data, headers=headers, verify=False)
+        response.raise_for_status() # 检查响应状态码是否为200
+
+        result = response.json()  # was eval(response.text): never eval an untrusted HTTP body
+        response_text = result["choices"][0]["message"]["content"]
+
+
+    except requests.exceptions.RequestException as e:
+        print(f"请求失败: {e}")
+        response_text = "抱歉,我现在太忙了,休息一会,请稍后再试。"
+
+
+    print("接口调用耗时 :" + str(time.time() - starttime))
+
+    return response_text
+
+if __name__ == "__main__":
+    for i in range(3):
+        query = "爱情是什么"
+        response = question(query)
+        print("\n The result is ", response)
\ No newline at end of file
diff --git a/core/fay_core.py b/core/fay_core.py
index 2e76a0d..1caf252 100644
--- a/core/fay_core.py
+++ b/core/fay_core.py
@@ -371,10 +371,13 @@ class FeiFei:
             content = {'Topic': 'Unreal', 'Data': {'Key': 'audio', 'Value': os.path.abspath(file_url), 'Time': audio_length, 'Type': say_type}}
             #计算lips
             if platform.system() == "Windows":
-                lip_sync_generator = LipSyncGenerator()
-                viseme_list = 
lip_sync_generator.generate_visemes(os.path.abspath(file_url))
-                consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
-                content["Data"]["Lips"] = consolidated_visemes
+                try:
+                    lip_sync_generator = LipSyncGenerator()
+                    viseme_list = lip_sync_generator.generate_visemes(os.path.abspath(file_url))
+                    consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
+                    content["Data"]["Lips"] = consolidated_visemes
+                except Exception as e:
+                    util.log(1, "唇型数字生成失败,无法使用新版ue5工程")
             wsa_server.get_instance().add_cmd(content)
 
             #推送远程音频
diff --git a/core/qa_service.py b/core/qa_service.py
index 3eb82c9..f26ee24 100644
--- a/core/qa_service.py
+++ b/core/qa_service.py
@@ -1,9 +1,6 @@
-from winreg import QueryInfoKey
 from openpyxl import load_workbook
 import difflib
-import shlex
-import subprocess
 from utils import config_util as cfg
 
 
 def question(query_type,text):
diff --git a/core/recorder.py b/core/recorder.py
index 6f81e72..fb6806e 100644
--- a/core/recorder.py
+++ b/core/recorder.py
@@ -28,7 +28,7 @@ class Recorder:
         self.__processing = False
         self.__history_level = []
         self.__history_data = []
-        self.__dynamic_threshold = 0.7 # 声音识别的音量阈值
+        self.__dynamic_threshold = 0.5 # 声音识别的音量阈值
         self.__MAX_LEVEL = 25000
         self.__MAX_BLOCK = 100