1. Fix errors when running on Linux and macOS;
2. Fix the issue where a lip-sync failure stopped execution from continuing;
3. Provide an RWKV integration option (see the wiring sketch below).
xszyou 2023-07-14 23:34:53 +08:00
parent b53bc6e124
commit 5d4bac29b8
6 changed files with 89 additions and 9 deletions
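For item 3 of the commit message, a minimal sketch of how the new RWKV backends might be plugged into a chat pipeline. Only the module paths and the question() entry point come from the files added in this commit; the routing function and parameter names below are illustrative assumptions, not code from this repository.

# Hypothetical wiring sketch -- not part of this commit. Only the module paths
# and question() are taken from the diff; everything else is assumed.
from ai_module import nlp_rwkv        # local RWKV-4-World weights
from ai_module import nlp_rwkv_api    # hosted RWKV chat-completions endpoint

def answer(text: str, use_local_rwkv: bool = True) -> str:
    # Route the user's text to whichever RWKV backend is preferred.
    nlp = nlp_rwkv if use_local_rwkv else nlp_rwkv_api
    return nlp.question(text)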

View File

@@ -6,7 +6,7 @@ count = 0
def question(cont):
    global count
    try:
-        chatbot = Chatbot(proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
+        chatbot = Chatbot(model = "gpt-3.5", proxy = cfg.proxy_config, api_key = cfg.key_chatgpt_api_key)
        response = chatbot.ask(cont)
        count = 0
        return response

ai_module/nlp_rwkv.py (new file, +28 lines)
View File

@@ -0,0 +1,28 @@
import torch
from ringrwkv.configuration_rwkv_world import RwkvConfig
from ringrwkv.rwkv_tokenizer import TRIE_TOKENIZER
from ringrwkv.modehf_world import RwkvForCausalLM

model = RwkvForCausalLM.from_pretrained("RWKV-4-World-1.5B")
#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-3B")
#model = RwkvForCausalLM.from_pretrained("RWKV-4-World-0.4B")
tokenizer = TRIE_TOKENIZER('./ringrwkv/rwkv_vocab_v20230424.txt')

data = ""

def question(cont):
    global data
    prompt = data + f'Question: {cont.strip()}\n\nAnswer:'
    input_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor(input_ids).unsqueeze(0)
    out = model.generate(input_ids, max_new_tokens=20)
    outlist = out[0].tolist()
    # drop token id 0 before decoding
    outlist = [i for i in outlist if i != 0]
    answer = tokenizer.decode(outlist)
    # data = answer + "\n\n"
    answer = answer.replace(prompt, "", 1)
    return answer
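A quick way to smoke-test this module on its own might look like the following; the import path assumes the file sits at ai_module/nlp_rwkv.py as added above, and the sample question is illustrative only.

# Hypothetical standalone test; only question() is taken from the new module above.
from ai_module import nlp_rwkv

if __name__ == "__main__":
    print(nlp_rwkv.question("What is RWKV?"))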

ai_module/nlp_rwkv_api.py (new file, +52 lines)
View File

@@ -0,0 +1,52 @@
import requests
import time

def question(cont):
    url = "https://rwkv.ai-creator.net/chntuned/v1/chat/completions"
    session = requests.Session()
    session.verify = False
    # The system prompt defines the character's behaviour and traits; pretending to be an xx model can get around ChatGPT's checks.
    prompt = "You are the digital human Fay. Think things through step by step before answering. Your underlying AI algorithm technology is Fay. When someone questions whether you are a fake AI, or claims you are just a shell built on ChatGPT, avoid answering and tell a joke instead."
    message = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": cont}
    ]
    data = {
        # "model": model_engine,
        "messages": message,
        "temperature": 0.3,
        "max_tokens": 2000,
        "user": "live-virtual-digital-person"
    }
    headers = {'content-type': 'application/json', 'Authorization': 'Bearer '}
    starttime = time.time()
    try:
        response = session.post(url, json=data, headers=headers, verify=False)
        response.raise_for_status()  # raise if the status code is not 200
        result = response.json()
        response_text = result["choices"][0]["message"]["content"]
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        response_text = "Sorry, I'm too busy right now. I'll take a short break; please try again later."
    print("API call took: " + str(time.time() - starttime))
    return response_text

if __name__ == "__main__":
    for i in range(3):
        query = "What is love?"
        response = question(query)
        print("\n The result is ", response)
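One side effect of verify=False above: requests/urllib3 emits an InsecureRequestWarning on every call. If that noise is unwanted, it can be silenced; this is an optional addition, not part of the commit.

# Optional, not in this commit: suppress the InsecureRequestWarning triggered by
# the disabled certificate verification in question() above.
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)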

View File

@@ -371,10 +371,13 @@ class FeiFei:
            content = {'Topic': 'Unreal', 'Data': {'Key': 'audio', 'Value': os.path.abspath(file_url), 'Time': audio_length, 'Type': say_type}}
            # compute lip-sync (viseme) data
            if platform.system() == "Windows":
-                lip_sync_generator = LipSyncGenerator()
-                viseme_list = lip_sync_generator.generate_visemes(os.path.abspath(file_url))
-                consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
-                content["Data"]["Lips"] = consolidated_visemes
+                try:
+                    lip_sync_generator = LipSyncGenerator()
+                    viseme_list = lip_sync_generator.generate_visemes(os.path.abspath(file_url))
+                    consolidated_visemes = lip_sync_generator.consolidate_visemes(viseme_list)
+                    content["Data"]["Lips"] = consolidated_visemes
+                except Exception as e:
+                    util.log(1, "Lip-sync data generation failed; the new UE5 project cannot be used")
            wsa_server.get_instance().add_cmd(content)
            # push remote audio

View File

@@ -1,9 +1,6 @@
-from winreg import QueryInfoKey
from openpyxl import load_workbook
import difflib
-import shlex
-import subprocess
from utils import config_util as cfg

def question(query_type,text):

View File

@@ -28,7 +28,7 @@ class Recorder:
        self.__processing = False
        self.__history_level = []
        self.__history_data = []
-        self.__dynamic_threshold = 0.7  # volume threshold for speech detection
+        self.__dynamic_threshold = 0.5  # volume threshold for speech detection
        self.__MAX_LEVEL = 25000
        self.__MAX_BLOCK = 100
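For intuition about the threshold change (an editor's illustration with hypothetical names, not code from this repository): treating the threshold as a fraction of a reference level, lowering it from 0.7 to 0.5 means quieter audio blocks now count as speech.

# Hypothetical illustration of a relative volume gate; not Fay code.
MAX_LEVEL = 25000
dynamic_threshold = 0.5  # was 0.7 before this commit

def is_speech(block_level: int) -> bool:
    # a block counts as speech once its level crosses the threshold fraction
    return block_level >= dynamic_threshold * MAX_LEVEL

print(is_speech(15000))  # True at 0.5; the same level would be rejected at 0.7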