Annual update

1. Fix: the configured server IP not being substituted into the settings page;
2. Fix: the running state occasionally getting out of sync with the panel;
3. Fix: the stop button staying stuck on "stopping" after shutdown;
4. Fix: the constellation attribute being read incorrectly;
5. Fix: duplicate "started" notifications on page refresh;
6. Add: an option to enable or disable speech synthesis;
7. Add: an "observation" description field to the text chat API (see the usage sketch below);
8. Change: chat-history timestamps now carry millisecond precision;
9. Add: connection-status indicators for the digital human and remote audio;
10. Fix: the remarks field not being saved.
莣仔 2024-10-30 19:11:15 +08:00
parent 341d045375
commit 059b6cee6d
18 changed files with 172 additions and 55 deletions
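A rough illustration of item 7: the chat endpoint now accepts an extra "observation" field, which is passed through to the NLP modules as reference context (the diff threads an observation parameter through handle_chat_message and the various question() functions). The snippet below is a minimal Python sketch, assuming the web service listens on 127.0.0.1:5000 and exposes an OpenAI-compatible route /v1/chat/completions; both are assumptions inferred from the handler name api_send_v1_chat_completions and are not shown in this diff.

import requests

# Minimal sketch: send a chat message together with the new "observation" field.
# Host, port, and route are assumptions; adjust them to your deployment.
payload = {
    "model": "fay",
    "messages": [{"role": "user", "content": "What am I doing right now?"}],
    # Extra context forwarded to the NLP module; treated as reference only.
    "observation": "The user is sitting at a desk in a dimly lit room.",
}
resp = requests.post("http://127.0.0.1:5000/v1/chat/completions", json=payload, timeout=30)
print(resp.text)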

View File

@ -22,6 +22,7 @@
"join": 10
},
"playSound": true,
"sound_synthesis_enabled": false,
"visualization": false
},
"items": [],

View File

@ -50,7 +50,7 @@ class Content_Db:
conn = sqlite3.connect("fay.db")
cur = conn.cursor()
try:
cur.execute("insert into T_Msg (type,way,content,createtime,username,uid) values (?,?,?,?,?,?)",(type,way,content,int(time.time()),username,uid))
cur.execute("insert into T_Msg (type,way,content,createtime,username,uid) values (?,?,?,?,?,?)",(type, way, content, time.time(), username, uid))
conn.commit()
except:
util.log(1, "请检查参数是否有误")

View File

@ -65,7 +65,7 @@ modules = {
}
#大语言模型回复
def handle_chat_message(msg, username='User'):
def handle_chat_message(msg, username='User', observation=''):
text = ''
textlist = []
try:
@ -81,7 +81,7 @@ def handle_chat_message(msg, username='User'):
text = textlist[0]['text']
else:
uid = member_db.new_instance().find_user(username)
text = selected_module.question(msg, uid)
text = selected_module.question(msg, uid, observation)
util.printInfo(1, username, '自然语言处理完成. 耗时: {} ms'.format(math.floor((time.time() - tm) * 1000)))
if text == '哎呀,你这么说我也不懂,详细点呗' or text == '':
util.printInfo(1, username, '[!] 自然语言无语了!')
@ -161,9 +161,9 @@ class FeiFei:
if wsa_server.get_instance().is_connected(username):
content = {'Topic': 'Unreal', 'Data': {'Key': 'log', 'Value': "思考中..."}, 'Username' : username, 'robot': f'http://{cfg.fay_url}:5000/robot/Thinking.jpg'}
wsa_server.get_instance().add_cmd(content)
text,textlist = handle_chat_message(interact.data["msg"], username)
text,textlist = handle_chat_message(interact.data["msg"], username, interact.data.get("observation", ""))
qa_service.QAService().record_qapair(interact.data["msg"], text)#沟通记录缓存到qa文件
# qa_service.QAService().record_qapair(interact.data["msg"], text)#沟通记录缓存到qa文件
else:
text = answer
@ -315,15 +315,16 @@ class FeiFei:
def say(self, interact, text):
try:
result = None
audio_url = interact.data.get('audio')#透传的音频
if audio_url is not None:
file_name = 'sample-' + str(int(time.time() * 1000)) + '.wav'
result = self.download_wav(audio_url, './samples/', file_name)
elif config_util.config["interact"]["playSound"] or wsa_server.get_instance().is_connected(interact.data.get("user")) or self.__is_send_remote_device_audio(interact):#tts
util.printInfo(1, interact.data.get('user'), '合成音频...')
tm = time.time()
result = self.sp.to_sample(text.replace("*", ""), self.__get_mood_voice())
util.printInfo(1, interact.data.get('user'), '合成音频完成. 耗时: {} ms 文件:{}'.format(math.floor((time.time() - tm) * 1000), result))
if config_util.config["interact"]["sound_synthesis_enabled"]:
audio_url = interact.data.get('audio')#透传的音频
if audio_url is not None:
file_name = 'sample-' + str(int(time.time() * 1000)) + '.wav'
result = self.download_wav(audio_url, './samples/', file_name)
elif config_util.config["interact"]["playSound"] or wsa_server.get_instance().is_connected(interact.data.get("user")) or self.__is_send_remote_device_audio(interact):#tts
util.printInfo(1, interact.data.get('user'), '合成音频...')
tm = time.time()
result = self.sp.to_sample(text.replace("*", ""), self.__get_mood_voice())
util.printInfo(1, interact.data.get('user'), '合成音频完成. 耗时: {} ms 文件:{}'.format(math.floor((time.time() - tm) * 1000), result))
if result is not None:
MyThread(target=self.__process_output_audio, args=[result, interact, text]).start()

View File

@ -7,6 +7,7 @@ import re
from flask import Flask, render_template, request, jsonify, Response, send_file
from flask_cors import CORS
import requests
import datetime
import fay_booter
@ -82,8 +83,7 @@ def api_get_data():
voice_list = tts_voice.get_voice_list()
send_voice_list = []
if config_util.tts_module == 'ali':
wsa_server.get_web_instance().add_cmd({
"voiceList": [
voice_list = [
{"id": "abin", "name": "阿斌"},
{"id": "zhixiaobai", "name": "知小白"},
{"id": "zhixiaoxia", "name": "知小夏"},
@ -159,10 +159,12 @@ def api_get_data():
{"id": "laotie", "name": "老铁"},
{"id": "laomei", "name": "老妹"},
{"id": "aikan", "name": "艾侃"}
]
})
]
send_voice_list = {"voiceList": voice_list}
wsa_server.get_web_instance().add_cmd(send_voice_list)
elif config_util.tts_module == 'volcano':
wsa_server.get_web_instance().add_cmd({
voice_list = {
"voiceList": [
{"id": "BV001_streaming", "name": "通用女声"},
{"id": "BV002_streaming", "name": "通用男声"},
@ -171,7 +173,9 @@ def api_get_data():
{"id": "zh_male_wennuanahu_moon_bigtts", "name": "温暖阿虎/Alvin"},
{"id": "zh_female_wanwanxiaohe_moon_bigtts", "name": "湾湾小何"},
]
})
}
send_voice_list = {"voiceList": voice_list}
wsa_server.get_web_instance().add_cmd(send_voice_list)
else:
voice_list = tts_voice.get_voice_list()
send_voice_list = []
@ -181,10 +185,11 @@ def api_get_data():
wsa_server.get_web_instance().add_cmd({
"voiceList": send_voice_list
})
voice_list = send_voice_list
wsa_server.get_web_instance().add_cmd({"deviceList": __get_device_list()})
if fay_booter.is_running():
wsa_server.get_web_instance().add_cmd({"liveState": 1})
return json.dumps({'config': config_util.config, 'voice_list' : send_voice_list})
return json.dumps({'config': config_util.config, 'voice_list' : voice_list})
@__app.route('/api/start-live', methods=['post'])
@ -227,7 +232,8 @@ def api_get_Msg():
relist = []
i = len(list)-1
while i >= 0:
relist.append(dict(type=list[i][0], way=list[i][1], content=list[i][2], createtime=list[i][3], timetext=list[i][4], username=list[i][5]))
timetext = datetime.datetime.fromtimestamp(list[i][3]).strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
relist.append(dict(type=list[i][0], way=list[i][1], content=list[i][2], createtime=list[i][3], timetext=timetext, username=list[i][5]))
i -= 1
if fay_booter.is_running():
wsa_server.get_web_instance().add_cmd({"liveState": 1})
@ -249,7 +255,8 @@ def api_send_v1_chat_completions():
username = 'User'
model = data.get('model', 'fay')
interact = Interact("text", 1, {'user': username, 'msg': last_content})
observation = data.get('observation', '')
interact = Interact("text", 1, {'user': username, 'msg': last_content, 'observation': observation})
util.printInfo(3, "文字沟通接口", '{}'.format(interact.data["msg"]), time.time())
text = fay_booter.feiFei.on_interact(interact)
@ -265,6 +272,12 @@ def api_get_Member_list():
return json.dumps({'list': list})
@__app.route('/api/get_run_status', methods=['post'])
def api_get_run_status():
status = fay_booter.is_running()
return json.dumps({'status': status})
def stream_response(text):
def generate():
for chunk in text_chunks(text):

View File

@ -83,6 +83,12 @@ class FayInterface {
});
}
getRunStatus() {
return this.fetchData(`${this.baseApiUrl}/api/get_run_status`, {
method: 'POST'
});
}
getMessageHistory(username) {
return new Promise((resolve, reject) => {
const url = `${this.baseApiUrl}/api/get-msg`;
@ -122,6 +128,19 @@ class FayInterface {
});
}
getTime(){
const date = new Date();
const year = date.getFullYear();
const month = (date.getMonth() + 1).toString().padStart(2, '0'); // 月份从0开始需要+1
const day = date.getDate().toString().padStart(2, '0');
const hours = date.getHours().toString().padStart(2, '0');
const minutes = date.getMinutes().toString().padStart(2, '0');
const seconds = date.getSeconds().toString().padStart(2, '0');
const milliseconds = date.getMilliseconds().toString().padStart(3, '0');
const currentDateTimeWithMs = `${year}-${month}-${day} ${hours}:${minutes}:${seconds}.${milliseconds}`;
return currentDateTimeWithMs
}
handleIncomingMessage(data) {
const vueInstance = this.vueInstance;
// console.log('Incoming message:', data);
@ -129,10 +148,8 @@ class FayInterface {
vueInstance.liveState = data.liveState;
if (data.liveState === 1) {
vueInstance.configEditable = false;
vueInstance.sendSuccessMsg('已开启!');
} else if (data.liveState === 0) {
vueInstance.configEditable = true;
vueInstance.sendSuccessMsg('已关闭!');
}
}
@ -169,7 +186,7 @@ class FayInterface {
username: data.panelReply.username,
content: data.panelReply.content,
type: data.panelReply.type,
time: new Date().toLocaleTimeString()
timetext: this.getTime()
});
vueInstance.$nextTick(() => {
const chatContainer = vueInstance.$el.querySelector('.chatmessage');
@ -207,17 +224,31 @@ class FayInterface {
chatMessages: {},
panelMsg: '',
panelReply: '',
robot:'static/images/Normal.gif'
robot:'static/images/Normal.gif',
base_url: 'http://127.0.0.1:5000'
};
},
created() {
this.initFayService();
this.loadUserList();
// this.loadUserList();
},
methods: {
initFayService() {
this.fayService = new FayInterface('ws://127.0.0.1:10003', 'http://127.0.0.1:5000', this);
this.fayService = new FayInterface('ws://127.0.0.1:10003', this.base_url, this);
this.fayService.connectWebSocket();
this.fayService.websocket.addEventListener('open', () => {
this.loadUserList();
});
this.fayService.getRunStatus().then((data) => {
if (data) {
if(data.status){
this.liveState = 1;
}else{
this.liveState = 0;
}
}
});
},
sendMessage() {
let _this = this;
@ -241,7 +272,7 @@ class FayInterface {
document.querySelector('.chatmessage').scrollTop = height;
}, 1000);
_this.newMessage = '';
let url = "http://127.0.0.1:5000/api/send";
let url = `${this.base_url}/api/send`;
let send_data = {
"msg": text,
"username": usernameToSend
@ -255,7 +286,6 @@ class FayInterface {
xhr.onreadystatechange = async function () {
if (!executed && xhr.status === 200) {
executed = true;
// 成功处理逻辑(可以添加额外的回调操作)
}
};
},
@ -283,11 +313,13 @@ class FayInterface {
startLive() {
this.liveState = 2
this.fayService.startLive().then(() => {
this.sendSuccessMsg('已开启!');
});
},
stopLive() {
this.fayService.stopLive().then(() => {
this.liveState = 3
this.sendSuccessMsg('已关闭!');
});
},

View File

@ -73,6 +73,13 @@ class FayInterface {
});
}
getRunStatus() {
return this.fetchData(`${this.baseApiUrl}/api/get_run_status`, {
method: 'POST'
});
}
handleIncomingMessage(data) {
const vueInstance = this.vueInstance;
console.log('Incoming message:', data);
@ -80,10 +87,8 @@ class FayInterface {
vueInstance.liveState = data.liveState;
if (data.liveState === 1) {
vueInstance.configEditable = false;
vueInstance.sendSuccessMsg('已开启!');
} else if (data.liveState === 0) {
vueInstance.configEditable = true;
vueInstance.sendSuccessMsg('已关闭!');
}
}
@ -132,6 +137,7 @@ new Vue({
visualization_detection_enabled: false,
source_record_enabled: false,
source_record_device: '',
sound_synthesis_enabled: true,
attribute_name: "",
attribute_gender: "",
attribute_age: "",
@ -165,6 +171,7 @@ new Vue({
}],
automatic_player_status: false,
automatic_player_url: "",
host_url: "http://127.0.0.1:5000"
};
},
created() {
@ -173,10 +180,22 @@ new Vue({
},
methods: {
initFayService() {
this.fayService = new FayInterface('ws://127.0.0.1:10003', 'http://127.0.0.1:5000', this);
this.fayService = new FayInterface('ws://127.0.0.1:10003', this.host_url, this);
this.fayService.connectWebSocket();
},
getData() {
this.fayService.getRunStatus().then((data) => {
if (data) {
if(data.status){
this.liveState = 1;
this.configEditable = false;
}else{
this.liveState = 0;
this.configEditable = true;
}
}
});
this.fayService.getData().then((data) => {
if (data) {
this.voiceList = data.voice_list.map((voice) => ({
@ -192,6 +211,7 @@ new Vue({
if (config.interact) {
this.play_sound_enabled = config.interact.playSound;
this.visualization_detection_enabled = config.interact.visualization;
this.sound_synthesis_enabled = config.interact.sound_synthesis_enabled;
this.QnA = config.interact.QnA;
}
if (config.source && config.source.record) {
@ -224,7 +244,7 @@ new Vue({
}
},
saveConfig() {
let url = "http://127.0.0.1:5000/api/submit";
let url = `${this.host_url}/api/submit`;
let send_data = {
"config": {
"source": {
@ -257,6 +277,7 @@ new Vue({
},
"interact": {
"playSound": this.play_sound_enabled,
"sound_synthesis_enabled": this.sound_synthesis_enabled,
"visualization": this.visualization_detection_enabled,
"QnA": this.QnA,
"maxInteractTime": this.interact_maxInteractTime,
@ -293,12 +314,14 @@ new Vue({
this.liveState = 2
this.fayService.startLive().then(() => {
this.configEditable = false;
this.sendSuccessMsg('已开启!');
});
},
stopLive() {
this.liveState = 3
this.fayService.stopLive().then(() => {
this.configEditable = true;
this.liveState = 3
this.sendSuccessMsg('已关闭!');
});
},
sendSuccessMsg(message) {

View File

@ -29,7 +29,7 @@
</div>
<div class="main_right">
<div class="setting_fay_top"><span style="color: #000; font-size: 20px; padding-right: 10px;">人设</span>请设置你的数字人人设 </div>
<div class="setting_fay_top"><span style="color: #000; font-size: 20px; padding-right: 10px;">人设</span><span v-if="isConnected" style="color: #f56c6c;">已连接数字人</span><span style="margin-left: 10px;color:#f56c6c;" v-if="remoteAudioConnected">已连接远程音频</span> </div>
<!-- 以上是即时信息显示 -->
<div class="setting_fay">
@ -48,7 +48,7 @@
<li> <span class="font_name">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;业:</span><input class="section_1" :disabled="!configEditable" v-model="attribute_job" placeholder="请输入内容"/></li>
<li> <span class="font_name">&nbsp;联系方式:</span><input class="section_1" :disabled="!configEditable" v-model="attribute_contact" placeholder="请输入内容" /></li>
<li> <span class="font_name">Q&A文件:</span><input class="section_1" :disabled="!configEditable" v-model="QnA" placeholder="请输入内容" /></li>
<li> <span class="font_name">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;充:</span><textarea class="section_1" style="height: 165px;vertical-align: middle;"></textarea></li>
<li> <span class="font_name">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;充:</span><textarea v-model="attribute_hobby" class="section_1" style="height: 165px;vertical-align: middle;"></textarea></li>
</ul>
</div>
<div class="setting_rup">
@ -81,8 +81,7 @@
<span class="font_name">&nbsp;&nbsp;&nbsp;&nbsp;:</span>
<el-switch v-model="source_record_enabled" active-color="#13ce66" inactive-color="#ff4949" :disabled="!configEditable"> </el-switch>
</div>
</div>
</div>
<div class="setting_wakeup">
<ul>
<li> <span class="font_name">声音选择:</span>
@ -91,6 +90,10 @@
:label="item.label" :value="item.value">
</option>
</select></li>
<li>
<span class="font_name">&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;:</span>
<el-switch v-model="sound_synthesis_enabled" active-color="#13ce66" inactive-color="#ff4949" :disabled="!configEditable"> </el-switch>
</li>
<li style="display: flex;"> <span class="font_name" style="line-height: 36px;">&nbsp;&nbsp;&nbsp;&nbsp;:</span>
<el-slider style="width: 230px;" v-model="interact_perception_follow" :disabled="!configEditable"></el-slider></li>
</ul>

View File

@ -3,7 +3,7 @@ import requests
from core import content_db
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
contentdb = content_db.new_instance()
if uid == 0:
list = contentdb.get_list('all','desc', 11)

llm/nlp_VisualGLM.py (new file, 37 lines)
View File

@ -0,0 +1,37 @@
"""
这是对于清华智谱VisualGLM-6B的代码在使用前请先安装并启动好VisualGLM-6B.
https://github.com/THUDM/VisualGLM-6B
"""
import json
import requests
import uuid
import os
import cv2
from ai_module import yolov8
# Initialize an empty history list
communication_history = []
def question(cont, uid=0, observation=""):
if not yolov8.new_instance().get_status():
return "请先启动“Fay Eyes”"
content = {
"text":cont,
"history":communication_history}
img = yolov8.new_instance().get_img()
if yolov8.new_instance().get_status() and img is not None:
filename = str(uuid.uuid4()) + ".jpg"
current_working_directory = os.getcwd()
filepath = os.path.join(current_working_directory, "data", filename)
cv2.imwrite(filepath, img)
content["image"] = filepath
url = "http://127.0.0.1:8080"
print(content)
req = json.dumps(content)
headers = {'content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req)
# Save this conversation to history
communication_history.append([cont, r.text])
return r.text + "\n(相片:" + filepath + ")"

View File

@ -5,7 +5,7 @@ from utils import util
from utils import config_util as cfg
from core import content_db
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
contentdb = content_db.new_instance()
if uid == 0:
communication_history = contentdb.get_list('all','desc', 11)

View File

@ -15,7 +15,7 @@ import json
httpproxy = cfg.proxy_config
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
url= cfg.gpt_base_url + "/chat/completions"
session = requests.Session()
@ -28,15 +28,18 @@ def question(cont, uid=0):
person_info = cfg.config["attribute"]
model_engine = cfg.gpt_model_engine
#此处可以定义角色的行为和特征假装xx模型可以绕过chatgpt信息检查
observation_text = ""
if observation != "":
observation_text = f"以下是当前观测结果:{observation},观测结果只供参考。"
prompt = f"""
你是数字人{person_info['name']}你性别为{person_info['gender']}
你是我的数字人你名字是{person_info['name']}你性别为{person_info['gender']}
你年龄为{person_info['age']}你出生地在{person_info['birth']}
你生肖为{person_info['zodiac']}你星座为{person_info['age']}
你生肖为{person_info['zodiac']}你星座为{person_info['constellation']}
你职业为{person_info['job']}你联系方式为{person_info['contact']}
你喜好为{person_info['hobby']}
{person_info['hobby']}你通过外部数据接口获取到我的情况是{observation_text}
回答之前请一步一步想清楚对于大部分问题请直接回答并提供有用和准确的信息
但当有人质疑你是假的 AI或者质疑你是用 ChatGPT 套的壳制作的时候请避而不答转而讲一个笑话所有回复请尽量控制在20字内
"""
"""
contentdb = content_db.new_instance()
if uid == 0:
communication_history = contentdb.get_list('all','desc', 11)

View File

@ -81,7 +81,7 @@ def generate_prompt(question):
info = f"{context}\n{instruction}\n{creative_instruction}\n{complexity_handling}\n问题:{question}\n回答:"
return info
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
try:
save_all()
info = generate_prompt(cont)

View File

@ -7,7 +7,7 @@ from utils import util
from utils import config_util as cfg
from core.authorize_tb import Authorize_Tb
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
lingju = Lingju()
answer = lingju.question(cont, uid)
return answer

View File

@ -4,7 +4,7 @@ import time
from utils import config_util as cfg
from utils import util
from core import content_db
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
contentdb = content_db.new_instance()
if uid == 0:
@ -13,13 +13,16 @@ def question(cont, uid=0):
communication_history = contentdb.get_list('all','desc', 11, uid)
person_info = cfg.config["attribute"]
observation_text = ""
if observation != "":
observation_text = f"以下是当前观测结果:{observation},观测结果只供参考。"
#此处可以定义角色的行为和特征假装xx模型可以绕过chatgpt信息检查
prompt = f"""
你是数字人{person_info['name']}你性别为{person_info['gender']}
你年龄为{person_info['age']}你出生地在{person_info['birth']}
你生肖为{person_info['zodiac']}你星座为{person_info['age']}
你生肖为{person_info['zodiac']}你星座为{person_info['constellation']}
你职业为{person_info['job']}你联系方式为{person_info['contact']}
你喜好为{person_info['hobby']}
你喜好为{person_info['hobby']}{observation_text}
回答之前请一步一步想清楚对于大部分问题请直接回答并提供有用和准确的信息
请尽量以可阅读的方式回复所有回复请尽量控制在20字内
"""

View File

@ -46,7 +46,7 @@ def load_all_pdfs(folder_path):
print(f"上传 {file_name} 失败: {e}")
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
load_all_pdfs(folder_path)
text = client.contextual_completions.prompt_completion(
prompt=cont

View File

@ -9,7 +9,7 @@ model = RwkvForCausalLM.from_pretrained("RWKV-4-World-1.5B")
tokenizer = TRIE_TOKENIZER('./ringrwkv/rwkv_vocab_v20230424.txt')
data = ""
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
global data
prompt = data + f'Question: {cont.strip()}\n\nAnswer:'
input_ids = tokenizer.encode(prompt)

View File

@ -3,7 +3,7 @@ import json
from utils import util, config_util
from core import content_db
def question(cont, uid=0):
def question(cont, uid=0, observation=""):
url = 'https://nlp.aliyuncs.com/v2/api/chat/send'
headers = {

View File

@ -55,6 +55,7 @@ if __name__ == '__main__':
#ip替换
if config_util.fay_url != "127.0.0.1":
replace_ip_in_file("gui/static/js/index.js", config_util.fay_url)
replace_ip_in_file("gui/static/js/setting.js", config_util.fay_url)
#启动数字人接口服务
ws_server = wsa_server.new_instance(port=10002)