mirror of
https://github.com/xszyou/Fay.git
synced 2026-03-12 17:51:28 +08:00
年番更新
1、修复UE连接数字人接口不稳定问题; 2、修复运行途中关闭麦克风依然监听问题; 3、增加打招呼接口; 4、智谱清流llm对接;
This commit is contained in:
@@ -29,6 +29,8 @@ from llm import nlp_xingchen
|
||||
from llm import nlp_ollama_api
|
||||
from llm import nlp_coze
|
||||
from llm.agent import fay_agent
|
||||
from llm import nlp_qingliu
|
||||
|
||||
from core import member_db
|
||||
import threading
|
||||
import functools
|
||||
@@ -60,7 +62,8 @@ modules = {
|
||||
"nlp_xingchen": nlp_xingchen,
|
||||
"nlp_ollama_api": nlp_ollama_api,
|
||||
"nlp_coze": nlp_coze,
|
||||
"nlp_agent": fay_agent
|
||||
"nlp_agent": fay_agent,
|
||||
"nlp_qingliu": nlp_qingliu
|
||||
}
|
||||
|
||||
#大语言模型回复
|
||||
@@ -438,7 +441,7 @@ class FeiFei:
|
||||
|
||||
#发送音频给数字人接口
|
||||
if wsa_server.get_instance().is_connected(interact.data.get("user")):
|
||||
content = {'Topic': 'Unreal', 'Data': {'Key': 'audio', 'Value': os.path.abspath(file_url), 'HttpValue': f'http://{cfg.fay_url}:5000/audio/' + os.path.basename(file_url), 'Text': text, 'Time': audio_length, 'Type': 'interact' if interact.interact_type == 1 else 'auto_play'}, 'Username' : interact.data.get('user')}
|
||||
content = {'Topic': 'Unreal', 'Data': {'Key': 'audio', 'Value': os.path.abspath(file_url), 'HttpValue': f'http://{cfg.fay_url}:5000/audio/' + os.path.basename(file_url), 'Text': text, 'Time': audio_length, 'Type': 'hello' if interact.interleaver == 'hello' else ('interact' if interact.interact_type == 1 else 'auto_play')}, 'Username' : interact.data.get('user')}
|
||||
#计算lips
|
||||
if platform.system() == "Windows":
|
||||
try:
|
||||
|
||||
@@ -205,7 +205,7 @@ class Recorder:
|
||||
try:
|
||||
cfg.load_config()
|
||||
record = cfg.config['source']['record']
|
||||
if not record['enabled'] and not self.is_remote:
|
||||
if not record['enabled'] and not self.is_remote():
|
||||
time.sleep(1)
|
||||
continue
|
||||
self.is_reading = True
|
||||
|
||||
@@ -27,6 +27,7 @@ class MyServer:
|
||||
# 接收处理
|
||||
async def __consumer_handler(self, websocket, path):
|
||||
username = None
|
||||
output_setting = None
|
||||
try:
|
||||
async for message in websocket:
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
@@ -476,6 +476,18 @@ def serve_gif(filename):
|
||||
else:
|
||||
return jsonify({'error': '文件未找到'}), 404
|
||||
|
||||
#打招呼
|
||||
@__app.route('/to_greet', methods=['POST'])
|
||||
def to_greet():
|
||||
data = request.get_json()
|
||||
username = data.get('username', 'User')
|
||||
observation = data.get('observation', '')
|
||||
interact = Interact("hello", 1, {'user': username, 'msg': '按观测要求打个招呼', 'observation': str(observation)})
|
||||
text = fay_booter.feiFei.on_interact(interact)
|
||||
return jsonify({'status': 'success', 'data': text, 'msg': '已进行打招呼'}), 200
|
||||
|
||||
|
||||
|
||||
def run():
|
||||
server = pywsgi.WSGIServer(('0.0.0.0',5000), __app)
|
||||
server.serve_forever()
|
||||
|
||||
155
llm/nlp_qingliu.py
Normal file
155
llm/nlp_qingliu.py
Normal file
@@ -0,0 +1,155 @@
|
||||
import json
|
||||
import requests
|
||||
from utils import util
|
||||
from core.authorize_tb import Authorize_Tb
|
||||
import os
|
||||
|
||||
def question(cont, uid=0, observation=""):
|
||||
bigmodel = BigModel()
|
||||
answer = bigmodel.question(cont, uid)
|
||||
return answer
|
||||
|
||||
class BigModel:
|
||||
|
||||
def __init__(self):
|
||||
|
||||
#服务地址:https://open.bigmodel.cn/
|
||||
self.api_key = ""#填写对应的api_key
|
||||
self.app_id = ""#写对应的智能体id
|
||||
|
||||
self.authorize_tb = Authorize_Tb()
|
||||
self.conversation_file = "cache_data/bigmodel_conversation_data.json"
|
||||
|
||||
def question(self, cont, uid):
|
||||
self.userid = uid
|
||||
conversation_id = self.__get_conversation_id()
|
||||
if not conversation_id:
|
||||
conversation_id = self.__create_conversation()
|
||||
if conversation_id:
|
||||
self.__store_conversation_id(conversation_id)
|
||||
else:
|
||||
return "网络异常,开了个小差,请稍后再问。"
|
||||
|
||||
request_id = self.__send_message(conversation_id, cont)
|
||||
if not request_id:
|
||||
return "网络异常,开了个小差,请稍后再问。"
|
||||
|
||||
answer = self.__get_response(request_id)
|
||||
return answer
|
||||
|
||||
def __get_conversation_id(self):
|
||||
if os.path.exists(self.conversation_file):
|
||||
with open(self.conversation_file, "r", encoding="utf-8") as f:
|
||||
try:
|
||||
data = json.load(f)
|
||||
except json.JSONDecodeError:
|
||||
data = {}
|
||||
return data.get(str(self.userid))
|
||||
return None
|
||||
|
||||
def __store_conversation_id(self, conversation_id):
|
||||
data = {}
|
||||
# 如果文件存在,读取内容
|
||||
if os.path.exists(self.conversation_file):
|
||||
with open(self.conversation_file, "r", encoding="utf-8") as f:
|
||||
try:
|
||||
data = json.load(f)
|
||||
except json.JSONDecodeError:
|
||||
data = {}
|
||||
# 如果文件不存在,data则为{}
|
||||
|
||||
# 更新/新增当前userid的conversation_id
|
||||
data[str(self.userid)] = conversation_id
|
||||
|
||||
# 写回文件
|
||||
with open(self.conversation_file, "w", encoding="utf-8") as f:
|
||||
json.dump(data, f, ensure_ascii=False, indent=4)
|
||||
|
||||
def __create_conversation(self):
|
||||
url = f"https://open.bigmodel.cn/api/llm-application/open/v2/application/{self.app_id}/conversation"
|
||||
headers = {
|
||||
'Authorization': f'Bearer {self.api_key}',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
try:
|
||||
response = requests.post(url, headers=headers)
|
||||
if response.status_code != 200:
|
||||
util.log(1, f"创建会话失败: {response.text}")
|
||||
return None
|
||||
data = response.json()
|
||||
if data['code'] != 200:
|
||||
util.log(1, f"创建会话失败: {data['message']}")
|
||||
return None
|
||||
return data['data']['conversation_id']
|
||||
except Exception as e:
|
||||
util.log(1, f"创建会话异常: {str(e)}")
|
||||
return None
|
||||
|
||||
def __send_message(self, conversation_id, message):
|
||||
url = "https://open.bigmodel.cn/api/llm-application/open/v2/application/generate_request_id"
|
||||
headers = {
|
||||
'Authorization': f'Bearer {self.api_key}',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
payload = {
|
||||
"app_id": self.app_id,
|
||||
"conversation_id": conversation_id,
|
||||
"key_value_pairs": [
|
||||
{
|
||||
"id": "user",
|
||||
"type": "input",
|
||||
"name": "用户提问",
|
||||
"value": message
|
||||
}
|
||||
]
|
||||
}
|
||||
try:
|
||||
response = requests.post(url, headers=headers, data=json.dumps(payload))
|
||||
if response.status_code != 200:
|
||||
util.log(1, f"发送消息失败: {response.text}")
|
||||
return None
|
||||
data = response.json()
|
||||
if data['code'] != 200:
|
||||
util.log(1, f"发送消息失败: {data['message']}")
|
||||
return None
|
||||
return data['data']['id']
|
||||
except Exception as e:
|
||||
util.log(1, f"发送消息异常: {str(e)}")
|
||||
return None
|
||||
|
||||
def __get_response(self, request_id):
|
||||
url = f"https://open.bigmodel.cn/api/llm-application/open/v2/model-api/{request_id}/sse-invoke"
|
||||
headers = {
|
||||
'Authorization': f'Bearer {self.api_key}',
|
||||
'Accept': 'text/event-stream'
|
||||
}
|
||||
try:
|
||||
with requests.post(url, headers=headers, stream=True, timeout=60) as response:
|
||||
if response.status_code != 200:
|
||||
util.log(1, f"获取响应失败: {response.text}")
|
||||
return "网络异常,开了个小差,请稍后再问。"
|
||||
answer = []
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith("data:"):
|
||||
data_str = decoded_line.replace("data:", "").strip()
|
||||
if data_str:
|
||||
try:
|
||||
data_json = json.loads(data_str)
|
||||
if 'usage' in data_json:
|
||||
break
|
||||
if 'msg' in data_json:
|
||||
msg = data_json['msg'].replace('#', '').replace('*', '')
|
||||
answer.append(msg)
|
||||
|
||||
except json.JSONDecodeError as je:
|
||||
util.log(1, f"JSON解析错误: {str(je)}")
|
||||
|
||||
return ''.join(answer) if answer else "网络异常,开了个小差,请稍后再问。"
|
||||
except requests.exceptions.Timeout:
|
||||
util.log(1, "获取响应超时。")
|
||||
return "网络异常,开了个小差,请稍后再问。"
|
||||
except Exception as e:
|
||||
util.log(1, f"获取响应异常: {str(e)}")
|
||||
return "网络异常,开了个小差,请稍后再问。"
|
||||
@@ -43,7 +43,7 @@ baidu_emotion_secret_key=
|
||||
|
||||
|
||||
|
||||
#NLP多选一:agent、lingju、gpt、rasa、VisualGLM、rwkv、xingchen、langchain 、ollama_api、privategpt、coze
|
||||
#NLP多选一:agent、lingju、gpt、rasa、VisualGLM、rwkv、xingchen、langchain 、ollama_api、privategpt、coze、qingliu(需到nlp_qingliu配置参数)
|
||||
chat_module= gpt
|
||||
|
||||
#灵聚 服务密钥(NLP多选1) https://open.lingju.ai
|
||||
|
||||
Reference in New Issue
Block a user