mirror of
https://github.com/xszyou/Fay.git
synced 2026-03-12 17:51:28 +08:00
1.修复append_to_buffer bug;
2.文字沟通接口新增透传llm支持; 3.文字沟通接口支持不回复控制; 4.获取消息接口支持不限制username。
This commit is contained in:
1693
core/fay_core.py
1693
core/fay_core.py
File diff suppressed because it is too large
Load Diff
@@ -74,9 +74,9 @@ def __get_template():
|
||||
except Exception as e:
|
||||
return f"Error rendering template: {e}", 500
|
||||
|
||||
def __get_device_list():
|
||||
try:
|
||||
if config_util.start_mode == 'common':
|
||||
def __get_device_list():
|
||||
try:
|
||||
if config_util.start_mode == 'common':
|
||||
audio = pyaudio.PyAudio()
|
||||
device_list = []
|
||||
for i in range(audio.get_device_count()):
|
||||
@@ -86,12 +86,33 @@ def __get_device_list():
|
||||
return list(set(device_list))
|
||||
else:
|
||||
return []
|
||||
except Exception as e:
|
||||
print(f"Error getting device list: {e}")
|
||||
return []
|
||||
|
||||
@__app.route('/api/submit', methods=['post'])
|
||||
def api_submit():
|
||||
except Exception as e:
|
||||
print(f"Error getting device list: {e}")
|
||||
return []
|
||||
|
||||
def _as_bool(value):
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if value is None:
|
||||
return False
|
||||
if isinstance(value, (int, float)):
|
||||
return value != 0
|
||||
if isinstance(value, str):
|
||||
return value.strip().lower() in ("1", "true", "yes", "y", "on")
|
||||
return False
|
||||
|
||||
def _build_llm_url(base_url: str) -> str:
|
||||
if not base_url:
|
||||
return ""
|
||||
url = base_url.rstrip("/")
|
||||
if url.endswith("/chat/completions"):
|
||||
return url
|
||||
if url.endswith("/v1"):
|
||||
return url + "/chat/completions"
|
||||
return url + "/v1/chat/completions"
|
||||
|
||||
@__app.route('/api/submit', methods=['post'])
|
||||
def api_submit():
|
||||
data = request.values.get('data')
|
||||
if not data:
|
||||
return jsonify({'result': 'error', 'message': '未提供数据'})
|
||||
@@ -288,23 +309,27 @@ def api_send():
|
||||
|
||||
# 获取指定用户的消息记录(支持分页)
|
||||
@__app.route('/api/get-msg', methods=['post'])
|
||||
def api_get_Msg():
|
||||
try:
|
||||
data = request.form.get('data')
|
||||
if data is None:
|
||||
data = request.get_json()
|
||||
else:
|
||||
data = json.loads(data)
|
||||
uid = member_db.new_instance().find_user(data["username"])
|
||||
limit = data.get("limit", 30) # 默认每页30条
|
||||
offset = data.get("offset", 0) # 默认从0开始
|
||||
contentdb = content_db.new_instance()
|
||||
if uid == 0:
|
||||
return json.dumps({'list': [], 'total': 0, 'hasMore': False})
|
||||
else:
|
||||
# 获取总数用于判断是否还有更多
|
||||
total = contentdb.get_message_count(uid)
|
||||
list = contentdb.get_list('all', 'desc', limit, uid, offset)
|
||||
def api_get_Msg():
|
||||
try:
|
||||
data = request.form.get('data')
|
||||
if data is None:
|
||||
data = request.get_json(silent=True) or {}
|
||||
else:
|
||||
data = json.loads(data)
|
||||
if not isinstance(data, dict):
|
||||
data = {}
|
||||
username = data.get("username")
|
||||
limit = data.get("limit", 30) # 默认每页30条
|
||||
offset = data.get("offset", 0) # 默认从0开始
|
||||
contentdb = content_db.new_instance()
|
||||
uid = 0
|
||||
if username:
|
||||
uid = member_db.new_instance().find_user(username)
|
||||
if uid == 0:
|
||||
return json.dumps({'list': [], 'total': 0, 'hasMore': False})
|
||||
# 获取总数用于判断是否还有更多
|
||||
total = contentdb.get_message_count(uid)
|
||||
list = contentdb.get_list('all', 'desc', limit, uid, offset)
|
||||
relist = []
|
||||
i = len(list) - 1
|
||||
while i >= 0:
|
||||
@@ -329,23 +354,128 @@ def api_send_v1_chat_completions():
|
||||
data = request.get_json()
|
||||
if not data:
|
||||
return jsonify({'error': '未提供数据'})
|
||||
try:
|
||||
last_content = ""
|
||||
if 'messages' in data and data['messages']:
|
||||
last_message = data['messages'][-1]
|
||||
username = last_message.get('role', 'User')
|
||||
if username == 'user':
|
||||
username = 'User'
|
||||
try:
|
||||
model = data.get('model', 'fay')
|
||||
if model == 'llm':
|
||||
try:
|
||||
config_util.load_config()
|
||||
llm_url = _build_llm_url(config_util.gpt_base_url)
|
||||
api_key = config_util.key_gpt_api_key
|
||||
model_engine = config_util.gpt_model_engine
|
||||
except Exception as exc:
|
||||
return jsonify({'error': f'LLM config load failed: {exc}'}), 500
|
||||
|
||||
if not llm_url:
|
||||
return jsonify({'error': 'LLM base_url is not configured'}), 500
|
||||
|
||||
payload = dict(data)
|
||||
if payload.get('model') == 'llm' and model_engine:
|
||||
payload['model'] = model_engine
|
||||
|
||||
stream_requested = _as_bool(payload.get('stream', False))
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
if api_key:
|
||||
headers['Authorization'] = f'Bearer {api_key}'
|
||||
|
||||
try:
|
||||
if stream_requested:
|
||||
resp = requests.post(llm_url, headers=headers, json=payload, stream=True)
|
||||
|
||||
def generate():
|
||||
try:
|
||||
for line in resp.iter_lines(decode_unicode=True):
|
||||
if line is None:
|
||||
continue
|
||||
yield f"{line}\n"
|
||||
finally:
|
||||
resp.close()
|
||||
|
||||
return Response(
|
||||
generate(),
|
||||
status=resp.status_code,
|
||||
mimetype=resp.headers.get("Content-Type", "text/event-stream"),
|
||||
)
|
||||
|
||||
resp = requests.post(llm_url, headers=headers, json=payload, timeout=60)
|
||||
return Response(
|
||||
resp.content,
|
||||
status=resp.status_code,
|
||||
content_type=resp.headers.get("Content-Type", "application/json"),
|
||||
)
|
||||
except Exception as exc:
|
||||
return jsonify({'error': f'LLM request failed: {exc}'}), 500
|
||||
|
||||
last_content = ""
|
||||
if 'messages' in data and data['messages']:
|
||||
last_message = data['messages'][-1]
|
||||
username = last_message.get('role', 'User')
|
||||
if username == 'user':
|
||||
username = 'User'
|
||||
last_content = last_message.get('content', 'No content provided')
|
||||
else:
|
||||
last_content = 'No messages found'
|
||||
username = 'User'
|
||||
|
||||
model = data.get('model', 'fay')
|
||||
observation = data.get('observation', '')
|
||||
observation = data.get('observation', '')
|
||||
# 检查请求中是否指定了流式传输
|
||||
stream_requested = data.get('stream', False)
|
||||
if stream_requested or model == 'fay-streaming':
|
||||
stream_requested = data.get('stream', False)
|
||||
no_reply = _as_bool(data.get('no_reply', data.get('noReply', False)))
|
||||
if no_reply:
|
||||
interact = Interact("text", 1, {'user': username, 'msg': last_content, 'observation': str(observation), 'stream': bool(stream_requested), 'no_reply': True})
|
||||
util.printInfo(1, username, '[text chat no_reply]{}'.format(interact.data["msg"]), time.time())
|
||||
fay_booter.feiFei.on_interact(interact)
|
||||
if stream_requested or model == 'fay-streaming':
|
||||
def generate():
|
||||
message = {
|
||||
"id": "faystreaming-" + str(uuid.uuid4()),
|
||||
"object": "chat.completion.chunk",
|
||||
"created": int(time.time()),
|
||||
"model": model,
|
||||
"choices": [
|
||||
{
|
||||
"delta": {
|
||||
"content": ""
|
||||
},
|
||||
"index": 0,
|
||||
"finish_reason": "stop"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": len(last_content),
|
||||
"completion_tokens": 0,
|
||||
"total_tokens": len(last_content)
|
||||
},
|
||||
"system_fingerprint": "",
|
||||
"no_reply": True
|
||||
}
|
||||
yield f"data: {json.dumps(message)}\n\n"
|
||||
yield 'data: [DONE]\n\n'
|
||||
return Response(generate(), mimetype='text/event-stream')
|
||||
return jsonify({
|
||||
"id": "fay-" + str(uuid.uuid4()),
|
||||
"object": "chat.completion",
|
||||
"created": int(time.time()),
|
||||
"model": model,
|
||||
"choices": [
|
||||
{
|
||||
"index": 0,
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": ""
|
||||
},
|
||||
"logprobs": "",
|
||||
"finish_reason": "stop"
|
||||
}
|
||||
],
|
||||
"usage": {
|
||||
"prompt_tokens": len(last_content),
|
||||
"completion_tokens": 0,
|
||||
"total_tokens": len(last_content)
|
||||
},
|
||||
"system_fingerprint": "",
|
||||
"no_reply": True
|
||||
})
|
||||
if stream_requested or model == 'fay-streaming':
|
||||
interact = Interact("text", 1, {'user': username, 'msg': last_content, 'observation': str(observation), 'stream':True})
|
||||
util.printInfo(1, username, '[文字沟通接口(流式)]{}'.format(interact.data["msg"]), time.time())
|
||||
fay_booter.feiFei.on_interact(interact)
|
||||
|
||||
@@ -1851,19 +1851,22 @@ def question(content, username, observation=None):
|
||||
or messages_buffer[-1]['content'] != content
|
||||
):
|
||||
messages_buffer.append({"role": "user", "content": content})
|
||||
else:
|
||||
# 不隔离:按独立消息存储,保留用户名信息
|
||||
def append_to_buffer_multi(role: str, text_value: str, msg_username: str = "") -> None:
|
||||
if not text_value:
|
||||
return
|
||||
messages_buffer.append({"role": role, "content": text_value, "username": msg_username})
|
||||
if len(messages_buffer) > 60:
|
||||
del messages_buffer[:-60]
|
||||
|
||||
for record in history_records:
|
||||
msg_type, msg_text, msg_username = record
|
||||
if not msg_text:
|
||||
continue
|
||||
else:
|
||||
# 不隔离:按独立消息存储,保留用户名信息
|
||||
def append_to_buffer_multi(role: str, text_value: str, msg_username: str = "") -> None:
    """Append one message (with its originating username) to messages_buffer.

    Empty/None text is silently ignored. After appending, the buffer is
    trimmed in place so that only the most recent 60 entries survive.
    NOTE(review): `messages_buffer` is a closure/outer variable — presumably
    the shared conversation history list; confirm against the enclosing
    `question` function.
    """
    if not text_value:
        return
    entry = {"role": role, "content": text_value, "username": msg_username}
    messages_buffer.append(entry)
    if len(messages_buffer) > 60:
        # Keep only the newest 60 messages, mutating the list in place.
        messages_buffer[:] = messages_buffer[-60:]

def append_to_buffer(role: str, text_value: str) -> None:
    """Single-user convenience wrapper: append with an empty username."""
    append_to_buffer_multi(role, text_value, "")
|
||||
|
||||
for record in history_records:
|
||||
msg_type, msg_text, msg_username = record
|
||||
if not msg_text:
|
||||
continue
|
||||
if msg_type and msg_type.lower() in ('member', 'user'):
|
||||
append_to_buffer_multi("user", msg_text, msg_username)
|
||||
else:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import requests
|
||||
import json
|
||||
|
||||
def test_gpt(prompt):
|
||||
def test_gpt(prompt, username="张三", observation="", no_reply=False):
|
||||
url = 'http://127.0.0.1:5000/v1/chat/completions' # 替换为您的接口地址
|
||||
headers = {
|
||||
'Content-Type': 'application/json',
|
||||
@@ -10,11 +10,19 @@ def test_gpt(prompt):
|
||||
data = {
|
||||
'model': 'fay-streaming',
|
||||
'messages': [
|
||||
{'role': '张三', 'content': prompt}
|
||||
{'role': username, 'content': prompt}
|
||||
],
|
||||
'stream': True # 启用流式传输
|
||||
'stream': True, # 启用流式传输
|
||||
'observation': observation, # 观察数据
|
||||
'no_reply': no_reply
|
||||
}
|
||||
|
||||
print(f"[用户] {username}: {prompt}")
|
||||
if observation:
|
||||
print(f"[观察数据] {observation}")
|
||||
print("-" * 50)
|
||||
print("[Fay回复] ", end="")
|
||||
|
||||
response = requests.post(url, headers=headers, data=json.dumps(data), stream=True)
|
||||
|
||||
if response.status_code != 200:
|
||||
@@ -45,8 +53,57 @@ def test_gpt(prompt):
|
||||
else:
|
||||
print(f"\n收到未知格式的数据:{line}")
|
||||
|
||||
# 观察数据样本
|
||||
OBSERVATION_SAMPLES = {
|
||||
"张三": """识别到对话的人是张三
|
||||
认知状态:正常
|
||||
听力:正常
|
||||
视力:正常
|
||||
兴趣爱好:写代码、音乐、电影
|
||||
避免话题:学习成绩""",
|
||||
|
||||
"李奶奶": """识别到对话的人是李奶奶
|
||||
认知状态:轻度记忆衰退
|
||||
听力:需要大声说话
|
||||
视力:正常
|
||||
兴趣爱好:养花、看戏曲、聊家常
|
||||
避免话题:子女工作压力""",
|
||||
|
||||
"王叔叔": """识别到对话的人是王叔叔
|
||||
认知状态:正常
|
||||
听力:正常
|
||||
视力:老花眼
|
||||
兴趣爱好:钓鱼、下棋、看新闻
|
||||
避免话题:退休金""",
|
||||
|
||||
"小明": """识别到对话的人是小明
|
||||
认知状态:正常
|
||||
听力:正常
|
||||
视力:正常
|
||||
年龄:10岁
|
||||
兴趣爱好:玩游戏、看动画片、踢足球
|
||||
避免话题:考试分数、作业""",
|
||||
}
|
||||
|
||||
if __name__ == "__main__":
|
||||
user_input = "你好"
|
||||
print("GPT 的回复:")
|
||||
test_gpt(user_input)
|
||||
# 示例1:带观察数据的对话
|
||||
print("=" * 60)
|
||||
print("示例1:张三的对话(带观察数据)")
|
||||
print("=" * 60)
|
||||
test_gpt("你好,今天天气不错啊", username="张三", observation=OBSERVATION_SAMPLES["张三"])
|
||||
|
||||
print("\n")
|
||||
|
||||
# 示例2:不带观察数据的对话
|
||||
# print("=" * 60)
|
||||
# print("示例2:普通对话(不带观察数据)")
|
||||
# print("=" * 60)
|
||||
# test_gpt("你好", username="User", observation="")
|
||||
|
||||
# 示例3:李奶奶的对话
|
||||
# print("=" * 60)
|
||||
# print("示例3:李奶奶的对话")
|
||||
# print("=" * 60)
|
||||
# test_gpt("小菲啊,我今天有点闷", username="李奶奶", observation=OBSERVATION_SAMPLES["李奶奶"])
|
||||
|
||||
print("\n请求完成")
|
||||
|
||||
Reference in New Issue
Block a user