mirror of
https://github.com/zhayujie/chatgpt-on-wechat.git
synced 2026-03-12 18:01:30 +08:00
fix: complete openai_compat migration across all model bots
Replace all direct openai.error.* usages with the openai_compat compatibility layer to support openai>=1.0. Affected files: - models/chatgpt/chat_gpt_bot.py: fix isinstance checks (RateLimitError, Timeout, APIError, APIConnectionError) - models/openai/open_ai_bot.py: replace import + fix isinstance checks - models/ali/ali_qwen_bot.py: replace import + fix isinstance checks - models/modelscope/modelscope_bot.py: remove unused openai.error import The openai_compat layer (models/openai/openai_compat.py) already handles both openai<1.0 and openai>=1.0 gracefully. This completes the migration started in the existing PR #2688.
This commit is contained in:
@@ -5,7 +5,7 @@ import time
 from typing import List, Tuple

 import openai
-import openai.error
+from models.openai.openai_compat import RateLimitError, Timeout, APIError, APIConnectionError
 import broadscope_bailian
 from broadscope_bailian import ChatQaMessage

@@ -115,22 +115,22 @@ class AliQwenBot(Bot):
         except Exception as e:
             need_retry = retry_count < 2
             result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
-            if isinstance(e, openai.error.RateLimitError):
+            if isinstance(e, RateLimitError):
                 logger.warn("[QWEN] RateLimitError: {}".format(e))
                 result["content"] = "提问太快啦,请休息一下再问我吧"
                 if need_retry:
                     time.sleep(20)
-            elif isinstance(e, openai.error.Timeout):
+            elif isinstance(e, Timeout):
                 logger.warn("[QWEN] Timeout: {}".format(e))
                 result["content"] = "我没有收到你的消息"
                 if need_retry:
                     time.sleep(5)
-            elif isinstance(e, openai.error.APIError):
+            elif isinstance(e, APIError):
                 logger.warn("[QWEN] Bad Gateway: {}".format(e))
                 result["content"] = "请再问我一次"
                 if need_retry:
                     time.sleep(10)
-            elif isinstance(e, openai.error.APIConnectionError):
+            elif isinstance(e, APIConnectionError):
                 logger.warn("[QWEN] APIConnectionError: {}".format(e))
                 need_retry = False
                 result["content"] = "我连接不到你的网络"

@@ -244,22 +244,22 @@ class ChatGPTBot(Bot, OpenAIImage, OpenAICompatibleBot):
         except Exception as e:
             need_retry = retry_count < 2
             result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
-            if isinstance(e, openai.error.RateLimitError):
+            if isinstance(e, RateLimitError):
                 logger.warn("[CHATGPT] RateLimitError: {}".format(e))
                 result["content"] = "提问太快啦,请休息一下再问我吧"
                 if need_retry:
                     time.sleep(20)
-            elif isinstance(e, openai.error.Timeout):
+            elif isinstance(e, Timeout):
                 logger.warn("[CHATGPT] Timeout: {}".format(e))
                 result["content"] = "我没有收到你的消息"
                 if need_retry:
                     time.sleep(5)
-            elif isinstance(e, openai.error.APIError):
+            elif isinstance(e, APIError):
                 logger.warn("[CHATGPT] Bad Gateway: {}".format(e))
                 result["content"] = "请再问我一次"
                 if need_retry:
                     time.sleep(10)
-            elif isinstance(e, openai.error.APIConnectionError):
+            elif isinstance(e, APIConnectionError):
                 logger.warn("[CHATGPT] APIConnectionError: {}".format(e))
                 result["content"] = "我连接不到你的网络"
                 if need_retry:

@@ -3,7 +3,6 @@
 import time
 import json
 import openai
-import openai.error
 from models.bot import Bot
 from models.session_manager import SessionManager
 from bridge.context import ContextType

@@ -3,7 +3,7 @@
 import time

 import openai
-import openai.error
+from models.openai.openai_compat import RateLimitError, Timeout, APIConnectionError

 from models.bot import Bot
 from models.openai_compatible_bot import OpenAICompatibleBot
@@ -109,17 +109,17 @@ class OpenAIBot(Bot, OpenAIImage, OpenAICompatibleBot):
         except Exception as e:
             need_retry = retry_count < 2
             result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
-            if isinstance(e, openai.error.RateLimitError):
+            if isinstance(e, RateLimitError):
                 logger.warn("[OPEN_AI] RateLimitError: {}".format(e))
                 result["content"] = "提问太快啦,请休息一下再问我吧"
                 if need_retry:
                     time.sleep(20)
-            elif isinstance(e, openai.error.Timeout):
+            elif isinstance(e, Timeout):
                 logger.warn("[OPEN_AI] Timeout: {}".format(e))
                 result["content"] = "我没有收到你的消息"
                 if need_retry:
                     time.sleep(5)
-            elif isinstance(e, openai.error.APIConnectionError):
+            elif isinstance(e, APIConnectionError):
                 logger.warn("[OPEN_AI] APIConnectionError: {}".format(e))
                 need_retry = False
                 result["content"] = "我连接不到你的网络"

Reference in New Issue
Block a user