添加 AI

This commit is contained in:
2026-01-12 23:12:28 +08:00
parent 24457ff7cd
commit c44e29a907
5 changed files with 103 additions and 3 deletions

View File

@ -51,6 +51,8 @@ class AlibabaGreen:
@staticmethod
def _detect_sync(content: str) -> bool:
if len(content) == 0:
return True
if not AlibabaGreen.get_config().module_aligreen_enable:
logger.debug("该环境未启用阿里内容审查,直接跳过")
return True

View File

@ -1,4 +1,4 @@
from typing import Any
from typing import Any, cast
import openai
from loguru import logger
@ -26,14 +26,14 @@ class LLMInfo(BaseModel):
async def chat(
self,
messages: list[ChatCompletionMessageParam],
messages: list[ChatCompletionMessageParam] | list[dict[str, Any]],
timeout: float | None = 30.0,
max_tokens: int | None = None,
**kwargs: Any,
) -> ChatCompletionMessage:
logger.info(f"调用 LLM: BASE_URL={self.base_url} MODEL_NAME={self.model_name}")
completion: ChatCompletion = await self.get_openai_client().chat.completions.create(
messages=messages,
messages=cast(Any, messages),
model=self.model_name,
max_tokens=max_tokens,
timeout=timeout,

View File

@ -0,0 +1,52 @@
from io import BytesIO
import base64
import re
from loguru import logger
from nonebot import on_message
from nonebot.rule import Rule
from konabot.common.apis.ali_content_safety import AlibabaGreen
from konabot.common.llm import get_llm
from konabot.common.longtask import DepLongTaskTarget
from konabot.common.nb.extract_image import DepPILImage
from konabot.common.nb.match_keyword import match_keyword
# Trigger: a message consisting solely of "千问识图" (optionally trailed by spaces).
cmd = on_message(rule=Rule(match_keyword(re.compile(r"^千问识图\s*$"))))


@cmd.handle()
async def _(img: DepPILImage, target: DepLongTaskTarget):
    """OCR the attached image with the qwen3-vl-plus vision model and reply with the text.

    Flow: downscale the image so both sides fit within 2160 px, re-encode as
    JPEG, embed the bytes as a base64 data URL, ask the vision LLM to transcribe
    all text preserving layout, pass the result through Alibaba content
    moderation, then send it back to the chat.
    """
    # Entire handler is intentionally disabled for now.
    if 1:
        return  # TODO: unfinished — there are still bugs to fix
    jpeg_data = BytesIO()
    # Cap both dimensions at 2160 px while keeping the aspect ratio; the second
    # check operates on the already-resized image, so the result fits a 2160 box.
    if img.width > 2160:
        img = img.resize((2160, img.height * 2160 // img.width))
    if img.height > 2160:
        img = img.resize((img.width * 2160 // img.height, 2160))
    # JPEG has no alpha channel, so force RGB before saving.
    img = img.convert("RGB")
    img.save(jpeg_data, format="jpeg", optimize=True, quality=85)
    # Embed the JPEG bytes as a base64 data URL for the vision model.
    data_url = "data:image/jpeg;base64,"
    data_url += base64.b64encode(jpeg_data.getvalue()).decode('ascii')
    llm = get_llm("qwen3-vl-plus")
    res = await llm.chat([
        { "role": "user", "content": [
            { "type": "image_url", "image_url": {
                "url": data_url
            } },
            { "type": "text", "text": "请你提取这张图片中的所有文字,并尽量按照原图的排版输出,不需要其他内容" },
        ] }
    ])
    result = res.content
    logger.info(res)
    # A None content means the model returned no message (e.g. network failure).
    if result is None:
        await target.send_message("提取失败:可能存在网络异常")
        return
    # Refuse to relay extracted text that fails Alibaba content moderation.
    if not await AlibabaGreen.detect(result):
        await target.send_message("提取失败:图片中可能存在一些不合适的内容")
        return
    await target.send_message(result, at=False)

View File

@ -11,6 +11,7 @@ from nonebot.adapters.onebot.v11.message import Message as OB11Message
from konabot.common.apis.ali_content_safety import AlibabaGreen
from konabot.common.longtask import DepLongTaskTarget
from konabot.plugins.handle_text.base import PipelineRunner, TextHandlerEnvironment, register_text_handlers
from konabot.plugins.handle_text.handlers.ai_handlers import THQwen
from konabot.plugins.handle_text.handlers.encoding_handlers import THAlign, THAlphaConv, THB64Hex, THBase64, THBaseConv, THCaesar, THMorse, THReverse
from konabot.plugins.handle_text.handlers.random_handlers import THShuffle, THSorted
from konabot.plugins.handle_text.handlers.unix_handlers import THCat, THEcho, THReplace, THRm
@ -79,6 +80,7 @@ async def _():
THAlign(),
THSorted(),
THMorse(),
THQwen(),
)
logger.info(f"注册了 TextHandler{PipelineRunner.get_runner().handlers}")

View File

@ -0,0 +1,44 @@
from typing import Any, cast
from konabot.common.llm import get_llm
from konabot.plugins.handle_text.base import TextHandler, TextHandlerEnvironment, TextHandleResult
class THQwen(TextHandler):
    """Text handler that relays the pipeline input and arguments to the qwen3-max LLM."""

    name = "qwen"

    async def handle(self, env: TextHandlerEnvironment, istream: str | None, args: list[str]) -> TextHandleResult:
        """Ask qwen3-max with the piped input and/or joined arguments as user turns.

        Returns code 1 with a usage hint when there is nothing to send, code 500
        when the model yields no content, and code 0 with the answer otherwise.
        """
        llm = get_llm("qwen3-max")
        # Gather user turns: the piped input first, then the joined arguments.
        user_turns: list[dict[str, str]] = []
        if istream is not None:
            user_turns.append({"role": "user", "content": istream})
        if args:
            user_turns.append({"role": "user", "content": ' '.join(args)})
        if not user_turns:
            return TextHandleResult(
                code=1,
                ostream="使用方法qwen <提示词>",
            )
        # Prepend a system turn steering the model toward short, plain-text replies.
        system_turn = {
            "role": "system",
            "content": "除非用户要求,请尽可能短点回答。另外,当前环境不支持 Markdown 语法,如果可以,请使用纯文本回答"
        }
        reply = await llm.chat(cast(Any, [system_turn, *user_turns]))
        answer = reply.content
        if answer is None:
            return TextHandleResult(
                code=500,
                ostream="问 AI 的时候发生了未知的错误",
            )
        return TextHandleResult(
            code=0,
            ostream=answer,
        )