Merge branch 'master' into enhancement/typst-binary

This commit is contained in:
2026-03-21 20:13:34 +08:00
10 changed files with 1215 additions and 251 deletions

View File

@ -1,4 +1,5 @@
from typing import cast
import asyncio
from loguru import logger
from nonebot import on_command
import nonebot
@ -31,8 +32,11 @@ from konabot.plugins.handle_text.handlers.random_handlers import THShuffle, THSo
from konabot.plugins.handle_text.handlers.unix_handlers import (
THCat,
THEcho,
THFalse,
THReplace,
THRm,
THTest,
THTrue,
)
from konabot.plugins.handle_text.handlers.whitespace_handlers import (
THLines,
@ -43,11 +47,37 @@ from konabot.plugins.handle_text.handlers.whitespace_handlers import (
)
# Hard wall-clock cap (seconds) for one textfx script run (enforced via asyncio.wait_for).
TEXTFX_MAX_RUNTIME_SECONDS = 60
# Keys of users with an in-flight textfx run; used to allow one run per user at a time.
_textfx_running_users: set[str] = set()
def _get_textfx_user_key(evt: Event) -> str:
    """Build a stable per-user key used to enforce one running script per user.

    Preference order: bot/group/user triple, then bot/private/user, then the
    adapter session id, finally an identity-based fallback for exotic events.
    """
    uid = getattr(evt, "user_id", None)
    bot_id = getattr(evt, "self_id", None)
    if uid is not None:
        gid = getattr(evt, "group_id", None)
        scope = gid if gid is not None else "private"
        return f"{bot_id}:{scope}:{uid}"
    getter = getattr(evt, "get_session_id", None)
    if callable(getter):
        try:
            return f"session:{evt.get_session_id()}"
        except Exception:
            # Some adapters raise when no session is available; fall through.
            pass
    return f"event:{evt.__class__.__name__}:{id(evt)}"
cmd = on_command(cmd="textfx", aliases={"处理文字", "处理文本"})
@cmd.handle()
async def _(msg: UniMsg, evt: Event, bot: Bot, target: DepLongTaskTarget):
user_key = _get_textfx_user_key(evt)
if user_key in _textfx_running_users:
await target.send_message("你当前已有一个 textfx 脚本正在运行,请等待它结束后再试。")
return
istream = ""
if isinstance(evt, OB11MessageEvent):
if evt.reply is not None:
@ -71,9 +101,22 @@ async def _(msg: UniMsg, evt: Event, bot: Bot, target: DepLongTaskTarget):
return
env = TextHandlerEnvironment(is_trusted=False, event=evt)
results = await runner.run_pipeline(res, istream or None, env)
# 检查是否有错误
_textfx_running_users.add(user_key)
try:
results = await asyncio.wait_for(
runner.run_pipeline(res, istream or None, env),
timeout=TEXTFX_MAX_RUNTIME_SECONDS,
)
except asyncio.TimeoutError:
rendered = await render_error_message(
f"处理指令时出现问题:脚本执行超时(超过 {TEXTFX_MAX_RUNTIME_SECONDS} 秒)"
)
await target.send_message(rendered)
return
finally:
_textfx_running_users.discard(user_key)
for r in results:
if r.code != 0:
message = f"处理指令时出现问题:{r.ostream}"
@ -81,7 +124,6 @@ async def _(msg: UniMsg, evt: Event, bot: Bot, target: DepLongTaskTarget):
await target.send_message(rendered)
return
# 收集所有组的文本输出和附件
ostreams = [r.ostream for r in results if r.ostream is not None]
attachments = [r.attachment for r in results if r.attachment is not None]
@ -108,6 +150,9 @@ async def _():
THCat(),
THEcho(),
THRm(),
THTrue(),
THFalse(),
THTest(),
THShuffle(),
THReplace(),
THBase64(),

View File

@ -1,15 +1,16 @@
import asyncio
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from string import whitespace
from typing import cast
from loguru import logger
from nonebot.adapters import Event
MAX_WHILE_ITERATIONS = 100
@dataclass
class TextHandlerEnvironment:
is_trusted: bool
@ -53,29 +54,63 @@ class TextHandlerSync(TextHandler):
@dataclass
class PipelineCommand:
handler: TextHandler
args: list[str]
# 新增重定向目标buffer key
redirect_target: str | None = None
# 新增:是否为追加模式 (>>)
redirect_append: bool = False
class Redirect:
target: str
append: bool = False
@dataclass
class Pipeline:
command_groups: list[list[PipelineCommand]] = field(default_factory=list)
"一个列表的列表,每一组之间的指令之间使用管道符连接,而不同组之间不会有数据流"
class CommandNode:
name: str
handler: TextHandler
args: list[str]
redirects: list[Redirect] = field(default_factory=list)
class PipelineParseStatus(Enum):
normal = 0
in_string = 1
in_string_to_escape = 2
off_string = 3
@dataclass
class PipelineNode:
commands: list[CommandNode] = field(default_factory=list)
negate: bool = False
whitespaces = whitespace + ""
@dataclass
class ConditionalPipeline:
    # One link of a &&/|| chain: `op` is the operator joining this pipeline
    # to the previous one (None for the first pipeline in the chain).
    op: str | None
    pipeline: PipelineNode


@dataclass
class CommandGroup:
    # One `;`-separated statement: pipelines joined by && / ||.
    chains: list[ConditionalPipeline] = field(default_factory=list)


@dataclass
class IfNode:
    # AST for `if condition; then ... [else ...] fi`.
    condition: CommandGroup
    then_body: "Script"
    else_body: "Script | None" = None


@dataclass
class WhileNode:
    # AST for `while condition; do ... done`.
    condition: CommandGroup
    body: "Script"


@dataclass
class Script:
    # Top-level AST: ordered statements, each a plain group, an if, or a while.
    statements: list[CommandGroup | IfNode | WhileNode] = field(default_factory=list)
class TokenKind(Enum):
    # Lexical categories emitted by the tokenizer.
    WORD = "word"  # command names, arguments, quoted strings
    OP = "op"  # shell operators: | ; > >> && || !


@dataclass
class Token:
    # A single lexed token: its category plus its literal (unquoted) text.
    kind: TokenKind
    value: str
class PipelineRunner:
@ -93,198 +128,433 @@ class PipelineRunner:
def register(self, handler: TextHandler):
self.handlers.append(handler)
def parse_pipeline(self, script: str) -> Pipeline | str:
pipeline = Pipeline()
# 当前正在构建的上下文
current_group: list[PipelineCommand] = []
current_command_args: list[str] = []
# 字符串解析状态
status = PipelineParseStatus.normal
current_string = ""
current_string_raw = ""
status_in_string_pair = ""
has_token = False # 是否正在构建一个 token区分空字符串和无 token
# 重定向解析状态
is_parsing_redirect_filename = False
current_redirect_target: str | None = None
current_redirect_append = False
# 辅助函数:将当前解析到的字符串 flush 到 参数列表 或 重定向目标
def _flush_token():
nonlocal \
current_string, \
current_string_raw, \
is_parsing_redirect_filename, \
current_redirect_target, \
has_token
if not has_token:
return
if is_parsing_redirect_filename:
current_redirect_target = current_string
is_parsing_redirect_filename = False # 重定向文件名只取一个 token
else:
current_command_args.append(current_string)
current_string = ""
current_string_raw = ""
has_token = False
# 辅助函数:将当前指令 flush 到当前组
def _flush_command() -> str | None:
nonlocal \
current_command_args, \
current_redirect_target, \
current_redirect_append
if not current_command_args:
return None
cmd_name = current_command_args[0]
args = current_command_args[1:]
matched = [
h for h in self.handlers if cmd_name in h.keywords or cmd_name == h.name
]
if not matched:
return f"不存在名为 {cmd_name} 的函数"
if len(matched) > 1:
logger.warning(
f"指令能对应超过一个文本处理器 CMD={cmd_name} handlers={self.handlers}"
)
cmd = PipelineCommand(
handler=matched[0],
args=args,
redirect_target=current_redirect_target,
redirect_append=current_redirect_append,
def _resolve_handler(self, cmd_name: str) -> TextHandler | str:
matched = [
h for h in self.handlers if cmd_name == h.name or cmd_name in h.keywords
]
if not matched:
return f"不存在名为 {cmd_name} 的函数"
if len(matched) > 1:
logger.warning(
f"指令能对应超过一个文本处理器 CMD={cmd_name} handlers={self.handlers}"
)
current_group.append(cmd)
return matched[0]
# 重置指令级状态
current_command_args = []
current_redirect_target = None
current_redirect_append = False
return None
# 使用索引遍历以支持 look-ahead (处理 >>)
def tokenize(self, script: str) -> list[Token] | str:
tokens: list[Token] = []
buf = ""
quote: str | None = None
escape = False
i = 0
length = len(script)
operators = {"|", ";", ">", "&&", "||", ">>", "!"}
escape_map = {
"n": "\n",
"r": "\r",
"t": "\t",
"0": "\0",
"a": "\a",
"b": "\b",
"f": "\f",
"v": "\v",
"\\": "\\",
'"': '"',
"'": "'",
}
while i < length:
def flush_word(force: bool = False):
nonlocal buf
if buf or force:
tokens.append(Token(TokenKind.WORD, buf))
buf = ""
while i < len(script):
c = script[i]
match status:
case PipelineParseStatus.normal:
if c in whitespaces:
_flush_token()
if quote is not None:
if escape:
buf += escape_map.get(c, c)
escape = False
elif c == "\\":
escape = True
elif c == quote:
quote = None
flush_word(force=True) # 引号闭合时强制 flush即使是空字符串
else:
buf += c
i += 1
continue
elif c in "'\"":
status_in_string_pair = c
status = PipelineParseStatus.in_string
current_string_raw = ""
has_token = True
if c in "'\"":
quote = c
i += 1
continue
elif c == "|":
_flush_token()
if err := _flush_command():
return err
# 管道符不结束 group继续在 current_group 添加
if c.isspace():
flush_word()
i += 1
continue
elif c == ";":
_flush_token()
if err := _flush_command():
return err
# 分号结束 group
if current_group:
pipeline.command_groups.append(current_group)
current_group = []
two = script[i : i + 2]
if two in operators:
flush_word()
tokens.append(Token(TokenKind.OP, two))
i += 2
continue
elif c == ">":
_flush_token() # 先结束之前的参数
# 检查是否是 append 模式 (>>)
if i + 1 < length and script[i + 1] == ">":
current_redirect_append = True
i += 1 # 跳过下一个 >
else:
current_redirect_append = False
if c in {"|", ";", ">", "!"}:
flush_word()
tokens.append(Token(TokenKind.OP, c))
i += 1
continue
# 标记下一个 token 为文件名
is_parsing_redirect_filename = True
else:
current_string += c
has_token = True
case PipelineParseStatus.in_string:
current_string_raw += c
if c == status_in_string_pair:
status = PipelineParseStatus.off_string
elif c == "\\":
status = PipelineParseStatus.in_string_to_escape
else:
current_string += c
case PipelineParseStatus.in_string_to_escape:
escape_map = {
"n": "\n",
"r": "\r",
"t": "\t",
"0": "\0",
"a": "\a",
"b": "\b",
"f": "\f",
"v": "\v",
"\\": "\\",
}
current_string += escape_map.get(c, c)
status = PipelineParseStatus.in_string
case PipelineParseStatus.off_string:
if c in whitespaces:
_flush_token()
status = PipelineParseStatus.normal
elif c == "|":
_flush_token()
if err := _flush_command():
return err
status = PipelineParseStatus.normal
elif c == ";":
_flush_token()
if err := _flush_command():
return err
if current_group:
pipeline.command_groups.append(current_group)
current_group = []
status = PipelineParseStatus.normal
elif c == ">":
_flush_token()
status = PipelineParseStatus.normal
# 回退索引,让下一次循环进入 normal 状态的 > 处理逻辑
i -= 1
else:
# 紧接着的字符继续作为当前字符串的一部分 (如 "abc"d)
current_string += c
current_string_raw = ""
status = PipelineParseStatus.normal
if c == "\\":
if i + 1 < len(script):
i += 1
buf += escape_map.get(script[i], script[i])
else:
buf += c
i += 1
continue
buf += c
i += 1
# 循环结束后的收尾
_flush_token()
if err := _flush_command():
return err
if quote is not None:
return "存在未闭合的引号"
if escape:
buf += "\\"
if current_group:
pipeline.command_groups.append(current_group)
flush_word()
return tokens
return pipeline
def parse_pipeline(self, script: str) -> Script | str:
    """Parse *script* into a Script AST, or return an error message string.

    Shell-like grammar (recursive descent over the token stream):
        script    := statement (';' statement)*
        statement := if-stmt | while-stmt | chain
        chain     := pipe (('&&' | '||') pipe)*
        pipe      := '!'* command ('|' command)*
        command   := WORD WORD* (('>' | '>>') WORD)*
    Errors are reported as plain strings rather than raised exceptions.
    """
    tokens = self.tokenize(script)
    if isinstance(tokens, str):
        # Tokenizer failure (e.g. unclosed quote); propagate its message.
        return tokens
    if not tokens:
        return Script()
    pos = 0

    def peek(offset: int = 0) -> Token | None:
        # Look ahead without consuming; None once past the end of input.
        idx = pos + offset
        return tokens[idx] if idx < len(tokens) else None

    def consume() -> Token:
        # Take the current token and advance the cursor.
        nonlocal pos
        tok = tokens[pos]
        pos += 1
        return tok

    def consume_if_op(value: str) -> bool:
        # Consume the next token only when it is the operator *value*.
        tok = peek()
        if tok is not None and tok.kind == TokenKind.OP and tok.value == value:
            consume()
            return True
        return False

    def consume_if_word(value: str) -> bool:
        # Consume the next token only when it is the keyword word *value*.
        tok = peek()
        if tok is not None and tok.kind == TokenKind.WORD and tok.value == value:
            consume()
            return True
        return False

    def expect_word(msg: str) -> Token | str:
        # Consume a WORD token, or return *msg* as the parse error.
        tok = peek()
        if tok is None or tok.kind != TokenKind.WORD:
            return msg
        return consume()

    def parse_command() -> CommandNode | str:
        # command := NAME arg* redirect*, redirect := ('>' | '>>') BUFFER
        first = expect_word("缺少指令名")
        if isinstance(first, str):
            return first
        handler = self._resolve_handler(first.value)
        if isinstance(handler, str):
            return handler
        args: list[str] = []
        redirects: list[Redirect] = []
        while True:
            tok = peek()
            if tok is None:
                break
            if tok.kind == TokenKind.OP and tok.value in {"|", ";", "&&", "||"}:
                # A command ends at any pipeline/chain/statement operator.
                break
            if tok.kind == TokenKind.OP and tok.value in {">", ">>"}:
                op_tok = consume()
                target = expect_word("重定向操作符后面需要缓存名")
                if isinstance(target, str):
                    return target
                redirects.append(
                    Redirect(target=target.value, append=op_tok.value == ">>")
                )
                continue
            if tok.kind != TokenKind.WORD:
                return f"无法解析的 token: {tok.value}"
            args.append(consume().value)
        return CommandNode(
            name=first.value,
            handler=handler,
            args=args,
            redirects=redirects,
        )

    def parse_pipe() -> PipelineNode | str:
        # pipe := '!'* command ('|' command)*; doubled '!' cancels out.
        negate = False
        while consume_if_op("!"):
            negate = not negate
        pipeline = PipelineNode(negate=negate)
        command = parse_command()
        if isinstance(command, str):
            return command
        pipeline.commands.append(command)
        while True:
            tok = peek()
            if tok is None or tok.kind != TokenKind.OP or tok.value != "|":
                break
            consume()
            next_command = parse_command()
            if isinstance(next_command, str):
                return next_command
            pipeline.commands.append(next_command)
        return pipeline

    def parse_chain() -> CommandGroup | str:
        # chain := pipe (('&&' | '||') pipe)*; the first link carries op=None.
        group = CommandGroup()
        first_pipeline = parse_pipe()
        if isinstance(first_pipeline, str):
            return first_pipeline
        group.chains.append(ConditionalPipeline(op=None, pipeline=first_pipeline))
        while True:
            tok = peek()
            if tok is None or tok.kind != TokenKind.OP or tok.value not in {"&&", "||"}:
                break
            op = consume().value
            next_pipeline = parse_pipe()
            if isinstance(next_pipeline, str):
                return next_pipeline
            group.chains.append(ConditionalPipeline(op=op, pipeline=next_pipeline))
        return group

    def parse_if() -> IfNode | str:
        # if-stmt := 'if' chain [';'] 'then' script ['else' script] 'fi'
        if not consume_if_word("if"):
            return "缺少 if"
        condition = parse_chain()
        if isinstance(condition, str):
            return condition
        consume_if_op(";")  # optional separator before 'then'
        if not consume_if_word("then"):
            return "if 语句缺少 then"
        then_body = parse_script(stop_words={"else", "fi"})
        if isinstance(then_body, str):
            return then_body
        else_body: Script | None = None
        if consume_if_word("else"):
            else_body = parse_script(stop_words={"fi"})
            if isinstance(else_body, str):
                return else_body
        if not consume_if_word("fi"):
            return "if 语句缺少 fi"
        return IfNode(condition=condition, then_body=then_body, else_body=else_body)

    def parse_while() -> WhileNode | str:
        # while-stmt := 'while' chain [';'] 'do' script 'done'
        if not consume_if_word("while"):
            return "缺少 while"
        condition = parse_chain()
        if isinstance(condition, str):
            return condition
        consume_if_op(";")  # optional separator before 'do'
        if not consume_if_word("do"):
            return "while 语句缺少 do"
        body = parse_script(stop_words={"done"})
        if isinstance(body, str):
            return body
        if not consume_if_word("done"):
            return "while 语句缺少 done"
        return WhileNode(condition=condition, body=body)

    def parse_statement() -> CommandGroup | IfNode | WhileNode | str:
        # Dispatch on the leading keyword; anything else is a plain chain.
        tok = peek()
        if tok is not None and tok.kind == TokenKind.WORD:
            if tok.value == "if":
                return parse_if()
            if tok.value == "while":
                return parse_while()
        return parse_chain()

    def parse_script(stop_words: set[str] | None = None) -> Script | str:
        # script := statement (';' statement)*, stopping before any word in
        # *stop_words* (used for then/else/fi and do/done bodies).
        parsed = Script()
        nonlocal pos
        while pos < len(tokens):
            tok = peek()
            if tok is None:
                break
            if stop_words and tok.kind == TokenKind.WORD and tok.value in stop_words:
                break
            if tok.kind == TokenKind.OP and tok.value == ";":
                # Skip empty statements (stray semicolons).
                consume()
                continue
            statement = parse_statement()
            if isinstance(statement, str):
                return statement
            parsed.statements.append(statement)
            tok = peek()
            if tok is not None and tok.kind == TokenKind.OP and tok.value == ";":
                consume()
        return parsed

    parsed = parse_script()
    if isinstance(parsed, str):
        return parsed
    if pos != len(tokens):
        # Leftover tokens mean an unparsable construct (e.g. a stray 'fi').
        tok = tokens[pos]
        return f"无法解析的 token: {tok.value}"
    return parsed
async def _execute_command(
    self,
    command: CommandNode,
    istream: str | None,
    env: TextHandlerEnvironment,
) -> TextHandleResult:
    """Run a single command; on success, apply its redirects into env.buffers."""
    logger.debug(
        f"Executing: {command.name} args={command.args} redirects={command.redirects}"
    )
    outcome = await command.handler.handle(env, istream, command.args)
    if outcome.code != 0 or not command.redirects:
        # Failures and redirect-free results pass through untouched.
        return outcome
    payload = outcome.ostream or ""
    for redirect in command.redirects:
        # '>>' appends to the buffer, '>' overwrites it.
        base = env.buffers.get(redirect.target, "") if redirect.append else ""
        env.buffers[redirect.target] = base + payload
    # Redirected output is consumed: downstream stages see no stream.
    return TextHandleResult(code=0, ostream=None, attachment=outcome.attachment)
async def _execute_pipeline(
    self,
    pipeline: PipelineNode,
    istream: str | None,
    env: TextHandlerEnvironment,
) -> TextHandleResult:
    """Run a `|` chain, feeding each stage's ostream into the next stage.

    A leading `!` (pipeline.negate) inverts the final success/failure status,
    like the shell's `!` prefix.
    """
    stream = istream
    outcome = TextHandleResult(code=0, ostream=None)
    for command in pipeline.commands:
        try:
            outcome = await self._execute_command(command, stream, env)
        except Exception as e:
            logger.error(f"Pipeline execution failed at {command.name}")
            logger.exception(e)
            return TextHandleResult(code=-1, ostream="处理流水线时出现 python 错误")
        if outcome.code != 0:
            # A failing stage aborts the pipe; `!` turns that into success.
            if pipeline.negate:
                return TextHandleResult(code=0, ostream=None)
            return outcome
        stream = outcome.ostream
    if pipeline.negate:
        # Every stage succeeded but the pipeline is negated: report failure.
        return TextHandleResult(code=1, ostream=None)
    return outcome
async def _execute_group(
    self,
    group: CommandGroup,
    istream: str | None,
    env: TextHandlerEnvironment,
) -> TextHandleResult:
    """Run a `&&` / `||` chain of pipelines with shell-style short-circuiting."""
    result = TextHandleResult(code=0, ostream=None)
    for link in group.chains:
        # `&&` runs only after success, `||` only after failure; the first
        # link (op is None) always runs. Skipped links keep the last result.
        if link.op == "&&" and result.code != 0:
            continue
        if link.op == "||" and result.code == 0:
            continue
        result = await self._execute_pipeline(link.pipeline, istream, env)
    return result
async def _execute_if(
    self,
    if_node: IfNode,
    istream: str | None,
    env: TextHandlerEnvironment,
) -> TextHandleResult:
    """Evaluate the condition; run then-body on success, else-body otherwise.

    Returns the last result of whichever body ran. A missing else-body or an
    empty body yields a neutral success result.
    """
    condition = await self._execute_group(if_node.condition, istream, env)
    branch = if_node.then_body if condition.code == 0 else if_node.else_body
    if branch is None:
        return TextHandleResult(code=0, ostream=None)
    results = await self.run_pipeline(branch, istream, env)
    if not results:
        return TextHandleResult(code=0, ostream=None)
    return results[-1]
async def _execute_while(
    self,
    while_node: WhileNode,
    istream: str | None,
    env: TextHandlerEnvironment,
) -> TextHandleResult:
    """Run `while condition; do body; done` with a hard iteration cap.

    Both the condition and the body receive the same original *istream* on
    every iteration. A condition failure ends the loop normally (returning
    the last body result); a body failure is propagated immediately.
    Exceeding MAX_WHILE_ITERATIONS returns code 2 with an explanation.
    """
    last_result = TextHandleResult(code=0, ostream=None)
    for _ in range(MAX_WHILE_ITERATIONS):
        condition_result = await self._execute_group(while_node.condition, istream, env)
        if condition_result.code != 0:
            # Condition failed: normal loop exit.
            return last_result
        body_results = await self.run_pipeline(while_node.body, istream, env)
        if body_results:
            last_result = body_results[-1]
        if last_result.code != 0:
            # A failing body aborts the loop and surfaces the error.
            return last_result
    # Fix: the message previously mixed an ASCII '(' with a fullwidth ')';
    # use a matched fullwidth pair, consistent with the other user messages.
    return TextHandleResult(
        code=2,
        ostream=f"while 循环超过最大迭代次数限制({MAX_WHILE_ITERATIONS})",
    )
async def run_pipeline(
self,
pipeline: Pipeline,
pipeline: Script,
istream: str | None,
env: TextHandlerEnvironment | None = None,
) -> list[TextHandleResult]:
@ -293,54 +563,21 @@ class PipelineRunner:
results: list[TextHandleResult] = []
# 遍历执行指令组 (分号分隔),每个组独立产生输出
for group in pipeline.command_groups:
current_stream = istream
group_result = TextHandleResult(code=0, ostream=None)
# 遍历组内指令 (管道分隔)
for cmd in group:
try:
logger.debug(
f"Executing: {cmd.handler.name} args={cmd.args} redirect={cmd.redirect_target}"
)
result = await cmd.handler.handle(env, current_stream, cmd.args)
if result.code != 0:
# 组内出错,整条流水线中止
results.append(result)
return results
# 处理重定向逻辑
if cmd.redirect_target:
content_to_write = result.ostream or ""
target_buffer = cmd.redirect_target
if cmd.redirect_append:
old_content = env.buffers.get(target_buffer, "")
env.buffers[target_buffer] = old_content + content_to_write
else:
env.buffers[target_buffer] = content_to_write
current_stream = None
group_result = TextHandleResult(
code=0, ostream=None, attachment=result.attachment
)
else:
current_stream = result.ostream
group_result = result
except Exception as e:
logger.error(f"Pipeline execution failed at {cmd.handler.name}")
logger.exception(e)
results.append(
TextHandleResult(
code=-1, ostream="处理流水线时出现 python 错误"
)
)
return results
results.append(group_result)
for statement in pipeline.statements:
try:
if isinstance(statement, IfNode):
results.append(await self._execute_if(statement, istream, env))
elif isinstance(statement, WhileNode):
results.append(await self._execute_while(statement, istream, env))
else:
results.append(await self._execute_group(statement, istream, env))
except Exception as e:
logger.error(f"Pipeline execution failed: {e}")
logger.exception(e)
results.append(
TextHandleResult(code=-1, ostream="处理流水线时出现 python 错误")
)
return results
return results

View File

@ -13,10 +13,8 @@ class THEcho(TextHandler):
async def handle(
self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
) -> TextHandleResult:
if len(args) == 0 and istream is None:
return TextHandleResult(1, "请在 echo 后面添加需要输出的文本")
if istream is not None:
return TextHandleResult(0, "\n".join([istream] + args))
# echo 不读 stdin只输出参数Unix 语义)
# 无参数时输出空行(与 Unix echo 行为一致)
return TextHandleResult(0, "\n".join(args))
@ -26,7 +24,6 @@ class THCat(TextHandler):
async def handle(
self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
) -> TextHandleResult:
# No args: pass through stdin (like Unix cat with no arguments)
if len(args) == 0:
if istream is None:
return TextHandleResult(
@ -35,7 +32,6 @@ class THCat(TextHandler):
)
return TextHandleResult(0, istream)
# Concatenate all specified sources in order
parts: list[str] = []
for arg in args:
if arg == "-":
@ -74,7 +70,6 @@ class THReplace(TextHandler):
async def handle(
self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
) -> TextHandleResult:
# 用法: replace <pattern> <replacement> [text]
if len(args) < 2:
return TextHandleResult(1, "用法replace <正则> <替换内容> [文本]")
@ -90,3 +85,77 @@ class THReplace(TextHandler):
return TextHandleResult(0, res)
except Exception as e:
return TextHandleResult(1, f"正则错误: {str(e)}")
class THTrue(TextHandler):
    """Unix-style `true`: always succeeds and passes stdin through."""

    name = "true"

    async def handle(
        self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
    ) -> TextHandleResult:
        # Exit code 0; forwarding istream makes `true` a no-op inside a pipe.
        return TextHandleResult(code=0, ostream=istream)
class THFalse(TextHandler):
    """Unix-style `false`: always fails with exit code 1 and no output."""

    name = "false"

    async def handle(
        self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
    ) -> TextHandleResult:
        # Non-zero code signals failure to &&/||/if/while constructs.
        return TextHandleResult(code=1, ostream=None)
class THTest(TextHandler):
    """POSIX-style `test` / `[` condition evaluator.

    Supported forms:
        test STRING                      true if STRING is non-empty
        test -n STRING / test -z STRING  non-empty / empty checks
        test A = B / test A != B         string comparison
        test A -eq|-ne|-gt|-ge|-lt|-le B integer comparison
        test ! EXPR                      negation of any form above (new)
    Exit codes follow the Unix convention: 0 true, 1 false, 2 usage error.
    """

    name = "test"
    keywords = ["["]

    def _bool_result(self, value: bool) -> TextHandleResult:
        # test communicates only via its exit code: 0 = true, 1 = false.
        return TextHandleResult(0 if value else 1, None)

    def _evaluate(self, expr: list[str], args: list[str]) -> TextHandleResult:
        # Evaluate a bracket- and negation-free expression; *args* is the
        # original argument list, used verbatim in error messages.
        if not expr:
            return TextHandleResult(1, None)
        if len(expr) == 1:
            return self._bool_result(len(expr[0]) > 0)
        if len(expr) == 2:
            op, value = expr
            if op == "-n":
                return self._bool_result(len(value) > 0)
            if op == "-z":
                return self._bool_result(len(value) == 0)
            return TextHandleResult(2, f"test 不支持的表达式: {' '.join(args)}")
        if len(expr) == 3:
            left, op, right = expr
            if op == "=":
                return self._bool_result(left == right)
            if op == "!=":
                return self._bool_result(left != right)
            if op in {"-eq", "-ne", "-gt", "-ge", "-lt", "-le"}:
                try:
                    li = int(left)
                    ri = int(right)
                except ValueError:
                    return TextHandleResult(2, "test 的数字比较参数必须是整数")
                mapping = {
                    "-eq": li == ri,
                    "-ne": li != ri,
                    "-gt": li > ri,
                    "-ge": li >= ri,
                    "-lt": li < ri,
                    "-le": li <= ri,
                }
                return self._bool_result(mapping[op])
            return TextHandleResult(2, f"test 不支持的操作符: {op}")
        return TextHandleResult(2, f"test 不支持的表达式: {' '.join(args)}")

    async def handle(
        self, env: TextHandlerEnvironment, istream: str | None, args: list[str]
    ) -> TextHandleResult:
        expr = list(args)
        # 支持方括号语法:[ expr ] 会自动移除末尾的 ]
        if expr and expr[-1] == "]":
            expr = expr[:-1]
        # POSIX `! expr` negation; a repeated `!` toggles (like the shell).
        negate = False
        while expr and expr[0] == "!":
            negate = not negate
            expr = expr[1:]
        result = self._evaluate(expr, args)
        if negate and result.code in (0, 1):
            # Flip true/false; usage errors (code 2) are never negated.
            return TextHandleResult(1 - result.code, None)
        return result

View File

@ -0,0 +1,210 @@
import copy
import re
from pathlib import Path
import nonebot
from nonebot import on_command
from nonebot.adapters import Bot, Event, Message
from nonebot.log import logger
from nonebot.message import handle_event
from nonebot.params import CommandArg
from konabot.common.database import DatabaseManager
from konabot.common.longtask import DepLongTaskTarget
# Directory containing this plugin; used to locate the bundled SQL files.
ROOT_PATH = Path(__file__).resolve().parent
# "语法糖" (syntactic sugar) command entry; block=True stops further matchers.
cmd = on_command(cmd="语法糖", aliases={"", "sugar"}, block=True)
db_manager = DatabaseManager()
driver = nonebot.get_driver()
@driver.on_startup
async def register_startup_hook():
    # Ensure the sugar table and schema migrations run before any command.
    await init_db()
@driver.on_shutdown
async def register_shutdown_hook():
    # Release pooled DB connections on bot shutdown.
    await db_manager.close_all_connections()
async def init_db():
    """Create the syntactic_sugar table and migrate older schemas in place."""
    await db_manager.execute_by_sql_file(ROOT_PATH / "sql" / "create_table.sql")
    # Legacy installs lack the channel_id column; add it idempotently.
    table_info = await db_manager.query("PRAGMA table_info(syntactic_sugar)")
    columns = {str(row.get("name")) for row in table_info}
    if "channel_id" not in columns:
        await db_manager.execute(
            "ALTER TABLE syntactic_sugar ADD COLUMN channel_id VARCHAR(255) NOT NULL DEFAULT ''"
        )
    # Replace the old (name, belong_to) uniqueness with (name, channel_id, belong_to).
    await db_manager.execute("DROP INDEX IF EXISTS idx_syntactic_sugar_name_belong_to")
    await db_manager.execute(
        "CREATE UNIQUE INDEX IF NOT EXISTS idx_syntactic_sugar_name_channel_target "
        "ON syntactic_sugar(name, channel_id, belong_to)"
    )
def _extract_reply_plain_text(evt: Event) -> str:
    """Best-effort extraction of the plain text of the message *evt* replies to.

    Returns "" when the event carries no reply or the reply has no message.
    """
    reply = getattr(evt, "reply", None)
    if reply is None:
        return ""
    message = getattr(reply, "message", None)
    if message is None:
        return ""
    extractor = getattr(message, "extract_plain_text", None)
    text = extractor() if callable(extractor) else str(message)
    return text.strip()
def _split_variables(tokens: list[str]) -> tuple[list[str], dict[str, str]]:
positional: list[str] = []
named: dict[str, str] = {}
for token in tokens:
if "=" in token:
key, value = token.split("=", 1)
key = key.strip()
if key:
named[key] = value
continue
positional.append(token)
return positional, named
def _render_template(content: str, positional: list[str], named: dict[str, str]) -> str:
def replace(match: re.Match[str]) -> str:
key = match.group(1).strip()
if key.isdigit():
idx = int(key) - 1
if 0 <= idx < len(positional):
return positional[idx]
return match.group(0)
return named.get(key, match.group(0))
return re.sub(r"\{([^{}]+)\}", replace, content)
async def _store_sugar(name: str, content: str, belong_to: str, channel_id: str):
    # Upsert via insert_sugar.sql (ON CONFLICT ... DO UPDATE on the
    # (name, channel_id, belong_to) unique index).
    await db_manager.execute_by_sql_file(
        ROOT_PATH / "sql" / "insert_sugar.sql",
        (name, content, belong_to, channel_id),
    )
async def _delete_sugar(name: str, belong_to: str, channel_id: str):
    # Deletion is scoped to the caller's own entry (belong_to) in this channel.
    await db_manager.execute(
        "DELETE FROM syntactic_sugar WHERE name = ? AND belong_to = ? AND channel_id = ?",
        (name, belong_to, channel_id),
    )
async def _find_sugar(name: str, belong_to: str, channel_id: str) -> str | None:
    """Look up a sugar by name within a channel; None when nothing matches.

    Entries owned by *belong_to* win over other users' entries in the same
    channel (via the CASE ... ordering), with the oldest id as tie-breaker.
    """
    rows = await db_manager.query(
        (
            "SELECT content FROM syntactic_sugar "
            "WHERE name = ? AND channel_id = ? "
            "ORDER BY CASE WHEN belong_to = ? THEN 0 ELSE 1 END, id ASC "
            "LIMIT 1"
        ),
        (name, channel_id, belong_to),
    )
    if not rows:
        return None
    return rows[0].get("content")
async def _reinject_command(bot: Bot, evt: Event, command_text: str) -> bool:
    """Re-dispatch *command_text* as if the user had sent it, via a cloned event.

    Returns True on success. The expansion depth is tracked on the event and
    capped at 3 so sugars that expand into other sugars cannot recurse forever.
    """
    depth = int(getattr(evt, "_syntactic_sugar_depth", 0))
    if depth >= 3:
        return False
    try:
        cloned_evt = copy.deepcopy(evt)
    except Exception:
        logger.exception("语法糖克隆事件失败")
        return False
    message = getattr(cloned_evt, "message", None)
    if message is None:
        return False
    try:
        # Rebuild using the adapter's own Message type when its constructor
        # accepts a plain string; otherwise fall back to the raw text.
        msg_obj = type(message)(command_text)
    except Exception:
        msg_obj = command_text
    setattr(cloned_evt, "message", msg_obj)
    if hasattr(cloned_evt, "original_message"):
        setattr(cloned_evt, "original_message", msg_obj)
    if hasattr(cloned_evt, "raw_message"):
        setattr(cloned_evt, "raw_message", command_text)
    setattr(cloned_evt, "_syntactic_sugar_depth", depth + 1)
    try:
        await handle_event(bot, cloned_evt)
    except Exception:
        logger.exception("语法糖回注事件失败")
        return False
    return True
@cmd.handle()
async def _(bot: Bot, evt: Event, target: DepLongTaskTarget, args: Message = CommandArg()):
    # Dispatcher for the 语法糖 command: built-in actions 存入/删除/查看,
    # otherwise treat the first token as a sugar name to expand and re-inject.
    # NOTE(review): cmd.finish() appears to terminate the handler (nonebot
    # raises internally), so each branch below ends here — confirm with docs.
    raw = args.extract_plain_text().strip()
    if not raw:
        return
    tokens = raw.split()
    action = tokens[0]
    # Sugars are scoped per conversation target and channel.
    target_id = target.target_id
    channel_id = target.channel_id
    if action == "存入":
        if len(tokens) < 2:
            await cmd.finish("请提供要存入的名称")
        name = tokens[1].strip()
        content = " ".join(tokens[2:]).strip()
        if not content:
            # No inline content: fall back to the replied-to message's text.
            content = _extract_reply_plain_text(evt)
        if not content:
            await cmd.finish("请提供要存入的内容")
        await _store_sugar(name, content, target_id, channel_id)
        await cmd.finish(f"糖已存入:「{name}」!")
    if action == "删除":
        if len(tokens) < 2:
            await cmd.finish("请提供要删除的名称")
        name = tokens[1].strip()
        await _delete_sugar(name, target_id, channel_id)
        await cmd.finish(f"已删除糖:「{name}」!")
    if action == "查看":
        if len(tokens) < 2:
            await cmd.finish("请提供要查看的名称")
        name = tokens[1].strip()
        content = await _find_sugar(name, target_id, channel_id)
        if content is None:
            await cmd.finish(f"没有糖:「{name}")
        await cmd.finish(f"糖的内容:「{content}")
    # Fallback: expand the sugar named by *action*, using the remaining
    # tokens as template variables, then re-send the result as a command.
    name = action
    content = await _find_sugar(name, target_id, channel_id)
    if content is None:
        await cmd.finish(f"没有糖:「{name}")
    positional, named = _split_variables(tokens[1:])
    rendered = _render_template(content, positional, named)
    ok = await _reinject_command(bot, evt, rendered)
    if not ok:
        # Re-injection failed (depth cap or adapter mismatch): show the text.
        await cmd.finish(f"糖的展开结果:「{rendered}")

View File

@ -0,0 +1,12 @@
-- Create the syntactic sugar table.
-- Each row maps a sugar name (scoped to a channel and an owner) to its
-- replacement content; the unique index enforces one definition per
-- (name, channel_id, belong_to) triple and backs the upsert in insert_sugar.sql.
CREATE TABLE IF NOT EXISTS syntactic_sugar (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name VARCHAR(255) NOT NULL,
    content TEXT NOT NULL,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    belong_to VARCHAR(255) NOT NULL,
    channel_id VARCHAR(255) NOT NULL DEFAULT ''
);
CREATE UNIQUE INDEX IF NOT EXISTS idx_syntactic_sugar_name_channel_target
ON syntactic_sugar(name, channel_id, belong_to);

View File

@ -0,0 +1,5 @@
-- Insert a sugar; if (name, channel_id, belong_to) already exists, update
-- the content instead (upsert against the unique index in create_table.sql).
INSERT INTO syntactic_sugar (name, content, belong_to, channel_id)
VALUES (?, ?, ?, ?)
ON CONFLICT(name, channel_id, belong_to) DO UPDATE SET
content = excluded.content;