Compare commits
22 Commits: 0055ea2df7 ... 2706263a4b

2706263a4b
ee34f13307
65a4cf59c2
a7a56b5058
8c21432291
87b3f79ae9
f42aad5093
725f60fba3
be83907394
eba48a0f1a
ee1a9e7cce
fc06be6f7a
883b513b91
24cebaf4ec
858b5f69b0
63c61e6204
82aac97980
045cdb15d8
e78e8b0909
07974a26d0
3e56c074cc
72dbe856d2
Dockerfile (27 changed lines)
@@ -3,39 +3,36 @@
# - How to build: first edit `config.py`, then `docker build -t gpt-academic . `
# - How to run (on Linux): `docker run --rm -it --net=host gpt-academic `
# - How to run (other operating systems, pick any fixed port such as 50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
FROM python:3.11

FROM ghcr.io/astral-sh/uv:python3.12-bookworm

# Optional step: switch the pip index (the following three lines can be removed)
RUN echo '[global]' > /etc/pip.conf && \
    echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
    echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf


# Voice output feature (of the following two lines, the first switches to the Aliyun mirror, the second installs ffmpeg; both can be removed)
RUN UBUNTU_VERSION=$(awk -F= '/^VERSION_CODENAME=/{print $2}' /etc/os-release); echo "deb https://mirrors.aliyun.com/debian/ $UBUNTU_VERSION main non-free contrib" > /etc/apt/sources.list; apt-get update
# Voice output feature (lines 1-2 below switch to the Aliyun mirror, lines 3-4 install ffmpeg; all can be removed)
RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
    sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list.d/debian.sources && \
    apt-get update
RUN apt-get install ffmpeg -y
RUN apt-get clean


# Enter the working directory (required)
WORKDIR /gpt


# Install most dependencies first so the Docker cache speeds up later builds (the following two lines can be removed)
COPY requirements.txt ./
RUN pip3 install -r requirements.txt

RUN uv venv --python=3.12 && uv pip install --verbose -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
ENV PATH="/gpt/.venv/bin:$PATH"
RUN python -c 'import loguru'

# Copy in the project files and install the remaining dependencies (required)
COPY . .
RUN pip3 install -r requirements.txt


# Optional step: warm up modules (can be removed)
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
RUN python3 -m pip cache purge
RUN uv venv --python=3.12 && uv pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/

# # Optional step: warm up modules (can be removed)
RUN python -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Launch (required)
CMD ["python3", "-u", "main.py"]
CMD ["bash", "-c", "python main.py"]
README.md

@@ -1,4 +1,5 @@
> [!IMPORTANT]
> Latest news on the `master` branch (2025.7.31): new GUI front end, coming soon
> Latest news on the `master` branch (2025.3.2): fixed a large number of code typos / the web-access component now supports Jina's API / added deepseek-r1 support
> Latest news on the `frontier` development branch (2024.12.9): added the conversation timeline feature, improved xelatex paper translation
> Latest news in the `wiki` docs (2024.12.5): updated the ollama integration guide
config.py (26 changed lines)
@@ -7,11 +7,11 @@
Configuration reading priority: environment variable > config_private.py > config.py
"""

# [step 1-1]>> ( connect GPT-class models ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789". In rare cases you also need to fill in an organization (format like org-123456789abcdefghijklmno); scroll down to the API_ORG setting
# [step 1-1]>> ( connect the OpenAI model family ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789". In rare cases you also need to fill in an organization (format like org-123456789abcdefghijklmno); scroll down to the API_ORG setting
API_KEY = "在此处填写APIKEY"    # Several API keys can be given at once, separated by commas, e.g. API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"

# [step 1-2]>> ( connect Tongyi qwen-max ) Connect the Tongyi Qianwen online LLM; get an api-key at https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY = ""    # Alibaba DashScope (Lingji) API_KEY
# [step 1-2]>> ( strongly recommended! connect the Tongyi family & the Bailian model platform ) Connect the Tongyi Qianwen online LLM; get an api-key at https://dashscope.console.aliyun.com/
DASHSCOPE_API_KEY = ""    # Alibaba DashScope (Lingji) API_KEY (used for qwen-max, dashscope-qwen3-14b, dashscope-deepseek-r1, etc.)

# [step 1-3]>> ( connect deepseek-reasoner, i.e. deepseek-r1 ) DeepSeek API KEY; the default request URL is "https://api.deepseek.com/v1/chat/completions"
DEEPSEEK_API_KEY = ""

@@ -43,7 +43,10 @@ AVAIL_LLM_MODELS = ["qwen-max", "o1-mini", "o1-mini-2024-09-12", "o1", "o1-2024-
                    "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
                    "gemini-1.5-pro", "chatglm3", "chatglm4",
                    "deepseek-chat", "deepseek-coder", "deepseek-reasoner"
                    "deepseek-chat", "deepseek-coder", "deepseek-reasoner",
                    "volcengine-deepseek-r1-250120", "volcengine-deepseek-v3-241226",
                    "dashscope-deepseek-r1", "dashscope-deepseek-v3",
                    "dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
                    ]

EMBEDDING_MODEL = "text-embedding-3-small"

@@ -267,6 +270,10 @@ MOONSHOT_API_KEY = ""
YIMODEL_API_KEY = ""


# Connect Volcano Engine online LLMs; get an api-key at https://console.volcengine.com/ark/region:ark+cn-beijing/endpoint
ARK_API_KEY = "00000000-0000-0000-0000-000000000000"    # Volcano Engine API KEY


# Zidong Taichu LLM https://ai-maas.wair.ac.cn
TAICHU_API_KEY = ""

@@ -347,6 +354,17 @@ DAAS_SERVER_URLS = [ f"https://niuziniu-biligpt{i}.hf.space/stream" for i in ran
# In the web search component, turns search results into clean Markdown
JINA_API_KEY = ""


# Whether to clip the context length automatically (disabled by default)
AUTO_CONTEXT_CLIP_ENABLE = False
# Token length that triggers context clipping (contexts longer than this are clipped automatically)
AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN = 30*1000
# Unconditionally discard rounds beyond this count
AUTO_CONTEXT_MAX_ROUND = 64
# When clipping, the maximum share of AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN that the x-th most recent round may keep
AUTO_CONTEXT_MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0.12, 0.10, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]
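Taken together, these settings describe a budgeted clipping policy: nothing happens below the trigger length, at most AUTO_CONTEXT_MAX_ROUND rounds survive, and the i-th most recent round may keep at most AUTO_CONTEXT_MAX_CLIP_RATIO[i] of the trigger budget. A minimal sketch of such a policy, assuming a count_tokens helper and plain-string rounds (both are illustrative; this is not the project's actual clipping code):

# Illustrative sketch only -- not the project's actual clipping implementation.
TRIGGER = 30 * 1000
MAX_ROUND = 64
MAX_CLIP_RATIO = [0.80, 0.60, 0.45, 0.25, 0.20, 0.18, 0.16, 0.14, 0.12,
                  0.10, 0.08, 0.07, 0.06, 0.05, 0.04, 0.03, 0.02, 0.01]

def clip_context(rounds, count_tokens):
    """rounds: conversation turns, oldest first; count_tokens: str -> int (assumed helper)."""
    rounds = rounds[-MAX_ROUND:]                      # unconditionally drop rounds beyond the cap
    if sum(count_tokens(r) for r in rounds) <= TRIGGER:
        return rounds                                 # below the trigger: keep everything
    kept = []
    for i, text in enumerate(reversed(rounds)):       # i == 0 is the most recent round
        ratio = MAX_CLIP_RATIO[i] if i < len(MAX_CLIP_RATIO) else 0.0
        budget = int(TRIGGER * ratio)                 # this round's maximum token share
        if budget == 0:
            break                                     # older rounds get no budget at all
        while text and count_tokens(text) > budget:
            text = text[:len(text) // 2]              # naive halving until within budget
        kept.append(text)
    return list(reversed(kept))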

"""
--------------- Notes on how the settings relate to each other ---------------
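The module docstring near the top of config.py states the lookup order: environment variable, then config_private.py, then config.py. A minimal sketch of that resolution order (the project's actual reader, get_conf in toolbox, handles more cases such as type coercion; this only shows the shape of the idea):

import importlib
import os

def read_single_conf(name):
    # 1. An environment variable always wins.
    if name in os.environ:
        return os.environ[name]
    # 2. Then a value defined in config_private.py, if that file exists.
    try:
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)
    except ModuleNotFoundError:
        pass
    # 3. Finally the default from config.py.
    return getattr(importlib.import_module("config"), name)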
@@ -106,7 +106,7 @@ AVAIL_FONTS = [

# Default system prompt
INIT_SYS_PROMPT = "Serve me as a writing and programming assistant."
INIT_SYS_PROMPT = " "

# Height of the chat window (only takes effect when LAYOUT="TOP-DOWN")
@@ -1,10 +1,11 @@
from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
import re
from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user, update_ui_latest_msg
from crazy_functions.plugin_template.plugin_class_template import GptAcademicPluginTemplate, ArgProperty
from loguru import logger

f_prefix = 'GPT-Academic对话存档'

def write_chat_to_file(chatbot, history=None, file_name=None):
def write_chat_to_file_legacy(chatbot, history=None, file_name=None):
    """
    Write the conversation history to a file in Markdown format. If no file name is given, one is generated from the current time.
    """
@@ -12,6 +13,9 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
    import time
    from themes.theme import advanced_css

    if (file_name is not None) and (file_name != "") and (not file_name.endswith('.html')): file_name += '.html'
    else: file_name = None

    if file_name is None:
        file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
    fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name)
@@ -68,6 +72,147 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
    promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot)
    return '对话历史写入:' + fp

def write_chat_to_file(chatbot, history=None, file_name=None):
    """
    Write the conversation history to files in multiple formats (HTML, Word, Markdown). If no file name is given, one is generated from the current time.

    Args:
        chatbot: the chatbot object holding the conversation
        history: the conversation history
        file_name: the file name to use; if None, a timestamp is used

    Returns:
        str: a message containing the save paths
    """
    import os
    import time
    import asyncio
    import aiofiles
    from toolbox import promote_file_to_downloadzone
    from crazy_functions.doc_fns.conversation_doc.excel_doc import save_chat_tables
    from crazy_functions.doc_fns.conversation_doc.html_doc import HtmlFormatter
    from crazy_functions.doc_fns.conversation_doc.markdown_doc import MarkdownFormatter
    from crazy_functions.doc_fns.conversation_doc.word_doc import WordFormatter
    from crazy_functions.doc_fns.conversation_doc.txt_doc import TxtFormatter
    from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter

    async def save_html():
        try:
            html_formatter = HtmlFormatter(chatbot, history)
            html_content = html_formatter.create_document()
            html_file = os.path.join(save_dir, base_name + '.html')
            async with aiofiles.open(html_file, 'w', encoding='utf8') as f:
                await f.write(html_content)
            return html_file
        except Exception as e:
            print(f"保存HTML格式失败: {str(e)}")
            return None

    async def save_word():
        try:
            word_formatter = WordFormatter()
            doc = word_formatter.create_document(history)
            docx_file = os.path.join(save_dir, base_name + '.docx')
            # python-docx has no async API, so run the save in a thread pool
            loop = asyncio.get_event_loop()
            await loop.run_in_executor(None, doc.save, docx_file)
            return docx_file
        except Exception as e:
            print(f"保存Word格式失败: {str(e)}")
            return None

    async def save_pdf(docx_file):
        try:
            if docx_file:
                # Build the output path
                pdf_file = os.path.join(save_dir, base_name + '.pdf')

                # Run the conversion in a thread pool
                loop = asyncio.get_event_loop()
                pdf_file = await loop.run_in_executor(
                    None,
                    WordToPdfConverter.convert_to_pdf,
                    docx_file
                    # save_dir
                )

                return pdf_file

        except Exception as e:
            print(f"保存PDF格式失败: {str(e)}")
            return None

    async def save_markdown():
        try:
            md_formatter = MarkdownFormatter()
            md_content = md_formatter.create_document(history)
            md_file = os.path.join(save_dir, base_name + '.md')
            async with aiofiles.open(md_file, 'w', encoding='utf8') as f:
                await f.write(md_content)
            return md_file
        except Exception as e:
            print(f"保存Markdown格式失败: {str(e)}")
            return None

    async def save_txt():
        try:
            txt_formatter = TxtFormatter()
            txt_content = txt_formatter.create_document(history)
            txt_file = os.path.join(save_dir, base_name + '.txt')
            async with aiofiles.open(txt_file, 'w', encoding='utf8') as f:
                await f.write(txt_content)
            return txt_file
        except Exception as e:
            print(f"保存TXT格式失败: {str(e)}")
            return None

    async def main():
        # Run all save tasks concurrently
        html_task = asyncio.create_task(save_html())
        word_task = asyncio.create_task(save_word())
        md_task = asyncio.create_task(save_markdown())
        txt_task = asyncio.create_task(save_txt())

        # Wait for every task to finish
        html_file = await html_task
        docx_file = await word_task
        md_file = await md_task
        txt_file = await txt_task

        # The PDF conversion must wait for the Word file to finish
        pdf_file = await save_pdf(docx_file)
        # Collect every file that was generated successfully
        result_files = [f for f in [html_file, docx_file, md_file, txt_file, pdf_file] if f]

        # Save Excel tables
        excel_files = save_chat_tables(history, save_dir, base_name)
        result_files.extend(excel_files)

        return result_files

    # Generate a timestamp
    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())

    # Resolve the save directory
    save_dir = get_log_folder(get_user(chatbot), plugin_name='chat_history')

    # Work out the file name
    base_name = file_name if file_name else f"聊天记录_{timestamp}"

    # Run the async tasks
    result_files = asyncio.run(main())

    # Add the generated files to the download area
    for file in result_files:
        promote_file_to_downloadzone(file, rename_file=os.path.basename(file), chatbot=chatbot)

    # If no file was saved successfully, return an error message
    if not result_files:
        return "保存对话记录失败,请检查错误日志"

    ext_list = [os.path.splitext(f)[1] for f in result_files]
    # Return the success message and file paths
    return f"对话历史已保存至以下格式文件:" + "、".join(ext_list)
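The function above fans the four independent writers out as concurrent tasks and only serialises the PDF step behind the Word step. A self-contained sketch of that scheduling pattern, with sleeps standing in for the real formatters:

import asyncio

async def write(name, seconds):
    await asyncio.sleep(seconds)   # stand-in for one of the format writers above
    return f"{name} done"

async def main():
    # Independent writers start concurrently...
    html = asyncio.create_task(write("html", 0.2))
    word = asyncio.create_task(write("word", 0.3))
    md = asyncio.create_task(write("md", 0.1))
    docx = await word              # ...but the PDF step must wait for the Word file
    pdf = await write("pdf", 0.1)
    return [await html, docx, await md, pdf]

print(asyncio.run(main()))         # finishes in ~0.4s rather than the ~0.7s of a serial run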

def gen_file_preview(file_name):
    try:
        with open(file_name, 'r', encoding='utf8') as f:
@@ -119,12 +264,21 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
    user_request    information about the current user's request (IP address, etc.)
    """
    file_name = plugin_kwargs.get("file_name", None)
    if (file_name is not None) and (file_name != "") and (not file_name.endswith('.html')): file_name += '.html'
    else: file_name = None

    chatbot.append((None, f"[Local Message] {write_chat_to_file(chatbot, history, file_name)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
    yield from update_ui(chatbot=chatbot, history=history)  # Refresh the UI promptly, since the model request takes a while

    chatbot.append((None, f"[Local Message] {write_chat_to_file_legacy(chatbot, history, file_name)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
    try:
        chatbot.append((None, f"[Local Message] 正在尝试生成pdf以及word格式的对话存档,请稍等..."))
        yield from update_ui(chatbot=chatbot, history=history)  # Refresh the UI promptly, since the request takes a while
        lastmsg = f"[Local Message] {write_chat_to_file(chatbot, history, file_name)}。" \
                  f"您可以调用下拉菜单中的“载入对话历史会话”还原当下的对话,请注意,目前只支持html格式载入历史。" \
                  f"当模型回答中存在表格,将提取表格内容存储为Excel的xlsx格式,如果你提供一些数据,然后输入指令要求模型帮你整理为表格" \
                  f"(如“请帮我将下面的数据整理为表格:”),再利用此插件就可以获取到Excel表格。"
        yield from update_ui_latest_msg(lastmsg, chatbot, history)  # Refresh the UI promptly, since the request takes a while
    except Exception as e:
        logger.exception(f"已完成对话存档(pdf和word格式的对话存档生成未成功)。{str(e)}")
        lastmsg = "已完成对话存档(pdf和word格式的对话存档生成未成功)。"
        yield from update_ui_latest_msg(lastmsg, chatbot, history)  # Refresh the UI promptly, since the request takes a while
    return

class Conversation_To_File_Wrap(GptAcademicPluginTemplate):
    def __init__(self):
crazy_functions/doc_fns/AI_review_doc.py (new file, 812 lines)
@@ -0,0 +1,812 @@
import os
import time
from abc import ABC, abstractmethod
from datetime import datetime
from docx import Document
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
from docx.oxml.ns import qn
from docx.shared import Inches, Cm
from docx.shared import Pt, RGBColor, Inches
from typing import Dict, List, Tuple
import markdown
from crazy_functions.doc_fns.conversation_doc.word_doc import convert_markdown_to_word


class DocumentFormatter(ABC):
    """Base class for document formatting; defines the basic formatting interface"""

    def __init__(self, final_summary: str, file_summaries_map: Dict, failed_files: List[Tuple]):
        self.final_summary = final_summary
        self.file_summaries_map = file_summaries_map
        self.failed_files = failed_files

    @abstractmethod
    def format_failed_files(self) -> str:
        """Format the list of failed files"""
        pass

    @abstractmethod
    def format_file_summaries(self) -> str:
        """Format the per-file summaries"""
        pass

    @abstractmethod
    def create_document(self) -> str:
        """Create the complete document"""
        pass


class WordFormatter(DocumentFormatter):
    """Word document generator - follows the Chinese government document layout standard (GB/T 9704-2012), with refinements"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.doc = Document()
        self._setup_document()
        self._create_styles()
        # Initialise the three-level heading numbering
        self.numbers = {
            1: 0,  # level-1 heading counter
            2: 0,  # level-2 heading counter
            3: 0   # level-3 heading counter
        }

    def _setup_document(self):
        """Set up the basic document format: page geometry and header"""
        sections = self.doc.sections
        for section in sections:
            # A4 page size
            section.page_width = Cm(21)
            section.page_height = Cm(29.7)
            # Page margins
            section.top_margin = Cm(3.7)     # top margin 37mm
            section.bottom_margin = Cm(3.5)  # bottom margin 35mm
            section.left_margin = Cm(2.8)    # left margin 28mm
            section.right_margin = Cm(2.6)   # right margin 26mm
            # Header/footer distances
            section.header_distance = Cm(2.0)
            section.footer_distance = Cm(2.0)

            # Add the page header
            header = section.header
            header_para = header.paragraphs[0]
            header_para.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
            header_run = header_para.add_run("该文档由GPT-academic生成")
            header_run.font.name = '仿宋'
            header_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
            header_run.font.size = Pt(9)

    def _create_styles(self):
        """Create the document styles"""
        # Body style
        style = self.doc.styles.add_style('Normal_Custom', WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = '仿宋'
        style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        style.font.size = Pt(14)
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        style.paragraph_format.space_after = Pt(0)
        style.paragraph_format.first_line_indent = Pt(28)

        # Heading styles for each level
        self._create_heading_style('Title_Custom', '方正小标宋简体', 32, WD_PARAGRAPH_ALIGNMENT.CENTER)
        self._create_heading_style('Heading1_Custom', '黑体', 22, WD_PARAGRAPH_ALIGNMENT.LEFT)
        self._create_heading_style('Heading2_Custom', '黑体', 18, WD_PARAGRAPH_ALIGNMENT.LEFT)
        self._create_heading_style('Heading3_Custom', '黑体', 16, WD_PARAGRAPH_ALIGNMENT.LEFT)

    def _create_heading_style(self, style_name: str, font_name: str, font_size: int, alignment):
        """Create one heading style"""
        style = self.doc.styles.add_style(style_name, WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = font_name
        style._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
        style.font.size = Pt(font_size)
        style.font.bold = True
        style.paragraph_format.alignment = alignment
        style.paragraph_format.space_before = Pt(12)
        style.paragraph_format.space_after = Pt(12)
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        return style

    def _get_heading_number(self, level: int) -> str:
        """
        Generate a heading number

        Args:
            level: heading level (0-3)

        Returns:
            str: the formatted heading number
        """
        if level == 0:  # the main title carries no number
            return ""

        self.numbers[level] += 1  # bump the counter for this level

        # Reset the counters of all lower levels
        for i in range(level + 1, 4):
            self.numbers[i] = 0

        # Return the number in a level-specific format
        if level == 1:
            return f"{self.numbers[1]}. "
        elif level == 2:
            return f"{self.numbers[1]}.{self.numbers[2]} "
        elif level == 3:
            return f"{self.numbers[1]}.{self.numbers[2]}.{self.numbers[3]} "
        return ""

    def _add_heading(self, text: str, level: int):
        """
        Add a numbered heading

        Args:
            text: heading text
            level: heading level (0-3)
        """
        style_map = {
            0: 'Title_Custom',
            1: 'Heading1_Custom',
            2: 'Heading2_Custom',
            3: 'Heading3_Custom'
        }

        number = self._get_heading_number(level)
        paragraph = self.doc.add_paragraph(style=style_map[level])

        if number:
            number_run = paragraph.add_run(number)
            font_size = 22 if level == 1 else (18 if level == 2 else 16)
            self._get_run_style(number_run, '黑体', font_size, True)

        text_run = paragraph.add_run(text)
        font_size = 32 if level == 0 else (22 if level == 1 else (18 if level == 2 else 16))
        self._get_run_style(text_run, '黑体', font_size, True)

        # Add the date under the main title
        if level == 0:
            date_paragraph = self.doc.add_paragraph()
            date_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
            date_run = date_paragraph.add_run(datetime.now().strftime('%Y年%m月%d日'))
            self._get_run_style(date_run, '仿宋', 16, False)

        return paragraph

    def _get_run_style(self, run, font_name: str, font_size: int, bold: bool = False):
        """Apply a style to a text run"""
        run.font.name = font_name
        run._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
        run.font.size = Pt(font_size)
        run.font.bold = bold

    def format_failed_files(self) -> str:
        """Format the list of failed files"""
        result = []
        if not self.failed_files:
            return "\n".join(result)

        result.append("处理失败文件:")
        for fp, reason in self.failed_files:
            result.append(f"• {os.path.basename(fp)}: {reason}")

        self._add_heading("处理失败文件", 1)
        for fp, reason in self.failed_files:
            self._add_content(f"• {os.path.basename(fp)}: {reason}", indent=False)
        self.doc.add_paragraph()

        return "\n".join(result)

    def _add_content(self, text: str, indent: bool = True):
        """Add body text, run through convert_markdown_to_word first"""
        # Convert the markdown text with convert_markdown_to_word
        processed_text = convert_markdown_to_word(text)
        paragraph = self.doc.add_paragraph(processed_text, style='Normal_Custom')
        if not indent:
            paragraph.paragraph_format.first_line_indent = Pt(0)
        return paragraph

    def format_file_summaries(self) -> str:
        """
        Format the per-file summaries, keeping the heading hierarchy correct and handling markdown text
        """
        result = []
        # First group the file paths by directory
        file_groups = {}
        for path in sorted(self.file_summaries_map.keys()):
            dir_path = os.path.dirname(path)
            if dir_path not in file_groups:
                file_groups[dir_path] = []
            file_groups[dir_path].append(path)

        # Handle files without a directory
        root_files = file_groups.get("", [])
        if root_files:
            for path in sorted(root_files):
                file_name = os.path.basename(path)
                result.append(f"\n📄 {file_name}")
                result.append(self.file_summaries_map[path])
                # Files without a directory become level-2 headings
                self._add_heading(f"📄 {file_name}", 2)
                # Convert the file content with convert_markdown_to_word
                self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
                self.doc.add_paragraph()

        # Handle files inside directories
        for dir_path in sorted(file_groups.keys()):
            if dir_path == "":  # skip the root files handled above
                continue

            # The directory becomes a level-2 heading
            result.append(f"\n📁 {dir_path}")
            self._add_heading(f"📁 {dir_path}", 2)

            # Every file in the directory becomes a level-3 heading
            for path in sorted(file_groups[dir_path]):
                file_name = os.path.basename(path)
                result.append(f"\n📄 {file_name}")
                result.append(self.file_summaries_map[path])

                # The file name becomes a level-3 heading
                self._add_heading(f"📄 {file_name}", 3)
                # Convert the file content with convert_markdown_to_word
                self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
                self.doc.add_paragraph()

        return "\n".join(result)


    def create_document(self):
        """Create the complete Word document and return the document object"""
        # Reset all heading counters
        for level in self.numbers:
            self.numbers[level] = 0

        # Main title
        self._add_heading("文档总结报告", 0)
        self.doc.add_paragraph()

        # Overall summary, converted with convert_markdown_to_word
        self._add_heading("总体摘要", 1)
        self._add_content(convert_markdown_to_word(self.final_summary))
        self.doc.add_paragraph()

        # List of failed files (if any)
        if self.failed_files:
            self.format_failed_files()

        # Detailed per-file summaries
        self._add_heading("各文件详细总结", 1)
        self.format_file_summaries()

        return self.doc

    def save_as_pdf(self, word_path, pdf_path=None):
        """Convert the generated Word document to PDF

        Parameters:
            word_path: path of the Word document
            pdf_path: optional output path for the PDF; if omitted, the Word document's name and location are reused

        Returns:
            the path of the generated PDF, or None if the conversion failed
        """
        from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter
        try:
            pdf_path = WordToPdfConverter.convert_to_pdf(word_path, pdf_path)
            return pdf_path
        except Exception as e:
            print(f"PDF转换失败: {str(e)}")
            return None


class MarkdownFormatter(DocumentFormatter):
    """Markdown document generator"""

    def format_failed_files(self) -> str:
        if not self.failed_files:
            return ""

        formatted_text = ["\n## ⚠️ 处理失败的文件"]
        for fp, reason in self.failed_files:
            formatted_text.append(f"- {os.path.basename(fp)}: {reason}")
        formatted_text.append("\n---")
        return "\n".join(formatted_text)

    def format_file_summaries(self) -> str:
        formatted_text = []
        sorted_paths = sorted(self.file_summaries_map.keys())
        current_dir = ""

        for path in sorted_paths:
            dir_path = os.path.dirname(path)
            if dir_path != current_dir:
                if dir_path:
                    formatted_text.append(f"\n## 📁 {dir_path}")
                current_dir = dir_path

            file_name = os.path.basename(path)
            formatted_text.append(f"\n### 📄 {file_name}")
            formatted_text.append(self.file_summaries_map[path])
            formatted_text.append("\n---")

        return "\n".join(formatted_text)

    def create_document(self) -> str:
        document = [
            "# 📑 文档总结报告",
            "\n## 总体摘要",
            self.final_summary
        ]

        if self.failed_files:
            document.append(self.format_failed_files())

        document.extend([
            "\n# 📚 各文件详细总结",
            self.format_file_summaries()
        ])

        return "\n".join(document)


class HtmlFormatter(DocumentFormatter):
    """HTML document generator - optimised edition"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.md = markdown.Markdown(extensions=['extra','codehilite', 'tables','nl2br'])
        self.css_styles = """
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(20px); }
            to { opacity: 1; transform: translateY(0); }
        }

        @keyframes slideIn {
            from { transform: translateX(-20px); opacity: 0; }
            to { transform: translateX(0); opacity: 1; }
        }

        @keyframes pulse {
            0% { transform: scale(1); }
            50% { transform: scale(1.05); }
            100% { transform: scale(1); }
        }

        :root {
            /* Enhanced color palette */
            --primary-color: #2563eb;
            --primary-light: #eff6ff;
            --secondary-color: #1e293b;
            --background-color: #f8fafc;
            --text-color: #334155;
            --text-light: #64748b;
            --border-color: #e2e8f0;
            --error-color: #ef4444;
            --error-light: #fef2f2;
            --success-color: #22c55e;
            --warning-color: #f59e0b;
            --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
            --hover-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1);

            /* Typography */
            --heading-font: "Plus Jakarta Sans", system-ui, sans-serif;
            --body-font: "Inter", system-ui, sans-serif;
        }

        body {
            font-family: var(--body-font);
            line-height: 1.8;
            max-width: 1200px;
            margin: 0 auto;
            padding: 2rem;
            color: var(--text-color);
            background-color: var(--background-color);
            font-size: 16px;
            -webkit-font-smoothing: antialiased;
        }

        .container {
            background: white;
            padding: 3rem;
            border-radius: 24px;
            box-shadow: var(--card-shadow);
            transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
            animation: fadeIn 0.6s ease-out;
            border: 1px solid var(--border-color);
        }

        .container:hover {
            box-shadow: var(--hover-shadow);
            transform: translateY(-2px);
        }

        h1, h2, h3 {
            font-family: var(--heading-font);
            font-weight: 600;
        }

        h1 {
            color: var(--primary-color);
            font-size: 2.8em;
            text-align: center;
            margin: 2rem 0 3rem;
            padding-bottom: 1.5rem;
            border-bottom: 3px solid var(--primary-color);
            letter-spacing: -0.03em;
            position: relative;
            display: flex;
            align-items: center;
            justify-content: center;
            gap: 1rem;
        }

        h1::after {
            content: '';
            position: absolute;
            bottom: -3px;
            left: 50%;
            transform: translateX(-50%);
            width: 120px;
            height: 3px;
            background: linear-gradient(90deg, var(--primary-color), var(--primary-light));
            border-radius: 3px;
            transition: width 0.3s ease;
        }

        h1:hover::after {
            width: 180px;
        }

        h2 {
            color: var(--secondary-color);
            font-size: 1.9em;
            margin: 2.5rem 0 1.5rem;
            padding-left: 1.2rem;
            border-left: 4px solid var(--primary-color);
            letter-spacing: -0.02em;
            display: flex;
            align-items: center;
            gap: 1rem;
            transition: all 0.3s ease;
        }

        h2:hover {
            color: var(--primary-color);
            transform: translateX(5px);
        }

        h3 {
            color: var(--text-color);
            font-size: 1.5em;
            margin: 2rem 0 1rem;
            padding-bottom: 0.8rem;
            border-bottom: 2px solid var(--border-color);
            transition: all 0.3s ease;
            display: flex;
            align-items: center;
            gap: 0.8rem;
        }

        h3:hover {
            color: var(--primary-color);
            border-bottom-color: var(--primary-color);
        }

        .summary {
            background: var(--primary-light);
            padding: 2.5rem;
            border-radius: 16px;
            margin: 2.5rem 0;
            box-shadow: 0 4px 6px -1px rgba(37, 99, 235, 0.1);
            position: relative;
            overflow: hidden;
            transition: transform 0.3s ease, box-shadow 0.3s ease;
            animation: slideIn 0.5s ease-out;
        }

        .summary:hover {
            transform: translateY(-3px);
            box-shadow: 0 8px 12px -2px rgba(37, 99, 235, 0.15);
        }

        .summary::before {
            content: '';
            position: absolute;
            top: 0;
            left: 0;
            width: 4px;
            height: 100%;
            background: linear-gradient(to bottom, var(--primary-color), rgba(37, 99, 235, 0.6));
        }

        .summary p {
            margin: 1.2rem 0;
            line-height: 1.9;
            color: var(--text-color);
            transition: color 0.3s ease;
        }

        .summary:hover p {
            color: var(--secondary-color);
        }

        .details {
            margin-top: 3.5rem;
            padding-top: 2.5rem;
            border-top: 2px dashed var(--border-color);
            animation: fadeIn 0.8s ease-out;
        }

        .failed-files {
            background: var(--error-light);
            padding: 2rem;
            border-radius: 16px;
            margin: 3rem 0;
            border-left: 4px solid var(--error-color);
            position: relative;
            transition: all 0.3s ease;
            animation: slideIn 0.5s ease-out;
        }

        .failed-files:hover {
            transform: translateX(5px);
            box-shadow: 0 8px 15px -3px rgba(239, 68, 68, 0.1);
        }

        .failed-files h2 {
            color: var(--error-color);
            border-left: none;
            padding-left: 0;
        }

        .failed-files ul {
            margin: 1.8rem 0;
            padding-left: 1.2rem;
            list-style-type: none;
        }

        .failed-files li {
            margin: 1.2rem 0;
            padding: 1.2rem 1.8rem;
            background: rgba(239, 68, 68, 0.08);
            border-radius: 12px;
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
        }

        .failed-files li:hover {
            transform: translateX(8px);
            background: rgba(239, 68, 68, 0.12);
        }

        .directory-section {
            margin: 3.5rem 0;
            padding: 2rem;
            background: var(--background-color);
            border-radius: 16px;
            position: relative;
            transition: all 0.3s ease;
            animation: fadeIn 0.6s ease-out;
        }

        .directory-section:hover {
            background: white;
            box-shadow: var(--card-shadow);
        }

        .file-summary {
            background: white;
            padding: 2rem;
            margin: 1.8rem 0;
            border-radius: 16px;
            box-shadow: var(--card-shadow);
            border-left: 4px solid var(--border-color);
            transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
            position: relative;
            overflow: hidden;
        }

        .file-summary:hover {
            border-left-color: var(--primary-color);
            transform: translateX(8px) translateY(-2px);
            box-shadow: var(--hover-shadow);
        }

        .file-summary {
            background: white;
            padding: 2rem;
            margin: 1.8rem 0;
            border-radius: 16px;
            box-shadow: var(--card-shadow);
            border-left: 4px solid var(--border-color);
            transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
            position: relative;
        }

        .file-summary:hover {
            border-left-color: var(--primary-color);
            transform: translateX(8px) translateY(-2px);
            box-shadow: var(--hover-shadow);
        }

        .icon {
            display: inline-flex;
            align-items: center;
            justify-content: center;
            width: 32px;
            height: 32px;
            border-radius: 8px;
            background: var(--primary-light);
            color: var(--primary-color);
            font-size: 1.2em;
            transition: all 0.3s ease;
        }

        .file-summary:hover .icon,
        .directory-section:hover .icon {
            transform: scale(1.1);
            background: var(--primary-color);
            color: white;
        }

        /* Smooth scrolling */
        html {
            scroll-behavior: smooth;
        }

        /* Selection style */
        ::selection {
            background: var(--primary-light);
            color: var(--primary-color);
        }

        /* Print styles */
        @media print {
            body {
                background: white;
            }
            .container {
                box-shadow: none;
                padding: 0;
            }
            .file-summary, .failed-files {
                break-inside: avoid;
                box-shadow: none;
            }
            .icon {
                display: none;
            }
        }

        /* Responsive design */
        @media (max-width: 768px) {
            body {
                padding: 1rem;
                font-size: 15px;
            }

            .container {
                padding: 1.5rem;
            }

            h1 {
                font-size: 2.2em;
                margin: 1.5rem 0 2rem;
            }

            h2 {
                font-size: 1.7em;
            }

            h3 {
                font-size: 1.4em;
            }

            .summary, .failed-files, .directory-section {
                padding: 1.5rem;
            }

            .file-summary {
                padding: 1.2rem;
            }

            .icon {
                width: 28px;
                height: 28px;
            }
        }

        /* Dark mode support */
        @media (prefers-color-scheme: dark) {
            :root {
                --primary-light: rgba(37, 99, 235, 0.15);
                --background-color: #0f172a;
                --text-color: #e2e8f0;
                --text-light: #94a3b8;
                --border-color: #1e293b;
                --error-light: rgba(239, 68, 68, 0.15);
            }

            .container, .file-summary {
                background: #1e293b;
            }

            .directory-section {
                background: #0f172a;
            }

            .directory-section:hover {
                background: #1e293b;
            }
        }
        """

    def format_failed_files(self) -> str:
        if not self.failed_files:
            return ""

        failed_files_html = ['<div class="failed-files">']
        failed_files_html.append('<h2><span class="icon">⚠️</span> 处理失败的文件</h2>')
        failed_files_html.append("<ul>")
        for fp, reason in self.failed_files:
            failed_files_html.append(
                f'<li><strong>📄 {os.path.basename(fp)}</strong><br><span style="color: var(--text-light)">{reason}</span></li>'
            )
        failed_files_html.append("</ul></div>")
        return "\n".join(failed_files_html)

    def format_file_summaries(self) -> str:
        formatted_html = []
        sorted_paths = sorted(self.file_summaries_map.keys())
        current_dir = ""

        for path in sorted_paths:
            dir_path = os.path.dirname(path)
            if dir_path != current_dir:
                if dir_path:
                    formatted_html.append('<div class="directory-section">')
                    formatted_html.append(f'<h2><span class="icon">📁</span> {dir_path}</h2>')
                    formatted_html.append('</div>')
                current_dir = dir_path

            file_name = os.path.basename(path)
            formatted_html.append('<div class="file-summary">')
            formatted_html.append(f'<h3><span class="icon">📄</span> {file_name}</h3>')
            formatted_html.append(self.md.convert(self.file_summaries_map[path]))
            formatted_html.append('</div>')

        return "\n".join(formatted_html)

    def create_document(self) -> str:
        """Generate the HTML document

        Returns:
            str: the complete HTML document as a string
        """
        return f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1">
            <title>文档总结报告</title>
            <link href="https://cdnjs.cloudflare.com/ajax/libs/inter/3.19.3/inter.css" rel="stylesheet">
            <link href="https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;600&display=swap" rel="stylesheet">
            <style>{self.css_styles}</style>
        </head>
        <body>
            <div class="container">
                <h1><span class="icon">📑</span> 文档总结报告</h1>
                <div class="summary">
                    <h2><span class="icon">📋</span> 总体摘要</h2>
                    <p>{self.md.convert(self.final_summary)}</p>
                </div>
                {self.format_failed_files()}
                <div class="details">
                    <h2><span class="icon">📚</span> 各文件详细总结</h2>
                    {self.format_file_summaries()}
                </div>
            </div>

        </body>
        </html>
        """
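All three formatters share the DocumentFormatter constructor, so the same review data can be rendered into every output format. A minimal usage sketch (the inputs are made up for illustration; python-docx and markdown must be installed):

# Hypothetical inputs, for illustration only.
final_summary = "Overall summary of the reviewed files."
file_summaries_map = {"src/app.py": "Entry point ...", "src/util.py": "Helper functions ..."}
failed_files = [("src/blob.bin", "unsupported format")]

md_text = MarkdownFormatter(final_summary, file_summaries_map, failed_files).create_document()
html_text = HtmlFormatter(final_summary, file_summaries_map, failed_files).create_document()

word = WordFormatter(final_summary, file_summaries_map, failed_files)
doc = word.create_document()      # a python-docx Document with headings numbered 1., 1.1, 1.1.1
doc.save("review_report.docx")
pdf = word.save_as_pdf("review_report.docx")  # returns None if the conversion fails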
crazy_functions/doc_fns/__init__.py (new file, empty)
crazy_functions/doc_fns/batch_file_query_doc.py (new file, 812 lines)
@@ -0,0 +1,812 @@
|
||||
import os
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
from docx import Document
|
||||
from docx.enum.style import WD_STYLE_TYPE
|
||||
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
|
||||
from docx.oxml.ns import qn
|
||||
from docx.shared import Inches, Cm
|
||||
from docx.shared import Pt, RGBColor, Inches
|
||||
from typing import Dict, List, Tuple
|
||||
import markdown
|
||||
from crazy_functions.doc_fns.conversation_doc.word_doc import convert_markdown_to_word
|
||||
|
||||
|
||||
|
||||
class DocumentFormatter(ABC):
|
||||
"""文档格式化基类,定义文档格式化的基本接口"""
|
||||
|
||||
def __init__(self, final_summary: str, file_summaries_map: Dict, failed_files: List[Tuple]):
|
||||
self.final_summary = final_summary
|
||||
self.file_summaries_map = file_summaries_map
|
||||
self.failed_files = failed_files
|
||||
|
||||
@abstractmethod
|
||||
def format_failed_files(self) -> str:
|
||||
"""格式化失败文件列表"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def format_file_summaries(self) -> str:
|
||||
"""格式化文件总结内容"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def create_document(self) -> str:
|
||||
"""创建完整文档"""
|
||||
pass
|
||||
|
||||
|
||||
class WordFormatter(DocumentFormatter):
|
||||
"""Word格式文档生成器 - 符合中国政府公文格式规范(GB/T 9704-2012),并进行了优化"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.doc = Document()
|
||||
self._setup_document()
|
||||
self._create_styles()
|
||||
# 初始化三级标题编号系统
|
||||
self.numbers = {
|
||||
1: 0, # 一级标题编号
|
||||
2: 0, # 二级标题编号
|
||||
3: 0 # 三级标题编号
|
||||
}
|
||||
|
||||
def _setup_document(self):
|
||||
"""设置文档基本格式,包括页面设置和页眉"""
|
||||
sections = self.doc.sections
|
||||
for section in sections:
|
||||
# 设置页面大小为A4
|
||||
section.page_width = Cm(21)
|
||||
section.page_height = Cm(29.7)
|
||||
# 设置页边距
|
||||
section.top_margin = Cm(3.7) # 上边距37mm
|
||||
section.bottom_margin = Cm(3.5) # 下边距35mm
|
||||
section.left_margin = Cm(2.8) # 左边距28mm
|
||||
section.right_margin = Cm(2.6) # 右边距26mm
|
||||
# 设置页眉页脚距离
|
||||
section.header_distance = Cm(2.0)
|
||||
section.footer_distance = Cm(2.0)
|
||||
|
||||
# 添加页眉
|
||||
header = section.header
|
||||
header_para = header.paragraphs[0]
|
||||
header_para.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
|
||||
header_run = header_para.add_run("该文档由GPT-academic生成")
|
||||
header_run.font.name = '仿宋'
|
||||
header_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
|
||||
header_run.font.size = Pt(9)
|
||||
|
||||
def _create_styles(self):
|
||||
"""创建文档样式"""
|
||||
# 创建正文样式
|
||||
style = self.doc.styles.add_style('Normal_Custom', WD_STYLE_TYPE.PARAGRAPH)
|
||||
style.font.name = '仿宋'
|
||||
style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
|
||||
style.font.size = Pt(14)
|
||||
style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
|
||||
style.paragraph_format.space_after = Pt(0)
|
||||
style.paragraph_format.first_line_indent = Pt(28)
|
||||
|
||||
# 创建各级标题样式
|
||||
self._create_heading_style('Title_Custom', '方正小标宋简体', 32, WD_PARAGRAPH_ALIGNMENT.CENTER)
|
||||
self._create_heading_style('Heading1_Custom', '黑体', 22, WD_PARAGRAPH_ALIGNMENT.LEFT)
|
||||
self._create_heading_style('Heading2_Custom', '黑体', 18, WD_PARAGRAPH_ALIGNMENT.LEFT)
|
||||
self._create_heading_style('Heading3_Custom', '黑体', 16, WD_PARAGRAPH_ALIGNMENT.LEFT)
|
||||
|
||||
def _create_heading_style(self, style_name: str, font_name: str, font_size: int, alignment):
|
||||
"""创建标题样式"""
|
||||
style = self.doc.styles.add_style(style_name, WD_STYLE_TYPE.PARAGRAPH)
|
||||
style.font.name = font_name
|
||||
style._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
|
||||
style.font.size = Pt(font_size)
|
||||
style.font.bold = True
|
||||
style.paragraph_format.alignment = alignment
|
||||
style.paragraph_format.space_before = Pt(12)
|
||||
style.paragraph_format.space_after = Pt(12)
|
||||
style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
|
||||
return style
|
||||
|
||||
def _get_heading_number(self, level: int) -> str:
|
||||
"""
|
||||
生成标题编号
|
||||
|
||||
Args:
|
||||
level: 标题级别 (0-3)
|
||||
|
||||
Returns:
|
||||
str: 格式化的标题编号
|
||||
"""
|
||||
if level == 0: # 主标题不需要编号
|
||||
return ""
|
||||
|
||||
self.numbers[level] += 1 # 增加当前级别的编号
|
||||
|
||||
# 重置下级标题编号
|
||||
for i in range(level + 1, 4):
|
||||
self.numbers[i] = 0
|
||||
|
||||
# 根据级别返回不同格式的编号
|
||||
if level == 1:
|
||||
return f"{self.numbers[1]}. "
|
||||
elif level == 2:
|
||||
return f"{self.numbers[1]}.{self.numbers[2]} "
|
||||
elif level == 3:
|
||||
return f"{self.numbers[1]}.{self.numbers[2]}.{self.numbers[3]} "
|
||||
return ""
|
||||
|
||||
def _add_heading(self, text: str, level: int):
|
||||
"""
|
||||
添加带编号的标题
|
||||
|
||||
Args:
|
||||
text: 标题文本
|
||||
level: 标题级别 (0-3)
|
||||
"""
|
||||
style_map = {
|
||||
0: 'Title_Custom',
|
||||
1: 'Heading1_Custom',
|
||||
2: 'Heading2_Custom',
|
||||
3: 'Heading3_Custom'
|
||||
}
|
||||
|
||||
number = self._get_heading_number(level)
|
||||
paragraph = self.doc.add_paragraph(style=style_map[level])
|
||||
|
||||
if number:
|
||||
number_run = paragraph.add_run(number)
|
||||
font_size = 22 if level == 1 else (18 if level == 2 else 16)
|
||||
self._get_run_style(number_run, '黑体', font_size, True)
|
||||
|
||||
text_run = paragraph.add_run(text)
|
||||
font_size = 32 if level == 0 else (22 if level == 1 else (18 if level == 2 else 16))
|
||||
self._get_run_style(text_run, '黑体', font_size, True)
|
||||
|
||||
# 主标题添加日期
|
||||
if level == 0:
|
||||
date_paragraph = self.doc.add_paragraph()
|
||||
date_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
|
||||
date_run = date_paragraph.add_run(datetime.now().strftime('%Y年%m月%d日'))
|
||||
self._get_run_style(date_run, '仿宋', 16, False)
|
||||
|
||||
return paragraph
|
||||
|
||||
def _get_run_style(self, run, font_name: str, font_size: int, bold: bool = False):
|
||||
"""设置文本运行对象的样式"""
|
||||
run.font.name = font_name
|
||||
run._element.rPr.rFonts.set(qn('w:eastAsia'), font_name)
|
||||
run.font.size = Pt(font_size)
|
||||
run.font.bold = bold
|
||||
|
||||
def format_failed_files(self) -> str:
|
||||
"""格式化失败文件列表"""
|
||||
result = []
|
||||
if not self.failed_files:
|
||||
return "\n".join(result)
|
||||
|
||||
result.append("处理失败文件:")
|
||||
for fp, reason in self.failed_files:
|
||||
result.append(f"• {os.path.basename(fp)}: {reason}")
|
||||
|
||||
self._add_heading("处理失败文件", 1)
|
||||
for fp, reason in self.failed_files:
|
||||
self._add_content(f"• {os.path.basename(fp)}: {reason}", indent=False)
|
||||
self.doc.add_paragraph()
|
||||
|
||||
return "\n".join(result)
|
||||
|
||||
def _add_content(self, text: str, indent: bool = True):
|
||||
"""添加正文内容,使用convert_markdown_to_word处理文本"""
|
||||
# 使用convert_markdown_to_word处理markdown文本
|
||||
processed_text = convert_markdown_to_word(text)
|
||||
paragraph = self.doc.add_paragraph(processed_text, style='Normal_Custom')
|
||||
if not indent:
|
||||
paragraph.paragraph_format.first_line_indent = Pt(0)
|
||||
return paragraph
|
||||
|
||||
def format_file_summaries(self) -> str:
|
||||
"""
|
||||
格式化文件总结内容,确保正确的标题层级并处理markdown文本
|
||||
"""
|
||||
result = []
|
||||
# 首先对文件路径进行分组整理
|
||||
file_groups = {}
|
||||
for path in sorted(self.file_summaries_map.keys()):
|
||||
dir_path = os.path.dirname(path)
|
||||
if dir_path not in file_groups:
|
||||
file_groups[dir_path] = []
|
||||
file_groups[dir_path].append(path)
|
||||
|
||||
# 处理没有目录的文件
|
||||
root_files = file_groups.get("", [])
|
||||
if root_files:
|
||||
for path in sorted(root_files):
|
||||
file_name = os.path.basename(path)
|
||||
result.append(f"\n📄 {file_name}")
|
||||
result.append(self.file_summaries_map[path])
|
||||
# 无目录的文件作为二级标题
|
||||
self._add_heading(f"📄 {file_name}", 2)
|
||||
# 使用convert_markdown_to_word处理文件内容
|
||||
self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
|
||||
self.doc.add_paragraph()
|
||||
|
||||
# 处理有目录的文件
|
||||
for dir_path in sorted(file_groups.keys()):
|
||||
if dir_path == "": # 跳过已处理的根目录文件
|
||||
continue
|
||||
|
||||
# 添加目录作为二级标题
|
||||
result.append(f"\n📁 {dir_path}")
|
||||
self._add_heading(f"📁 {dir_path}", 2)
|
||||
|
||||
# 该目录下的所有文件作为三级标题
|
||||
for path in sorted(file_groups[dir_path]):
|
||||
file_name = os.path.basename(path)
|
||||
result.append(f"\n📄 {file_name}")
|
||||
result.append(self.file_summaries_map[path])
|
||||
|
||||
# 添加文件名作为三级标题
|
||||
self._add_heading(f"📄 {file_name}", 3)
|
||||
# 使用convert_markdown_to_word处理文件内容
|
||||
self._add_content(convert_markdown_to_word(self.file_summaries_map[path]))
|
||||
self.doc.add_paragraph()
|
||||
|
||||
return "\n".join(result)
|
||||
|
||||
|
||||
def create_document(self):
|
||||
"""创建完整Word文档并返回文档对象"""
|
||||
# 重置所有编号
|
||||
for level in self.numbers:
|
||||
self.numbers[level] = 0
|
||||
|
||||
# 添加主标题
|
||||
self._add_heading("文档总结报告", 0)
|
||||
self.doc.add_paragraph()
|
||||
|
||||
# 添加总体摘要,使用convert_markdown_to_word处理
|
||||
self._add_heading("总体摘要", 1)
|
||||
self._add_content(convert_markdown_to_word(self.final_summary))
|
||||
self.doc.add_paragraph()
|
||||
|
||||
# 添加失败文件列表(如果有)
|
||||
if self.failed_files:
|
||||
self.format_failed_files()
|
||||
|
||||
# 添加文件详细总结
|
||||
self._add_heading("各文件详细总结", 1)
|
||||
self.format_file_summaries()
|
||||
|
||||
return self.doc
|
||||
|
||||
def save_as_pdf(self, word_path, pdf_path=None):
|
||||
"""将生成的Word文档转换为PDF
|
||||
|
||||
参数:
|
||||
word_path: Word文档的路径
|
||||
pdf_path: 可选,PDF文件的输出路径。如果未指定,将使用与Word文档相同的名称和位置
|
||||
|
||||
返回:
|
||||
生成的PDF文件路径,如果转换失败则返回None
|
||||
"""
|
||||
from crazy_functions.doc_fns.conversation_doc.word2pdf import WordToPdfConverter
|
||||
try:
|
||||
pdf_path = WordToPdfConverter.convert_to_pdf(word_path, pdf_path)
|
||||
return pdf_path
|
||||
except Exception as e:
|
||||
print(f"PDF转换失败: {str(e)}")
|
||||
return None
|
||||
|
||||
|
||||
class MarkdownFormatter(DocumentFormatter):
|
||||
"""Markdown格式文档生成器"""
|
||||
|
||||
def format_failed_files(self) -> str:
|
||||
if not self.failed_files:
|
||||
return ""
|
||||
|
||||
formatted_text = ["\n## ⚠️ 处理失败的文件"]
|
||||
for fp, reason in self.failed_files:
|
||||
formatted_text.append(f"- {os.path.basename(fp)}: {reason}")
|
||||
formatted_text.append("\n---")
|
||||
return "\n".join(formatted_text)
|
||||
|
||||
def format_file_summaries(self) -> str:
|
||||
formatted_text = []
|
||||
sorted_paths = sorted(self.file_summaries_map.keys())
|
||||
current_dir = ""
|
||||
|
||||
for path in sorted_paths:
|
||||
dir_path = os.path.dirname(path)
|
||||
if dir_path != current_dir:
|
||||
if dir_path:
|
||||
formatted_text.append(f"\n## 📁 {dir_path}")
|
||||
current_dir = dir_path
|
||||
|
||||
file_name = os.path.basename(path)
|
||||
formatted_text.append(f"\n### 📄 {file_name}")
|
||||
formatted_text.append(self.file_summaries_map[path])
|
||||
formatted_text.append("\n---")
|
||||
|
||||
return "\n".join(formatted_text)
|
||||
|
||||
def create_document(self) -> str:
|
||||
document = [
|
||||
"# 📑 文档总结报告",
|
||||
"\n## 总体摘要",
|
||||
self.final_summary
|
||||
]
|
||||
|
||||
if self.failed_files:
|
||||
document.append(self.format_failed_files())
|
||||
|
||||
document.extend([
|
||||
"\n# 📚 各文件详细总结",
|
||||
self.format_file_summaries()
|
||||
])
|
||||
|
||||
return "\n".join(document)
|
||||
|
||||
|
||||
|
||||
class HtmlFormatter(DocumentFormatter):
|
||||
"""HTML格式文档生成器 - 优化版"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.md = markdown.Markdown(extensions=['extra','codehilite', 'tables','nl2br'])
|
||||
self.css_styles = """
|
||||
@keyframes fadeIn {
|
||||
from { opacity: 0; transform: translateY(20px); }
|
||||
to { opacity: 1; transform: translateY(0); }
|
||||
}
|
||||
|
||||
@keyframes slideIn {
|
||||
from { transform: translateX(-20px); opacity: 0; }
|
||||
to { transform: translateX(0); opacity: 1; }
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
0% { transform: scale(1); }
|
||||
50% { transform: scale(1.05); }
|
||||
100% { transform: scale(1); }
|
||||
}
|
||||
|
||||
:root {
|
||||
/* Enhanced color palette */
|
||||
--primary-color: #2563eb;
|
||||
--primary-light: #eff6ff;
|
||||
--secondary-color: #1e293b;
|
||||
--background-color: #f8fafc;
|
||||
--text-color: #334155;
|
||||
--text-light: #64748b;
|
||||
--border-color: #e2e8f0;
|
||||
--error-color: #ef4444;
|
||||
--error-light: #fef2f2;
|
||||
--success-color: #22c55e;
|
||||
--warning-color: #f59e0b;
|
||||
--card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
|
||||
--hover-shadow: 0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px -6px rgb(0 0 0 / 0.1);
|
||||
|
||||
/* Typography */
|
||||
--heading-font: "Plus Jakarta Sans", system-ui, sans-serif;
|
||||
--body-font: "Inter", system-ui, sans-serif;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: var(--body-font);
|
||||
line-height: 1.8;
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
padding: 2rem;
|
||||
color: var(--text-color);
|
||||
background-color: var(--background-color);
|
||||
font-size: 16px;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
.container {
|
||||
background: white;
|
||||
padding: 3rem;
|
||||
border-radius: 24px;
|
||||
box-shadow: var(--card-shadow);
|
||||
transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
|
||||
animation: fadeIn 0.6s ease-out;
|
||||
border: 1px solid var(--border-color);
|
||||
}
|
||||
|
||||
.container:hover {
|
||||
box-shadow: var(--hover-shadow);
|
||||
transform: translateY(-2px);
|
||||
}
|
||||
|
||||
h1, h2, h3 {
|
||||
font-family: var(--heading-font);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
h1 {
|
||||
color: var(--primary-color);
|
||||
font-size: 2.8em;
|
||||
text-align: center;
|
||||
margin: 2rem 0 3rem;
|
||||
padding-bottom: 1.5rem;
|
||||
border-bottom: 3px solid var(--primary-color);
|
||||
letter-spacing: -0.03em;
|
||||
position: relative;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
gap: 1rem;
|
||||
}
|
||||
|
||||
h1::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
bottom: -3px;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
width: 120px;
|
||||
height: 3px;
|
||||
background: linear-gradient(90deg, var(--primary-color), var(--primary-light));
|
||||
border-radius: 3px;
|
||||
transition: width 0.3s ease;
|
||||
}
|
||||
|
||||
h1:hover::after {
|
||||
width: 180px;
|
||||
}
|
||||
|
||||
h2 {
|
||||
color: var(--secondary-color);
|
||||
font-size: 1.9em;
|
||||
margin: 2.5rem 0 1.5rem;
|
||||
padding-left: 1.2rem;
|
||||
border-left: 4px solid var(--primary-color);
|
||||
letter-spacing: -0.02em;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
transition: all 0.3s ease;
|
        }

        h2:hover {
            color: var(--primary-color);
            transform: translateX(5px);
        }

        h3 {
            color: var(--text-color);
            font-size: 1.5em;
            margin: 2rem 0 1rem;
            padding-bottom: 0.8rem;
            border-bottom: 2px solid var(--border-color);
            transition: all 0.3s ease;
            display: flex;
            align-items: center;
            gap: 0.8rem;
        }

        h3:hover {
            color: var(--primary-color);
            border-bottom-color: var(--primary-color);
        }

        .summary {
            background: var(--primary-light);
            padding: 2.5rem;
            border-radius: 16px;
            margin: 2.5rem 0;
            box-shadow: 0 4px 6px -1px rgba(37, 99, 235, 0.1);
            position: relative;
            overflow: hidden;
            transition: transform 0.3s ease, box-shadow 0.3s ease;
            animation: slideIn 0.5s ease-out;
        }

        .summary:hover {
            transform: translateY(-3px);
            box-shadow: 0 8px 12px -2px rgba(37, 99, 235, 0.15);
        }

        .summary::before {
            content: '';
            position: absolute;
            top: 0;
            left: 0;
            width: 4px;
            height: 100%;
            background: linear-gradient(to bottom, var(--primary-color), rgba(37, 99, 235, 0.6));
        }

        .summary p {
            margin: 1.2rem 0;
            line-height: 1.9;
            color: var(--text-color);
            transition: color 0.3s ease;
        }

        .summary:hover p {
            color: var(--secondary-color);
        }

        .details {
            margin-top: 3.5rem;
            padding-top: 2.5rem;
            border-top: 2px dashed var(--border-color);
            animation: fadeIn 0.8s ease-out;
        }

        .failed-files {
            background: var(--error-light);
            padding: 2rem;
            border-radius: 16px;
            margin: 3rem 0;
            border-left: 4px solid var(--error-color);
            position: relative;
            transition: all 0.3s ease;
            animation: slideIn 0.5s ease-out;
        }

        .failed-files:hover {
            transform: translateX(5px);
            box-shadow: 0 8px 15px -3px rgba(239, 68, 68, 0.1);
        }

        .failed-files h2 {
            color: var(--error-color);
            border-left: none;
            padding-left: 0;
        }

        .failed-files ul {
            margin: 1.8rem 0;
            padding-left: 1.2rem;
            list-style-type: none;
        }

        .failed-files li {
            margin: 1.2rem 0;
            padding: 1.2rem 1.8rem;
            background: rgba(239, 68, 68, 0.08);
            border-radius: 12px;
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
        }

        .failed-files li:hover {
            transform: translateX(8px);
            background: rgba(239, 68, 68, 0.12);
        }

        .directory-section {
            margin: 3.5rem 0;
            padding: 2rem;
            background: var(--background-color);
            border-radius: 16px;
            position: relative;
            transition: all 0.3s ease;
            animation: fadeIn 0.6s ease-out;
        }

        .directory-section:hover {
            background: white;
            box-shadow: var(--card-shadow);
        }

        .file-summary {
            background: white;
            padding: 2rem;
            margin: 1.8rem 0;
            border-radius: 16px;
            box-shadow: var(--card-shadow);
            border-left: 4px solid var(--border-color);
            transition: all 0.4s cubic-bezier(0.4, 0, 0.2, 1);
            position: relative;
            overflow: hidden;
        }

        .file-summary:hover {
            border-left-color: var(--primary-color);
            transform: translateX(8px) translateY(-2px);
            box-shadow: var(--hover-shadow);
        }
        .icon {
            display: inline-flex;
            align-items: center;
            justify-content: center;
            width: 32px;
            height: 32px;
            border-radius: 8px;
            background: var(--primary-light);
            color: var(--primary-color);
            font-size: 1.2em;
            transition: all 0.3s ease;
        }

        .file-summary:hover .icon,
        .directory-section:hover .icon {
            transform: scale(1.1);
            background: var(--primary-color);
            color: white;
        }

        /* Smooth scrolling */
        html {
            scroll-behavior: smooth;
        }

        /* Selection style */
        ::selection {
            background: var(--primary-light);
            color: var(--primary-color);
        }

        /* Print styles */
        @media print {
            body {
                background: white;
            }
            .container {
                box-shadow: none;
                padding: 0;
            }
            .file-summary, .failed-files {
                break-inside: avoid;
                box-shadow: none;
            }
            .icon {
                display: none;
            }
        }

        /* Responsive design */
        @media (max-width: 768px) {
            body {
                padding: 1rem;
                font-size: 15px;
            }

            .container {
                padding: 1.5rem;
            }

            h1 {
                font-size: 2.2em;
                margin: 1.5rem 0 2rem;
            }

            h2 {
                font-size: 1.7em;
            }

            h3 {
                font-size: 1.4em;
            }

            .summary, .failed-files, .directory-section {
                padding: 1.5rem;
            }

            .file-summary {
                padding: 1.2rem;
            }

            .icon {
                width: 28px;
                height: 28px;
            }
        }

        /* Dark mode support */
        @media (prefers-color-scheme: dark) {
            :root {
                --primary-light: rgba(37, 99, 235, 0.15);
                --background-color: #0f172a;
                --text-color: #e2e8f0;
                --text-light: #94a3b8;
                --border-color: #1e293b;
                --error-light: rgba(239, 68, 68, 0.15);
            }

            .container, .file-summary {
                background: #1e293b;
            }

            .directory-section {
                background: #0f172a;
            }

            .directory-section:hover {
                background: #1e293b;
            }
        }
        """
    def format_failed_files(self) -> str:
        if not self.failed_files:
            return ""

        failed_files_html = ['<div class="failed-files">']
        failed_files_html.append('<h2><span class="icon">⚠️</span> 处理失败的文件</h2>')
        failed_files_html.append("<ul>")
        for fp, reason in self.failed_files:
            failed_files_html.append(
                f'<li><strong>📄 {os.path.basename(fp)}</strong><br><span style="color: var(--text-light)">{reason}</span></li>'
            )
        failed_files_html.append("</ul></div>")
        return "\n".join(failed_files_html)

    def format_file_summaries(self) -> str:
        formatted_html = []
        sorted_paths = sorted(self.file_summaries_map.keys())
        current_dir = ""

        for path in sorted_paths:
            dir_path = os.path.dirname(path)
            if dir_path != current_dir:
                if dir_path:
                    formatted_html.append('<div class="directory-section">')
                    formatted_html.append(f'<h2><span class="icon">📁</span> {dir_path}</h2>')
                    formatted_html.append('</div>')
                current_dir = dir_path

            file_name = os.path.basename(path)
            formatted_html.append('<div class="file-summary">')
            formatted_html.append(f'<h3><span class="icon">📄</span> {file_name}</h3>')
            formatted_html.append(self.md.convert(self.file_summaries_map[path]))
            formatted_html.append('</div>')

        return "\n".join(formatted_html)

    def create_document(self) -> str:
        """Generate the HTML document.

        Returns:
            str: the complete HTML document as a string
        """
        return f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1">
            <title>文档总结报告</title>
            <link href="https://cdnjs.cloudflare.com/ajax/libs/inter/3.19.3/inter.css" rel="stylesheet">
            <link href="https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;600&display=swap" rel="stylesheet">
            <style>{self.css_styles}</style>
        </head>
        <body>
            <div class="container">
                <h1><span class="icon">📑</span> 文档总结报告</h1>
                <div class="summary">
                    <h2><span class="icon">📋</span> 总体摘要</h2>
                    <p>{self.md.convert(self.final_summary)}</p>
                </div>
                {self.format_failed_files()}
                <div class="details">
                    <h2><span class="icon">📚</span> 各文件详细总结</h2>
                    {self.format_file_summaries()}
                </div>
            </div>
        </body>
        </html>
        """
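For orientation, a minimal usage sketch of the formatter above. The enclosing class name and constructor are defined earlier in this file and are not visible in this hunk, so SummaryHtmlFormatter and its arguments are hypothetical stand-ins; only create_document() is taken from the diff.

# Hypothetical sketch: only create_document() is shown in this hunk;
# the class name and constructor arguments below are stand-ins.
formatter = SummaryHtmlFormatter(final_summary, file_summaries_map, failed_files)
html = formatter.create_document()  # full HTML report as one string
with open("summary_report.html", "w", encoding="utf-8") as f:
    f.write(html)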
237 crazy_functions/doc_fns/content_folder.py (Normal file)
@@ -0,0 +1,237 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, Optional, Type, TypeVar, Generic, Union

from dataclasses import dataclass
from enum import Enum, auto
import logging
from datetime import datetime

# Set up logging
logger = logging.getLogger(__name__)


# Custom exception classes
class FoldingError(Exception):
    """Base class for folding-related exceptions"""
    pass


class FormattingError(FoldingError):
    """Raised when formatting fails"""
    pass


class MetadataError(FoldingError):
    """Raised for metadata-related errors"""
    pass


class ValidationError(FoldingError):
    """Raised for validation errors"""
    pass


class FoldingStyle(Enum):
    """Folding style enumeration"""
    SIMPLE = auto()    # simple folding
    DETAILED = auto()  # detailed folding (with extra information)
    NESTED = auto()    # nested folding


@dataclass
class FoldingOptions:
    """Folding option configuration"""
    style: FoldingStyle = FoldingStyle.DETAILED
    code_language: Optional[str] = None  # language of the code block
    show_timestamp: bool = False         # whether to show a timestamp
    indent_level: int = 0                # indentation level
    custom_css: Optional[str] = None     # custom CSS class


T = TypeVar('T')  # generic type parameter


class BaseMetadata(ABC):
    """Metadata base class"""

    @abstractmethod
    def validate(self) -> bool:
        """Validate the metadata"""
        pass

    def _validate_non_empty_str(self, value: Optional[str]) -> bool:
        """Check that a string is non-empty"""
        return bool(value and value.strip())


@dataclass
class FileMetadata(BaseMetadata):
    """File metadata"""
    rel_path: str
    size: float
    last_modified: Optional[datetime] = None
    mime_type: Optional[str] = None
    encoding: str = 'utf-8'

    def validate(self) -> bool:
        """Validate the file metadata"""
        try:
            if not self._validate_non_empty_str(self.rel_path):
                return False
            if self.size < 0:
                return False
            return True
        except Exception as e:
            logger.error(f"File metadata validation error: {str(e)}")
            return False


class ContentFormatter(ABC, Generic[T]):
    """Abstract base class for content formatters.

    Takes a generic type parameter so a concrete metadata type can be specified.
    """

    @abstractmethod
    def format(self,
               content: str,
               metadata: T,
               options: Optional[FoldingOptions] = None) -> str:
        """Format the content.

        Args:
            content: the content to format
            metadata: typed metadata
            options: folding options

        Returns:
            str: the formatted content

        Raises:
            FormattingError: if formatting fails
        """
        pass

    def _create_summary(self, metadata: T) -> str:
        """Create the fold summary; may be overridden by subclasses"""
        return str(metadata)

    def _format_content_block(self,
                              content: str,
                              options: Optional[FoldingOptions]) -> str:
        """Format a content block, handling special formats such as code blocks"""
        if not options:
            return content

        if options.code_language:
            return f"```{options.code_language}\n{content}\n```"
        return content

    def _add_indent(self, text: str, level: int) -> str:
        """Apply indentation"""
        if level <= 0:
            return text
        indent = " " * level
        return "\n".join(indent + line for line in text.splitlines())


class FileContentFormatter(ContentFormatter[FileMetadata]):
    """File content formatter"""

    def format(self,
               content: str,
               metadata: FileMetadata,
               options: Optional[FoldingOptions] = None) -> str:
        """Format file content"""
        if not metadata.validate():
            raise MetadataError("Invalid file metadata")

        try:
            options = options or FoldingOptions()

            # Build the summary line
            summary_parts = [
                f"{metadata.rel_path} ({metadata.size:.2f}MB)",
                f"Type: {metadata.mime_type}" if metadata.mime_type else None,
                (f"Modified: {metadata.last_modified.strftime('%Y-%m-%d %H:%M:%S')}"
                 if metadata.last_modified and options.show_timestamp else None)
            ]
            summary = " | ".join(filter(None, summary_parts))

            # Build the HTML class attribute
            css_class = f' class="{options.custom_css}"' if options.custom_css else ''

            # Format the content
            formatted_content = self._format_content_block(content, options)

            # Assemble the final result
            result = (
                f'<details{css_class}><summary>{summary}</summary>\n\n'
                f'{formatted_content}\n\n'
                f'</details>\n\n'
            )

            return self._add_indent(result, options.indent_level)

        except Exception as e:
            logger.error(f"Error formatting file content: {str(e)}")
            raise FormattingError(f"Failed to format file content: {str(e)}")


class ContentFoldingManager:
    """Content folding manager"""

    def __init__(self):
        """Initialize the folding manager"""
        self._formatters: Dict[str, ContentFormatter] = {}
        self._register_default_formatters()

    def _register_default_formatters(self) -> None:
        """Register the default formatters"""
        self.register_formatter('file', FileContentFormatter())

    def register_formatter(self, name: str, formatter: ContentFormatter) -> None:
        """Register a new formatter"""
        if not isinstance(formatter, ContentFormatter):
            raise TypeError("Formatter must implement ContentFormatter interface")
        self._formatters[name] = formatter

    def _guess_language(self, extension: str) -> Optional[str]:
        """Guess the programming language from a file extension"""
        extension = extension.lower().lstrip('.')
        language_map = {
            'py': 'python',
            'js': 'javascript',
            'java': 'java',
            'cpp': 'cpp',
            'cs': 'csharp',
            'html': 'html',
            'css': 'css',
            'md': 'markdown',
            'json': 'json',
            'xml': 'xml',
            'sql': 'sql',
            'sh': 'bash',
            'yaml': 'yaml',
            'yml': 'yaml',
            'txt': None  # plain text needs no language tag
        }
        return language_map.get(extension)

    def format_content(self,
                       content: str,
                       formatter_type: str,
                       metadata: Union[FileMetadata],
                       options: Optional[FoldingOptions] = None) -> str:
        """Format content with the named formatter"""
        formatter = self._formatters.get(formatter_type)
        if not formatter:
            raise KeyError(f"No formatter registered for type: {formatter_type}")

        if not isinstance(metadata, FileMetadata):
            raise TypeError("Invalid metadata type")

        return formatter.format(content, metadata, options)
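A minimal usage sketch of the classes above; the file path, size, and content are illustrative values, everything else is taken from this file as-is.

from datetime import datetime

manager = ContentFoldingManager()
# Illustrative metadata for a small Python file (size is in MB)
meta = FileMetadata(rel_path="src/app.py", size=0.12,
                    last_modified=datetime.now(), mime_type="text/x-python")
opts = FoldingOptions(code_language=manager._guess_language(".py"), show_timestamp=True)
folded = manager.format_content("print('hello')", formatter_type="file",
                                metadata=meta, options=opts)
# folded is a <details>/<summary> block whose summary line reads like:
#   src/app.py (0.12MB) | Type: text/x-python | Modified: ...
print(folded)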
211 crazy_functions/doc_fns/conversation_doc/excel_doc.py (Normal file)
@@ -0,0 +1,211 @@
import re
import os
import pandas as pd
from datetime import datetime
from openpyxl import Workbook


class ExcelTableFormatter:
    """Converts Markdown tables found in chat history into an Excel workbook"""

    def __init__(self):
        """Initialize the Excel document object"""
        self.workbook = Workbook()
        self._table_count = 0
        self._current_sheet = None

    def _normalize_table_row(self, row):
        """Normalize a table row, handling the different separator cases"""
        row = row.strip()
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
        return [cell.strip() for cell in row.split('|')]

    def _is_separator_row(self, row):
        """Check whether a row is a separator row (made of - or :)"""
        clean_row = re.sub(r'[\s|]', '', row)
        return bool(re.match(r'^[-:]+$', clean_row))

    def _extract_tables_from_text(self, text):
        """Extract all tables from a block of text"""
        if not isinstance(text, str):
            return []

        tables = []
        current_table = []
        is_in_table = False

        for line in text.split('\n'):
            line = line.strip()
            if not line:
                if is_in_table and current_table:
                    if len(current_table) >= 2:
                        tables.append(current_table)
                    current_table = []
                    is_in_table = False
                continue

            if '|' in line:
                if not is_in_table:
                    is_in_table = True
                current_table.append(line)
            else:
                if is_in_table and current_table:
                    if len(current_table) >= 2:
                        tables.append(current_table)
                    current_table = []
                    is_in_table = False

        if is_in_table and current_table and len(current_table) >= 2:
            tables.append(current_table)

        return tables

    def _parse_table(self, table_lines):
        """Parse table lines into structured data"""
        try:
            headers = self._normalize_table_row(table_lines[0])

            separator_index = next(
                (i for i, line in enumerate(table_lines) if self._is_separator_row(line)),
                1
            )

            data_rows = []
            for line in table_lines[separator_index + 1:]:
                cells = self._normalize_table_row(line)
                # Pad or trim cells so each row matches the header width
                while len(cells) < len(headers):
                    cells.append('')
                cells = cells[:len(headers)]
                data_rows.append(cells)

            if headers and data_rows:
                return {
                    'headers': headers,
                    'data': data_rows
                }
        except Exception as e:
            print(f"解析表格时发生错误: {str(e)}")

        return None

    def _create_sheet(self, question_num, table_num):
        """Create a new worksheet"""
        sheet_name = f'Q{question_num}_T{table_num}'
        if len(sheet_name) > 31:
            sheet_name = f'Table{self._table_count}'

        if sheet_name in self.workbook.sheetnames:
            sheet_name = f'{sheet_name}_{datetime.now().strftime("%H%M%S")}'

        return self.workbook.create_sheet(title=sheet_name)

    def create_document(self, history):
        """
        Process all tables in the chat history and build the Excel document.

        Args:
            history: chat history list

        Returns:
            Workbook: the finished workbook, or None if no tables were found
        """
        has_tables = False

        # Remove the default sheet created by openpyxl
        default_sheet = self.workbook['Sheet']
        self.workbook.remove(default_sheet)

        # Iterate over the answers
        for i in range(1, len(history), 2):
            answer = history[i]
            tables = self._extract_tables_from_text(answer)

            for table_lines in tables:
                parsed_table = self._parse_table(table_lines)
                if parsed_table:
                    self._table_count += 1
                    sheet = self._create_sheet(i // 2 + 1, self._table_count)

                    # Write the header row
                    for col, header in enumerate(parsed_table['headers'], 1):
                        sheet.cell(row=1, column=col, value=header)

                    # Write the data rows
                    for row_idx, row_data in enumerate(parsed_table['data'], 2):
                        for col_idx, value in enumerate(row_data, 1):
                            sheet.cell(row=row_idx, column=col_idx, value=value)

                    has_tables = True

        return self.workbook if has_tables else None


def save_chat_tables(history, save_dir, base_name):
    """
    Save the tables found in a chat history to an Excel file.

    Args:
        history: chat history list
        save_dir: directory to save into
        base_name: base file name

    Returns:
        list: the list of saved file paths
    """
    result_files = []

    try:
        # Build the Excel document
        excel_formatter = ExcelTableFormatter()
        workbook = excel_formatter.create_document(history)

        if workbook is not None:
            # Make sure the target directory exists
            os.makedirs(save_dir, exist_ok=True)

            # Compose the Excel file path
            excel_file = os.path.join(save_dir, base_name + '.xlsx')

            # Save the workbook
            workbook.save(excel_file)
            result_files.append(excel_file)
            print(f"已保存表格到Excel文件: {excel_file}")
    except Exception as e:
        print(f"保存Excel格式失败: {str(e)}")

    return result_files


# Usage example
if __name__ == "__main__":
    # Sample chat history
    history = [
        "问题1",
        """这是第一个表格:
| A | B | C |
|---|---|---|
| 1 | 2 | 3 |""",

        "问题2",
        "这是没有表格的回答",

        "问题3",
        """回答包含多个表格:
| Name | Age |
|------|-----|
| Tom | 20 |

第二个表格:
| X | Y |
|---|---|
| 1 | 2 |"""
    ]

    # Save the tables
    save_dir = "output"
    base_name = "chat_tables"
    saved_files = save_chat_tables(history, save_dir, base_name)
190 crazy_functions/doc_fns/conversation_doc/html_doc.py (Normal file)
@@ -0,0 +1,190 @@
class HtmlFormatter:
    """Chat history HTML generator"""

    def __init__(self, chatbot, history):
        self.chatbot = chatbot
        self.history = history
        self.css_styles = """
        :root {
            --primary-color: #2563eb;
            --primary-light: #eff6ff;
            --secondary-color: #1e293b;
            --background-color: #f8fafc;
            --text-color: #334155;
            --border-color: #e2e8f0;
            --card-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1);
        }

        body {
            font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            line-height: 1.8;
            margin: 0;
            padding: 2rem;
            color: var(--text-color);
            background-color: var(--background-color);
        }

        .container {
            max-width: 1200px;
            margin: 0 auto;
            background: white;
            padding: 2rem;
            border-radius: 16px;
            box-shadow: var(--card-shadow);
        }
        ::selection {
            background: var(--primary-light);
            color: var(--primary-color);
        }
        @keyframes fadeIn {
            from { opacity: 0; transform: translateY(20px); }
            to { opacity: 1; transform: translateY(0); }
        }

        @keyframes slideIn {
            from { transform: translateX(-20px); opacity: 0; }
            to { transform: translateX(0); opacity: 1; }
        }

        .container {
            animation: fadeIn 0.6s ease-out;
        }

        .QaBox {
            animation: slideIn 0.5s ease-out;
            transition: all 0.3s ease;
        }

        .QaBox:hover {
            transform: translateX(5px);
        }
        .Question, .Answer, .historyBox {
            transition: all 0.3s ease;
        }
        .chat-title {
            color: var(--primary-color);
            font-size: 2em;
            text-align: center;
            margin: 1rem 0 2rem;
            padding-bottom: 1rem;
            border-bottom: 2px solid var(--primary-color);
        }

        .chat-body {
            display: flex;
            flex-direction: column;
            gap: 1.5rem;
            margin: 2rem 0;
        }

        .QaBox {
            background: white;
            padding: 1.5rem;
            border-radius: 8px;
            border-left: 4px solid var(--primary-color);
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
            margin-bottom: 1.5rem;
        }

        .Question {
            color: var(--secondary-color);
            font-weight: 500;
            margin-bottom: 1rem;
        }

        .Answer {
            color: var(--text-color);
            background: var(--primary-light);
            padding: 1rem;
            border-radius: 6px;
        }

        .history-section {
            margin-top: 3rem;
            padding-top: 2rem;
            border-top: 2px solid var(--border-color);
        }

        .history-title {
            color: var(--secondary-color);
            font-size: 1.5em;
            margin-bottom: 1.5rem;
            text-align: center;
        }

        .historyBox {
            background: white;
            padding: 1rem;
            margin: 0.5rem 0;
            border-radius: 6px;
            border: 1px solid var(--border-color);
        }

        @media (prefers-color-scheme: dark) {
            :root {
                --background-color: #0f172a;
                --text-color: #e2e8f0;
                --border-color: #1e293b;
            }

            .container, .QaBox {
                background: #1e293b;
            }
        }
        """

    def format_chat_content(self) -> str:
        """Format the chat content"""
        chat_content = []
        for q, a in self.chatbot:
            question = str(q) if q is not None else ""
            answer = str(a) if a is not None else ""
            chat_content.append(f'''
                <div class="QaBox">
                    <div class="Question">{question}</div>
                    <div class="Answer">{answer}</div>
                </div>
            ''')
        return "\n".join(chat_content)

    def format_history_content(self) -> str:
        """Format the history entries"""
        if not self.history:
            return ""

        history_content = []
        for entry in self.history:
            history_content.append(f'''
                <div class="historyBox">
                    <div class="entry">{entry}</div>
                </div>
            ''')
        return "\n".join(history_content)

    def create_document(self) -> str:
        """Generate the complete HTML document.

        Returns:
            str: the complete HTML document as a string
        """
        return f"""
        <!DOCTYPE html>
        <html lang="zh-CN">
        <head>
            <meta charset="utf-8">
            <meta name="viewport" content="width=device-width, initial-scale=1">
            <title>对话存档</title>
            <style>{self.css_styles}</style>
        </head>
        <body>
            <div class="container">
                <h1 class="chat-title">对话存档</h1>
                <div class="chat-body">
                    {self.format_chat_content()}
                </div>
            </div>
        </body>
        </html>
        """
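A minimal usage sketch with illustrative data; chatbot is a list of (question, answer) pairs. Note that format_history_content() is defined above but not called by create_document(), so history entries do not appear in the generated page.

# Illustrative conversation data
chatbot = [("什么是Transformer?", "Transformer 是一种基于注意力机制的神经网络架构。")]
history = ["什么是Transformer?", "Transformer 是一种基于注意力机制的神经网络架构。"]

formatter = HtmlFormatter(chatbot, history)
html = formatter.create_document()
with open("chat_archive.html", "w", encoding="utf-8") as f:
    f.write(html)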
39 crazy_functions/doc_fns/conversation_doc/markdown_doc.py (Normal file)
@@ -0,0 +1,39 @@
class MarkdownFormatter:
    """Markdown document generator for conversation records"""

    def __init__(self):
        self.content = []

    def _add_content(self, text: str):
        """Append body text"""
        if text:
            self.content.append(f"\n{text}\n")

    def create_document(self, history: list) -> str:
        """
        Build the complete Markdown document.
        Args:
            history: history list; even indices hold questions, odd indices hold answers
        Returns:
            str: the generated Markdown text
        """
        self.content = []

        # Walk the question/answer pairs
        for i in range(0, len(history), 2):
            question = history[i]
            # Guard against an odd-length history (question without an answer)
            answer = history[i + 1] if i + 1 < len(history) else ""

            # Add the question
            self.content.append(f"\n### 问题 {i//2 + 1}")
            self._add_content(question)

            # Add the answer
            self.content.append(f"\n### 回答 {i//2 + 1}")
            self._add_content(answer)

            # Add a horizontal rule
            self.content.append("\n---\n")

        return "\n".join(self.content)
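A minimal usage sketch; the history values are illustrative.

formatter = MarkdownFormatter()
md_text = formatter.create_document(["问题一", "回答一", "问题二", "回答二"])
with open("chat.md", "w", encoding="utf-8") as f:
    f.write(md_text)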
172 crazy_functions/doc_fns/conversation_doc/pdf_doc.py (Normal file)
@@ -0,0 +1,172 @@
from datetime import datetime
import os
import re
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont

def convert_markdown_to_pdf(markdown_text):
    """Convert Markdown text into plain text for the PDF"""
    if not markdown_text:
        return ""

    # Normalize line endings
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # Handle headers, bold and italic
    markdown_text = re.sub(r'^#\s+(.+)$', r'\1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)

    # Handle lists
    markdown_text = re.sub(r'^\s*[-*+]\s+(.+?)(?=\n|$)', r'• \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^\s*\d+\.\s+(.+?)(?=\n|$)', r'\1', markdown_text, flags=re.MULTILINE)

    # Handle links
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1', markdown_text)

    # Handle paragraphs
    markdown_text = re.sub(r'\n{2,}', '\n', markdown_text)
    markdown_text = re.sub(r'(?<!\n)(?<!^)(?<!•\s)(?<!\d\.\s)\n(?![\s•\d])', '\n\n', markdown_text, flags=re.MULTILINE)

    # Clean up whitespace
    markdown_text = re.sub(r' +', ' ', markdown_text)
    markdown_text = re.sub(r'(?m)^\s+|\s+$', '', markdown_text)

    return markdown_text.strip()

class PDFFormatter:
    """Chat history PDF generator, using the Noto Sans CJK font"""

    def __init__(self):
        self._init_reportlab()
        self._register_fonts()
        self.styles = self._get_reportlab_lib()['getSampleStyleSheet']()
        self._create_styles()

    def _init_reportlab(self):
        """Initialize the ReportLab components"""
        from reportlab.lib.pagesizes import A4
        from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
        from reportlab.lib.units import cm
        from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer

        self._lib = {
            'A4': A4,
            'getSampleStyleSheet': getSampleStyleSheet,
            'ParagraphStyle': ParagraphStyle,
            'cm': cm
        }

        self._platypus = {
            'SimpleDocTemplate': SimpleDocTemplate,
            'Paragraph': Paragraph,
            'Spacer': Spacer
        }

    def _get_reportlab_lib(self):
        return self._lib

    def _get_reportlab_platypus(self):
        return self._platypus

    def _register_fonts(self):
        """Register the Noto Sans CJK font"""
        possible_font_paths = [
            '/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc',
            '/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc',
            '/usr/share/fonts/noto/NotoSansCJK-Regular.ttc'
        ]

        font_registered = False
        for path in possible_font_paths:
            if os.path.exists(path):
                try:
                    pdfmetrics.registerFont(TTFont('NotoSansCJK', path))
                    font_registered = True
                    break
                except Exception:
                    continue

        if not font_registered:
            print("Warning: Could not find Noto Sans CJK font. Using fallback font.")
            self.font_name = 'Helvetica'
        else:
            self.font_name = 'NotoSansCJK'

    def _create_styles(self):
        """Create the document styles"""
        ParagraphStyle = self._lib['ParagraphStyle']

        # Title style
        self.styles.add(ParagraphStyle(
            name='Title_Custom',
            fontName=self.font_name,
            fontSize=24,
            leading=38,
            alignment=1,
            spaceAfter=32
        ))

        # Date style
        self.styles.add(ParagraphStyle(
            name='Date_Style',
            fontName=self.font_name,
            fontSize=16,
            leading=20,
            alignment=1,
            spaceAfter=20
        ))

        # Question style
        self.styles.add(ParagraphStyle(
            name='Question_Style',
            fontName=self.font_name,
            fontSize=12,
            leading=18,
            leftIndent=28,
            spaceAfter=6
        ))

        # Answer style
        self.styles.add(ParagraphStyle(
            name='Answer_Style',
            fontName=self.font_name,
            fontSize=12,
            leading=18,
            leftIndent=28,
            spaceAfter=12
        ))

    def create_document(self, history, output_path):
        """Generate the PDF document"""
        # Create the PDF document
        doc = self._platypus['SimpleDocTemplate'](
            output_path,
            pagesize=self._lib['A4'],
            rightMargin=2.6 * self._lib['cm'],
            leftMargin=2.8 * self._lib['cm'],
            topMargin=3.7 * self._lib['cm'],
            bottomMargin=3.5 * self._lib['cm']
        )

        # Build the content
        story = []
        Paragraph = self._platypus['Paragraph']

        # Add the conversation content
        for i in range(0, len(history), 2):
            question = history[i]
            answer = convert_markdown_to_pdf(history[i + 1]) if i + 1 < len(history) else ""

            if question:
                q_text = f'问题 {i // 2 + 1}:{str(question)}'
                story.append(Paragraph(q_text, self.styles['Question_Style']))

            if answer:
                a_text = f'回答 {i // 2 + 1}:{str(answer)}'
                story.append(Paragraph(a_text, self.styles['Answer_Style']))

        # Build the PDF
        doc.build(story)

        return doc
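A minimal usage sketch, assuming reportlab is installed and, ideally, a Noto Sans CJK font is present on the system (otherwise Helvetica is used as the fallback registered above).

# Illustrative history; writes the PDF to chat.pdf
formatter = PDFFormatter()
formatter.create_document(["问题一", "回答一"], "chat.pdf")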
79 crazy_functions/doc_fns/conversation_doc/txt_doc.py (Normal file)
@@ -0,0 +1,79 @@
import re


def convert_markdown_to_txt(markdown_text):
    """Convert markdown text to plain text while preserving formatting"""
    # Standardize line endings
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # 1. Handle headers but keep their formatting instead of removing them
    markdown_text = re.sub(r'^#\s+(.+)$', r'# \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^##\s+(.+)$', r'## \1', markdown_text, flags=re.MULTILINE)
    markdown_text = re.sub(r'^###\s+(.+)$', r'### \1', markdown_text, flags=re.MULTILINE)

    # 2. Handle bold and italic - simply remove markers
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)

    # 3. Handle lists but preserve formatting
    markdown_text = re.sub(r'^\s*[-*+]\s+(.+?)(?=\n|$)', r'• \1', markdown_text, flags=re.MULTILINE)

    # 4. Handle links - keep the text and append the URL in parentheses
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', r'\1 (\2)', markdown_text)

    # 5. Handle HTML links - convert to user-friendly format
    markdown_text = re.sub(r'<a href=[\'"]([^\'"]+)[\'"](?:\s+target=[\'"][^\'"]+[\'"])?>([^<]+)</a>', r'\2 (\1)',
                           markdown_text)

    # 6. Preserve paragraph breaks
    markdown_text = re.sub(r'\n{3,}', '\n\n', markdown_text)  # normalize multiple newlines to double newlines

    # 7. Clean up extra spaces but maintain indentation
    markdown_text = re.sub(r' +', ' ', markdown_text)

    return markdown_text.strip()


class TxtFormatter:
    """Chat history TXT document generator"""

    def __init__(self):
        self.content = []
        self._setup_document()

    def _setup_document(self):
        """Initialize document with header"""
        self.content.append("=" * 50)
        self.content.append("GPT-Academic对话记录".center(48))
        self.content.append("=" * 50)

    def _format_header(self):
        """Create document header with current date"""
        from datetime import datetime
        date_str = datetime.now().strftime('%Y年%m月%d日')
        return [
            date_str.center(48),
            "\n"  # Add blank line after date
        ]

    def create_document(self, history):
        """Generate document from chat history"""
        # Add header with date
        self.content.extend(self._format_header())

        # Add conversation content
        for i in range(0, len(history), 2):
            question = history[i]
            answer = convert_markdown_to_txt(history[i + 1]) if i + 1 < len(history) else ""

            if question:
                self.content.append(f"问题 {i // 2 + 1}:{str(question)}")
                self.content.append("")  # Add blank line

            if answer:
                self.content.append(f"回答 {i // 2 + 1}:{str(answer)}")
                self.content.append("")  # Add blank line

        # Join all content with newlines
        return "\n".join(self.content)
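A minimal usage sketch; the history values are illustrative. The markdown markers in the answer are stripped by convert_markdown_to_txt, with the link rewritten as "链接 (https://example.com)".

formatter = TxtFormatter()
txt = formatter.create_document(["问题一", "**回答一**,含[链接](https://example.com)"])
with open("chat.txt", "w", encoding="utf-8") as f:
    f.write(txt)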
155 crazy_functions/doc_fns/conversation_doc/word2pdf.py (Normal file)
@@ -0,0 +1,155 @@
from docx2pdf import convert
import os
import platform
import subprocess
from typing import Union
from pathlib import Path
from datetime import datetime

class WordToPdfConverter:
    """Word-to-PDF converter"""

    @staticmethod
    def convert_to_pdf(word_path: Union[str, Path], pdf_path: Union[str, Path] = None) -> str:
        """
        Convert a Word document to PDF.

        Args:
            word_path: path of the Word document
            pdf_path: optional output path for the PDF; defaults to the same name and location as the Word document

        Returns:
            The path of the generated PDF file.

        Raises:
            An exception if the conversion fails.
        """
        try:
            # Make sure the input path is a Path object
            word_path = Path(word_path)

            # If pdf_path is not given, reuse the Word document's name
            if pdf_path is None:
                pdf_path = word_path.with_suffix('.pdf')
            else:
                pdf_path = Path(pdf_path)

            # Check the operating system
            if platform.system() == 'Linux':
                # Linux requires LibreOffice
                which_result = subprocess.run(['which', 'libreoffice'], capture_output=True, text=True)
                if which_result.returncode != 0:
                    raise RuntimeError("请先安装LibreOffice: sudo apt-get install libreoffice")

                print(f"开始转换Word文档: {word_path} 到 PDF")

                # Use subprocess instead of os.system
                result = subprocess.run(
                    ['libreoffice', '--headless', '--convert-to', 'pdf:writer_pdf_Export',
                     str(word_path), '--outdir', str(pdf_path.parent)],
                    capture_output=True, text=True
                )

                if result.returncode != 0:
                    error_msg = result.stderr or "未知错误"
                    print(f"LibreOffice转换失败,错误信息: {error_msg}")
                    raise RuntimeError(f"LibreOffice转换失败: {error_msg}")

                print(f"LibreOffice转换输出: {result.stdout}")

                # Rename if the requested output path differs from the default one
                default_pdf = word_path.with_suffix('.pdf')
                if default_pdf != pdf_path and default_pdf.exists():
                    os.rename(default_pdf, pdf_path)
                    print(f"已将PDF从 {default_pdf} 重命名为 {pdf_path}")

                # Verify that the PDF was generated
                if not pdf_path.exists() or pdf_path.stat().st_size == 0:
                    raise RuntimeError("PDF生成失败或文件为空")

                print(f"PDF转换成功,文件大小: {pdf_path.stat().st_size} 字节")
            else:
                # Windows and macOS use docx2pdf
                print(f"使用docx2pdf转换 {word_path} 到 {pdf_path}")
                convert(word_path, pdf_path)

                # Verify that the PDF was generated
                if not pdf_path.exists() or pdf_path.stat().st_size == 0:
                    raise RuntimeError("PDF生成失败或文件为空")

                print(f"PDF转换成功,文件大小: {pdf_path.stat().st_size} 字节")

            return str(pdf_path)

        except Exception as e:
            print(f"PDF转换异常: {str(e)}")
            raise Exception(f"转换PDF失败: {str(e)}")

    @staticmethod
    def batch_convert(word_dir: Union[str, Path], pdf_dir: Union[str, Path] = None) -> list:
        """
        Convert every Word document in a directory.

        Args:
            word_dir: directory containing the Word documents
            pdf_dir: optional output directory for the PDFs; defaults to the Word documents' directory

        Returns:
            The list of generated PDF file paths.
        """
        word_dir = Path(word_dir)
        if pdf_dir:
            pdf_dir = Path(pdf_dir)
            pdf_dir.mkdir(parents=True, exist_ok=True)

        converted_files = []

        for word_file in word_dir.glob("*.docx"):
            try:
                if pdf_dir:
                    pdf_path = pdf_dir / word_file.with_suffix('.pdf').name
                else:
                    pdf_path = word_file.with_suffix('.pdf')

                pdf_file = WordToPdfConverter.convert_to_pdf(word_file, pdf_path)
                converted_files.append(pdf_file)

            except Exception as e:
                print(f"转换 {word_file} 失败: {str(e)}")

        return converted_files

    @staticmethod
    def convert_doc_to_pdf(doc, output_dir: Union[str, Path] = None) -> str:
        """
        Convert a python-docx Document object directly to PDF.

        Args:
            doc: a python-docx Document object
            output_dir: optional output directory; defaults to the current directory

        Returns:
            The path of the generated PDF file.
        """
        temp_docx = None  # defined up front so the except block can safely reference it
        try:
            # Prepare the temporary file path and output directory
            output_dir = Path(output_dir) if output_dir else Path.cwd()
            output_dir.mkdir(parents=True, exist_ok=True)

            # Save a temporary Word file
            temp_docx = output_dir / f"temp_{datetime.now().strftime('%Y%m%d_%H%M%S')}.docx"
            doc.save(temp_docx)

            # Convert it to PDF
            pdf_path = temp_docx.with_suffix('.pdf')
            WordToPdfConverter.convert_to_pdf(temp_docx, pdf_path)

            # Remove the temporary Word file
            temp_docx.unlink()

            return str(pdf_path)

        except Exception as e:
            if temp_docx is not None and temp_docx.exists():
                temp_docx.unlink()
            raise Exception(f"转换PDF失败: {str(e)}")
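A minimal usage sketch; the file and directory names are illustrative. On Linux this requires LibreOffice, elsewhere docx2pdf (which drives Word on Windows/macOS).

# Single file: PDF lands next to the .docx by default
pdf_path = WordToPdfConverter.convert_to_pdf("report.docx")

# Whole directory of .docx files into a separate output directory
pdf_files = WordToPdfConverter.batch_convert("docs/", "pdfs/")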
177 crazy_functions/doc_fns/conversation_doc/word_doc.py (Normal file)
@@ -0,0 +1,177 @@
import re
from docx import Document
from docx.shared import Cm, Pt
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT, WD_LINE_SPACING
from docx.enum.style import WD_STYLE_TYPE
from docx.oxml.ns import qn
from datetime import datetime


def convert_markdown_to_word(markdown_text):
    # 0. Standardize all line endings to \n first
    markdown_text = markdown_text.replace('\r\n', '\n').replace('\r', '\n')

    # 1. Handle headers - support up to six levels with a tighter regex;
    #    keep the header markers so the level can still be recognized later
    markdown_text = re.sub(r'^(#{1,6})\s+(.+?)(?:\s+#+)?$', r'\1 \2', markdown_text, flags=re.MULTILINE)

    # 2. Handle bold, italic and bold-italic
    markdown_text = re.sub(r'\*\*\*(.+?)\*\*\*', r'\1', markdown_text)  # bold italic
    markdown_text = re.sub(r'\*\*(.+?)\*\*', r'\1', markdown_text)      # bold
    markdown_text = re.sub(r'\*(.+?)\*', r'\1', markdown_text)          # italic
    markdown_text = re.sub(r'_(.+?)_', r'\1', markdown_text)            # underscore italic
    markdown_text = re.sub(r'__(.+?)__', r'\1', markdown_text)          # underscore bold

    # 3. Handle code blocks - keep them, just simplify the markup
    # Multi-line code blocks
    markdown_text = re.sub(r'```(?:\w+)?\n([\s\S]*?)```', r'[代码块]\n\1[/代码块]', markdown_text)
    # Inline code
    markdown_text = re.sub(r'`([^`]+)`', r'[代码]\1[/代码]', markdown_text)

    # 4. Handle lists - preserve list structure
    # Unordered lists
    markdown_text = re.sub(r'^(\s*)[-*+]\s+(.+?)$', r'\1• \2', markdown_text, flags=re.MULTILINE)

    # 5. Handle Markdown links
    markdown_text = re.sub(r'\[([^\]]+)\]\(([^)]+?)\s*(?:"[^"]*")?\)', r'\1 (\2)', markdown_text)

    # 6. Handle HTML links
    markdown_text = re.sub(r'<a href=[\'"]([^\'"]+)[\'"](?:\s+target=[\'"][^\'"]+[\'"])?>([^<]+)</a>', r'\2 (\1)',
                           markdown_text)

    # 7. Handle images
    markdown_text = re.sub(r'!\[([^\]]*)\]\([^)]+\)', r'[图片:\1]', markdown_text)

    return markdown_text


class WordFormatter:
    """Chat history Word document generator, following the Chinese government document standard (GB/T 9704-2012)"""

    def __init__(self):
        self.doc = Document()
        self._setup_document()
        self._create_styles()

    def _setup_document(self):
        """Set up the basic document format, including page setup and the header"""
        sections = self.doc.sections
        for section in sections:
            # Page size: A4
            section.page_width = Cm(21)
            section.page_height = Cm(29.7)
            # Page margins
            section.top_margin = Cm(3.7)     # top margin 37mm
            section.bottom_margin = Cm(3.5)  # bottom margin 35mm
            section.left_margin = Cm(2.8)    # left margin 28mm
            section.right_margin = Cm(2.6)   # right margin 26mm
            # Header/footer distance
            section.header_distance = Cm(2.0)
            section.footer_distance = Cm(2.0)

            # Add the page header
            header = section.header
            header_para = header.paragraphs[0]
            header_para.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
            header_run = header_para.add_run("GPT-Academic对话记录")
            header_run.font.name = '仿宋'
            header_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
            header_run.font.size = Pt(9)

    def _create_styles(self):
        """Create the document styles"""
        # Body style
        style = self.doc.styles.add_style('Normal_Custom', WD_STYLE_TYPE.PARAGRAPH)
        style.font.name = '仿宋'
        style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        style.font.size = Pt(12)  # 12 pt
        style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        style.paragraph_format.space_after = Pt(0)

        # Question style
        question_style = self.doc.styles.add_style('Question_Style', WD_STYLE_TYPE.PARAGRAPH)
        question_style.font.name = '黑体'
        question_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        question_style.font.size = Pt(14)  # 14 pt
        question_style.font.bold = True
        question_style.paragraph_format.space_before = Pt(12)  # reduced space before
        question_style.paragraph_format.space_after = Pt(6)
        question_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        question_style.paragraph_format.left_indent = Pt(0)  # no left indent

        # Answer style
        answer_style = self.doc.styles.add_style('Answer_Style', WD_STYLE_TYPE.PARAGRAPH)
        answer_style.font.name = '仿宋'
        answer_style._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        answer_style.font.size = Pt(12)  # 12 pt
        answer_style.paragraph_format.space_before = Pt(6)
        answer_style.paragraph_format.space_after = Pt(12)
        answer_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE
        answer_style.paragraph_format.left_indent = Pt(0)  # no left indent

        # Title style
        title_style = self.doc.styles.add_style('Title_Custom', WD_STYLE_TYPE.PARAGRAPH)
        title_style.font.name = '黑体'  # use SimHei
        title_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        title_style.font.size = Pt(22)  # 22 pt
        title_style.font.bold = True
        title_style.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        title_style.paragraph_format.space_before = Pt(0)
        title_style.paragraph_format.space_after = Pt(24)
        title_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE

        # Reference style
        ref_style = self.doc.styles.add_style('Reference_Style', WD_STYLE_TYPE.PARAGRAPH)
        ref_style.font.name = '宋体'
        ref_style._element.rPr.rFonts.set(qn('w:eastAsia'), '宋体')
        ref_style.font.size = Pt(10.5)  # references use a smaller font
        ref_style.paragraph_format.space_before = Pt(3)
        ref_style.paragraph_format.space_after = Pt(3)
        ref_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.SINGLE
        ref_style.paragraph_format.left_indent = Pt(21)
        ref_style.paragraph_format.first_line_indent = Pt(-21)

        # Reference heading style
        ref_title_style = self.doc.styles.add_style('Reference_Title_Style', WD_STYLE_TYPE.PARAGRAPH)
        ref_title_style.font.name = '黑体'
        ref_title_style._element.rPr.rFonts.set(qn('w:eastAsia'), '黑体')
        ref_title_style.font.size = Pt(16)
        ref_title_style.font.bold = True
        ref_title_style.paragraph_format.space_before = Pt(24)
        ref_title_style.paragraph_format.space_after = Pt(12)
        ref_title_style.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE

    def create_document(self, history):
        """Write the chat history into the document"""
        # Add the title
        title_para = self.doc.add_paragraph(style='Title_Custom')
        title_run = title_para.add_run('GPT-Academic 对话记录')

        # Add the date
        date_para = self.doc.add_paragraph()
        date_para.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER
        date_run = date_para.add_run(datetime.now().strftime('%Y年%m月%d日'))
        date_run.font.name = '仿宋'
        date_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
        date_run.font.size = Pt(16)

        self.doc.add_paragraph()  # blank line

        # Add the conversation content
        for i in range(0, len(history), 2):
            question = history[i]
            # Guard against an odd-length history (question without an answer)
            answer = convert_markdown_to_word(history[i + 1]) if i + 1 < len(history) else ""

            if question:
                q_para = self.doc.add_paragraph(style='Question_Style')
                q_para.add_run(f'问题 {i//2 + 1}:').bold = True
                q_para.add_run(str(question))

            if answer:
                a_para = self.doc.add_paragraph(style='Answer_Style')
                a_para.add_run(f'回答 {i//2 + 1}:').bold = True
                a_para.add_run(str(answer))

        return self.doc
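A minimal usage sketch; the history values are illustrative. create_document returns the python-docx Document, which can then be saved or handed to WordToPdfConverter.convert_doc_to_pdf from word2pdf.py above.

formatter = WordFormatter()
doc = formatter.create_document(["问题一", "回答一"])
doc.save("chat.docx")  # standard python-docx save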
0 crazy_functions/doc_fns/read_fns/__init__.py (Normal file)
6 crazy_functions/doc_fns/read_fns/docx_reader.py (Normal file)
@@ -0,0 +1,6 @@
import nltk
import os

# Expand "~" to an absolute home path so nltk receives a real directory
nltk_data_dir = os.path.expanduser('~/nltk_data')
nltk.data.path.append(nltk_data_dir)
nltk.download('averaged_perceptron_tagger', download_dir=nltk_data_dir)
nltk.download('punkt', download_dir=nltk_data_dir)
286 crazy_functions/doc_fns/read_fns/excel_reader.py (Normal file)
@@ -0,0 +1,286 @@
from __future__ import annotations

import pandas as pd
import numpy as np
from pathlib import Path
from typing import Any, Optional, List, Set, Dict, Union, Iterator, Tuple
from dataclasses import dataclass, field
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import chardet
from functools import lru_cache
import os


@dataclass
class ExtractorConfig:
    """Extractor configuration"""
    encoding: str = 'auto'
    na_filter: bool = True
    skip_blank_lines: bool = True
    chunk_size: int = 10000
    max_workers: int = 4
    preserve_format: bool = True
    read_all_sheets: bool = True  # new: whether to read every worksheet
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': False,
        'remove_special_chars': False,
        'lowercase': False
    })


class ExcelTextExtractor:
    """Enhanced text extractor for Excel-family files"""

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.xlsx', '.xls', '.csv', '.tsv', '.xlsm', '.xltx', '.xltm', '.ods'
    }

    def __init__(self, config: Optional[ExtractorConfig] = None):
        self.config = config or ExtractorConfig()
        self._setup_logging()
        self._detect_encoding = lru_cache(maxsize=128)(self._detect_encoding)

    def _setup_logging(self) -> None:
        """Configure the logger"""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)
        fh = logging.FileHandler('excel_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _detect_encoding(self, file_path: Path) -> str:
        if self.config.encoding != 'auto':
            return self.config.encoding

        try:
            with open(file_path, 'rb') as f:
                raw_data = f.read(10000)
            result = chardet.detect(raw_data)
            return result['encoding'] or 'utf-8'
        except Exception as e:
            self.logger.warning(f"Encoding detection failed: {e}. Using utf-8")
            return 'utf-8'

    def _validate_file(self, file_path: Union[str, Path]) -> Path:
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported format: {path.suffix}. "
                f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _format_value(self, value: Any) -> str:
        if pd.isna(value) or value is None:
            return ''
        if isinstance(value, (int, float)):
            return str(value)
        return str(value).strip()

    def _process_chunk(self, chunk: pd.DataFrame, columns: Optional[List[str]] = None, sheet_name: str = '') -> str:
        """Process a chunk of data; sheet_name is a new parameter"""
        try:
            if columns:
                chunk = chunk[columns]

            if self.config.preserve_format:
                formatted_chunk = chunk.applymap(self._format_value)
                rows = []

                # Prefix the worksheet name as a heading
                if sheet_name:
                    rows.append(f"[Sheet: {sheet_name}]")

                # Add the header row
                headers = [str(col) for col in formatted_chunk.columns]
                rows.append('\t'.join(headers))

                # Add the data rows
                for _, row in formatted_chunk.iterrows():
                    rows.append('\t'.join(row.values))

                return '\n'.join(rows)
            else:
                flat_values = (
                    chunk.astype(str)
                    .replace({'nan': '', 'None': '', 'NaN': ''})
                    .values.flatten()
                )
                return ' '.join(v for v in flat_values if v)

        except Exception as e:
            self.logger.error(f"Error processing chunk: {e}")
            raise

    def _read_file(self, file_path: Path) -> Union[pd.DataFrame, Iterator[pd.DataFrame], Dict[str, pd.DataFrame]]:
        """Read a file, with multi-worksheet support"""
        try:
            encoding = self._detect_encoding(file_path)

            if file_path.suffix.lower() in {'.csv', '.tsv'}:
                sep = '\t' if file_path.suffix.lower() == '.tsv' else ','

                # Read large files in chunks
                if file_path.stat().st_size > self.config.chunk_size * 1024:
                    return pd.read_csv(
                        file_path,
                        encoding=encoding,
                        na_filter=self.config.na_filter,
                        skip_blank_lines=self.config.skip_blank_lines,
                        sep=sep,
                        chunksize=self.config.chunk_size,
                        on_bad_lines='warn'
                    )
                else:
                    return pd.read_csv(
                        file_path,
                        encoding=encoding,
                        na_filter=self.config.na_filter,
                        skip_blank_lines=self.config.skip_blank_lines,
                        sep=sep
                    )
            else:
                # Excel handling, with multi-worksheet support
                if self.config.read_all_sheets:
                    # Read every worksheet
                    return pd.read_excel(
                        file_path,
                        na_filter=self.config.na_filter,
                        keep_default_na=self.config.na_filter,
                        engine='openpyxl',
                        sheet_name=None  # None means read all worksheets
                    )
                else:
                    # Read only the first worksheet
                    return pd.read_excel(
                        file_path,
                        na_filter=self.config.na_filter,
                        keep_default_na=self.config.na_filter,
                        engine='openpyxl',
                        sheet_name=0  # read the first worksheet
                    )

        except Exception as e:
            self.logger.error(f"Error reading file {file_path}: {e}")
            raise

    def extract_text(
            self,
            file_path: Union[str, Path],
            columns: Optional[List[str]] = None,
            separator: str = '\n'
    ) -> str:
        """Extract text, with multi-worksheet support"""
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            reader = self._read_file(path)
            texts = []

            # Excel with multiple worksheets
            if isinstance(reader, dict):
                for sheet_name, df in reader.items():
                    sheet_text = self._process_chunk(df, columns, sheet_name)
                    if sheet_text:
                        texts.append(sheet_text)
                return separator.join(texts)

            # A single DataFrame
            elif isinstance(reader, pd.DataFrame):
                return self._process_chunk(reader, columns)

            # An iterator of DataFrames
            else:
                with ThreadPoolExecutor(max_workers=self.config.max_workers) as executor:
                    futures = {
                        executor.submit(self._process_chunk, chunk, columns): i
                        for i, chunk in enumerate(reader)
                    }

                    chunk_texts = []
                    for future in as_completed(futures):
                        try:
                            text = future.result()
                            if text:
                                chunk_texts.append((futures[future], text))
                        except Exception as e:
                            self.logger.error(f"Error in chunk {futures[future]}: {e}")

                    # Restore chunk order
                    chunk_texts.sort(key=lambda x: x[0])
                    texts = [text for _, text in chunk_texts]

                # Merge the text, preserving the format
                if texts and self.config.preserve_format:
                    result = texts[0]  # the first chunk carries the header
                    if len(texts) > 1:
                        # Skip the header line of the following chunks
                        for text in texts[1:]:
                            result += '\n' + '\n'.join(text.split('\n')[1:])
                    return result
                else:
                    return separator.join(texts)

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats"""
        return sorted(ExcelTextExtractor.SUPPORTED_EXTENSIONS)


def main():
    """Demonstrate usage"""
    config = ExtractorConfig(
        encoding='auto',
        preserve_format=True,
        read_all_sheets=True,  # enable multi-worksheet reading
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': False,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    extractor = ExcelTextExtractor(config)

    try:
        sample_file = 'example.xlsx'
        if Path(sample_file).exists():
            text = extractor.extract_text(
                sample_file,
                columns=['title', 'content']
            )
            print("提取的文本:")
            print(text)
        else:
            print(f"示例文件 {sample_file} 不存在")

        print("\n支持的格式:", extractor.get_supported_formats())

    except Exception as e:
        print(f"错误: {e}")


if __name__ == "__main__":
    main()
359 crazy_functions/doc_fns/read_fns/markitdown/markdown_reader.py (Normal file)
@@ -0,0 +1,359 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re
import subprocess
import sys
import tempfile
import shutil

@dataclass
class MarkdownConverterConfig:
    """Configuration for the PDF-to-Markdown converter.

    Attributes:
        extract_images: whether to extract images
        extract_tables: whether to try to preserve table structure
        extract_code_blocks: whether to recognize code blocks
        extract_math: whether to convert math formulas
        output_dir: output directory path
        image_dir: directory for saved images
        paragraph_separator: separator between paragraphs
        text_cleanup: dictionary of text-cleanup options
        docintel_endpoint: Document Intelligence endpoint URL (optional)
        enable_plugins: whether to enable plugins
        llm_client: LLM client object (e.g. an OpenAI client)
        llm_model: name of the LLM model to use
    """
    extract_images: bool = True
    extract_tables: bool = True
    extract_code_blocks: bool = True
    extract_math: bool = True
    output_dir: str = ""
    image_dir: str = "images"
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })
    docintel_endpoint: str = ""
    enable_plugins: bool = False
    llm_client: Optional[object] = None
    llm_model: str = ""


class MarkdownConverter:
    """PDF-to-Markdown converter.

    Uses the markitdown library to convert PDF to Markdown, with several configuration options.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.pdf',
    }

    def __init__(self, config: Optional[MarkdownConverterConfig] = None):
        """Initialize the converter.

        Args:
            config: converter configuration object; defaults are used when None
        """
        self.config = config or MarkdownConverterConfig()
        self._setup_logging()

        # Check whether markitdown is installed
        self._check_markitdown_installation()

    def _setup_logging(self) -> None:
        """Configure the logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Add a file handler
        fh = logging.FileHandler('markdown_converter.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _check_markitdown_installation(self) -> None:
        """Check whether markitdown is installed."""
        try:
            # Try to import the markitdown library
            from markitdown import MarkItDown
            self.logger.info("markitdown is installed")
        except ImportError:
            self.logger.warning("markitdown is not installed; attempting to install it...")
            try:
                # Install with the current interpreter's pip so it lands in the right environment
                subprocess.check_call([sys.executable, "-m", "pip", "install", "markitdown"])
                self.logger.info("markitdown installed successfully")
                from markitdown import MarkItDown
            except (subprocess.SubprocessError, ImportError):
                self.logger.error("Could not install markitdown; please install it manually")
                self.markitdown_available = False
                return

        self.markitdown_available = True

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate a file.

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"File size ({file_size_mb:.1f}MB) exceeds the limit of {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported format: {path.suffix}. "
                f"Supported formats: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text.

        Args:
            text: raw text

        Returns:
            str: the cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats."""
        return sorted(MarkdownConverter.SUPPORTED_EXTENSIONS)

    def convert_to_markdown(
        self,
        file_path: Union[str, Path],
        output_path: Optional[Union[str, Path]] = None
    ) -> str:
        """Convert a PDF to Markdown.

        Args:
            file_path: PDF file path
            output_path: output Markdown file path; when None the content is returned without saving

        Returns:
            str: the converted Markdown content

        Raises:
            Exception: errors during conversion
        """
        temp_dir = None
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            if not self.markitdown_available:
                raise ImportError("markitdown is not installed; conversion is unavailable")

            # Import the markitdown library
            from markitdown import MarkItDown

            # Prepare the output directory
            if output_path:
                output_path = Path(output_path)
                output_dir = output_path.parent
                output_dir.mkdir(parents=True, exist_ok=True)
                save_to_file = True
            else:
                # Create a temporary directory as the output directory
                temp_dir = tempfile.mkdtemp()
                output_dir = Path(temp_dir)
                output_path = output_dir / f"{path.stem}.md"
                save_to_file = False

            # Image directory
            image_dir = output_dir / self.config.image_dir
            image_dir.mkdir(parents=True, exist_ok=True)

            # Create a MarkItDown instance and run the conversion
            if self.config.docintel_endpoint:
                md = MarkItDown(docintel_endpoint=self.config.docintel_endpoint)
            elif self.config.llm_client and self.config.llm_model:
                md = MarkItDown(
                    enable_plugins=self.config.enable_plugins,
                    llm_client=self.config.llm_client,
                    llm_model=self.config.llm_model
                )
            else:
                md = MarkItDown(enable_plugins=self.config.enable_plugins)

            # Run the conversion
            result = md.convert(str(path))
            markdown_content = result.text_content

            # Clean up the text
            markdown_content = self._cleanup_text(markdown_content)

            # Save to a file when an output path was requested
            if save_to_file:
                with open(output_path, 'w', encoding='utf-8') as f:
                    f.write(markdown_content)
                self.logger.info(f"Conversion succeeded, written to: {output_path}")

            return markdown_content

        except Exception as e:
            self.logger.error(f"Conversion failed: {e}")
            raise
        finally:
            # When a temporary directory was used (no output path given), clean it up
            if temp_dir is not None:
                shutil.rmtree(temp_dir, ignore_errors=True)

    def convert_to_markdown_and_save(
        self,
        file_path: Union[str, Path],
        output_path: Union[str, Path]
    ) -> Path:
        """Convert a PDF to Markdown and save it to the given path.

        Args:
            file_path: PDF file path
            output_path: output Markdown file path

        Returns:
            Path: the Path object of the output file

        Raises:
            Exception: errors during conversion
        """
        self.convert_to_markdown(file_path, output_path)
        return Path(output_path)

    def batch_convert(
        self,
        file_paths: List[Union[str, Path]],
        output_dir: Union[str, Path]
    ) -> List[Path]:
        """Batch-convert multiple PDF files to Markdown.

        Args:
            file_paths: list of PDF file paths
            output_dir: output directory path

        Returns:
            List[Path]: the list of output file paths

        Raises:
            Exception: errors during conversion
        """
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        output_paths = []
        for file_path in file_paths:
            path = Path(file_path)
            output_path = output_dir / f"{path.stem}.md"

            try:
                self.convert_to_markdown(file_path, output_path)
                output_paths.append(output_path)
                self.logger.info(f"Converted: {path} -> {output_path}")
            except Exception as e:
                self.logger.error(f"Conversion failed for {path}: {e}")

        return output_paths


def main():
    """Entry point: demonstrates usage."""
    # Configuration
    config = MarkdownConverterConfig(
        extract_images=True,
        extract_tables=True,
        extract_code_blocks=True,
        extract_math=True,
        enable_plugins=False,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # Create the converter
    converter = MarkdownConverter(config)

    # Usage example
    try:
        # Replace with an actual file path
        sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
        if Path(sample_file).exists():
            # Convert to Markdown and print the content
            markdown_content = converter.convert_to_markdown(sample_file)
            print("Converted Markdown content:")
            print(markdown_content[:500] + "...")  # print only the first 500 characters

            # Convert and save to a file
            output_file = f"./output_{Path(sample_file).stem}.md"
            output_path = converter.convert_to_markdown_and_save(sample_file, output_file)
            print(f"\nSaved to: {output_path}")

            # LLM-enhanced example (requires the corresponding imports and configuration)
            # try:
            #     from openai import OpenAI
            #     client = OpenAI()
            #     llm_config = MarkdownConverterConfig(
            #         llm_client=client,
            #         llm_model="gpt-4o"
            #     )
            #     llm_converter = MarkdownConverter(llm_config)
            #     llm_result = llm_converter.convert_to_markdown("example.jpg")
            #     print("LLM-enhanced result:")
            #     print(llm_result[:500] + "...")
            # except ImportError:
            #     print("OpenAI library not installed; skipping the LLM example")
        else:
            print(f"Sample file {sample_file} does not exist")

        print("\nSupported formats:", converter.get_supported_formats())

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
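Beyond the single-file demo in main() above, batch_convert runs the same pipeline over a whole folder. A minimal sketch, assuming a local ./pdfs input directory and ./markdown_out output directory (both placeholder paths), using the MarkdownConverter defined above:

    from pathlib import Path
    converter = MarkdownConverter()                      # default configuration
    pdfs = list(Path('./pdfs').glob('*.pdf'))            # hypothetical input folder
    outputs = converter.batch_convert(pdfs, './markdown_out')
    print(f"Converted {len(outputs)} of {len(pdfs)} files")

Because batch_convert logs and swallows per-file failures, len(outputs) can be smaller than len(pdfs).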
@@ -0,0 +1,493 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os
import re

from unstructured.partition.auto import partition
from unstructured.documents.elements import (
    Text, Title, NarrativeText, ListItem, Table,
    Footer, Header, PageBreak, Image, Address
)


@dataclass
class PaperMetadata:
    """Paper metadata container."""
    title: str = ""
    authors: List[str] = field(default_factory=list)
    affiliations: List[str] = field(default_factory=list)
    journal: str = ""
    volume: str = ""
    issue: str = ""
    year: str = ""
    doi: str = ""
    date: str = ""
    publisher: str = ""
    conference: str = ""
    abstract: str = ""
    keywords: List[str] = field(default_factory=list)


@dataclass
class ExtractorConfig:
    """Configuration for the metadata extractor."""
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class PaperMetadataExtractor:
    """Paper metadata extractor.

    Uses the unstructured library to pull the title, authors, abstract and other
    metadata out of documents in many formats.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        '.pdf', '.docx', '.doc', '.txt', '.ppt', '.pptx',
        '.xlsx', '.xls', '.md', '.org', '.odt', '.rst',
        '.rtf', '.epub', '.html', '.xml', '.json'
    }

    # Keyword patterns for the various paper sections
    SECTION_PATTERNS = {
        'abstract': r'\b(摘要|abstract|summary|概要|résumé|zusammenfassung|аннотация)\b',
        'keywords': r'\b(关键词|keywords|key\s+words|关键字|mots[- ]clés|schlüsselwörter|ключевые слова)\b',
    }

    def __init__(self, config: Optional[ExtractorConfig] = None):
        """Initialize the extractor.

        Args:
            config: extractor configuration object; defaults are used when None
        """
        self.config = config or ExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure the logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Add a file handler
        fh = logging.FileHandler('paper_metadata_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate a file.

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"File size ({file_size_mb:.1f}MB) exceeds the limit of {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported file format: {path.suffix}. "
                f"Supported formats: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text.

        Args:
            text: raw text

        Returns:
            str: the cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats."""
        return sorted(PaperMetadataExtractor.SUPPORTED_EXTENSIONS)

    def extract_metadata(self, file_path: Union[str, Path], strategy: str = "fast") -> PaperMetadata:
        """Extract paper metadata.

        Args:
            file_path: file path
            strategy: extraction strategy ("fast" or "accurate")

        Returns:
            PaperMetadata: the extracted paper metadata

        Raises:
            Exception: errors during extraction
        """
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            # Break the document into elements with the unstructured library
            elements = partition(
                str(path),
                strategy=strategy,
                include_metadata=True,
                nlp=False,
            )

            # Extract the metadata
            metadata = PaperMetadata()

            # Title and authors
            self._extract_title_and_authors(elements, metadata)

            # Abstract and keywords
            self._extract_abstract_and_keywords(elements, metadata)

            # Remaining metadata
            self._extract_additional_metadata(elements, metadata)

            return metadata

        except Exception as e:
            self.logger.error(f"Metadata extraction failed: {e}")
            raise

    def _extract_title_and_authors(self, elements, metadata: PaperMetadata) -> None:
        """Extract the title and author information from the document (improved version)."""
        # Collect all potential title candidates
        title_candidates = []
        all_text = []
        raw_text = []

        # First collect the text of the document's first 30 elements to aid the heuristics
        for i, element in enumerate(elements[:30]):
            if isinstance(element, (Text, Title, NarrativeText)):
                text = str(element).strip()
                if text:
                    all_text.append(text)
                    raw_text.append(text)

        # Print the raw text, for debugging
        print("First 10 lines of raw text:")
        for i, text in enumerate(raw_text[:10]):
            print(f"{i}: {text}")

        # 1. Look for consecutive title fragments and merge them
        i = 0
        while i < len(all_text) - 1:
            current = all_text[i]
            next_text = all_text[i + 1]

            # Detect a split title: a short line ending with a colon, followed by a line
            # that looks like the title's continuation
            if current.endswith(':') and len(current) < 50 and len(next_text) > 5 and next_text[0].isupper():
                # Merge the two lines
                combined_title = f"{current} {next_text}"
                # Replace the first fragment and drop the second
                all_text[i] = combined_title
                all_text.pop(i + 1)
                # Give the merged title a very high score
                title_candidates.append((combined_title, 15, i))
            else:
                i += 1

        # 2. Then look among the Title elements
        for i, element in enumerate(elements[:15]):  # only check the first 15 elements
            if isinstance(element, Title):
                title_text = str(element).strip()
                # Exclude common non-title content
                if title_text.lower() not in ['abstract', '摘要', 'introduction', '引言']:
                    # Score the candidate (higher means more likely to be the real title)
                    score = self._evaluate_title_candidate(title_text, i, element)
                    title_candidates.append((title_text, score, i))

        # 3. Special handling for common paper-title formats
        for i, text in enumerate(all_text[:15]):
            # Check for prefixed titles of the "KIMI K1.5:" kind
            if re.match(r'^[A-Z][A-Z0-9\s\.]+(\s+K\d+(\.\d+)?)?:', text):
                score = 12  # very high score
                title_candidates.append((text, score, i))

                # If the next line is all uppercase, it is probably the title's continuation
                if i+1 < len(all_text) and all_text[i+1].isupper() and len(all_text[i+1]) > 10:
                    combined_title = f"{text} {all_text[i+1]}"
                    title_candidates.append((combined_title, 15, i))  # higher score for the merged title

            # Match all-uppercase title lines
            elif text.isupper() and len(text) > 10 and len(text) < 100:
                score = 10 - i * 0.5  # earlier lines are more likely to be the title
                title_candidates.append((text, score, i))

        # Sort the candidates by score and pick the best one
        if title_candidates:
            title_candidates.sort(key=lambda x: x[1], reverse=True)
            metadata.title = title_candidates[0][0]
            title_position = title_candidates[0][2]
            print(f"All title candidates: {title_candidates[:3]}")
        else:
            # Fallback strategy when no suitable title is found
            for text in all_text[:10]:
                if text.isupper() and len(text) > 10 and len(text) < 200:  # uppercase text of suitable length
                    metadata.title = text
                    break
            title_position = 0

        # Extract author information (improved author-extraction logic)
        author_candidates = []

        # 1. Special handling for the line after "TECHNICAL REPORT OF", which is usually the authors or team
        for i, text in enumerate(all_text):
            if "TECHNICAL REPORT" in text.upper() and i+1 < len(all_text):
                team_text = all_text[i+1].strip()
                if re.search(r'\b(team|group|lab)\b', team_text, re.IGNORECASE):
                    author_candidates.append((team_text, 15))

        # 2. Look for text containing "Team"
        for text in all_text[:20]:
            if "Team" in text and len(text) < 30:
                # Very likely a team name
                author_candidates.append((text, 12))

        # Add the authors to the metadata
        if author_candidates:
            # Sort by score
            author_candidates.sort(key=lambda x: x[1], reverse=True)

            # Deduplicate
            seen_authors = set()
            for author, _ in author_candidates:
                if author.lower() not in seen_authors and not author.isdigit():
                    seen_authors.add(author.lower())
                    metadata.authors.append(author)

        # If no authors were found, look for a team name in the affiliation information
        if not metadata.authors:
            for text in all_text[:20]:
                if re.search(r'\b(team|group|lab|laboratory|研究组|团队)\b', text, re.IGNORECASE):
                    if len(text) < 50:  # avoid overly long text
                        metadata.authors.append(text.strip())
                        break

        # Extract affiliation information
        for i, element in enumerate(elements[:30]):
            element_text = str(element).strip()
            if re.search(r'(university|institute|department|school|laboratory|college|center|centre|\d{5,}|^[a-zA-Z]+@|学院|大学|研究所|研究院)', element_text, re.IGNORECASE):
                # Probably an affiliation
                if element_text not in metadata.affiliations and len(element_text) > 10:
                    metadata.affiliations.append(element_text)

    def _evaluate_title_candidate(self, text, position, element):
        """Score how likely a candidate is to be the title."""
        score = 0

        # Position: earlier lines are more likely to be the title
        score += max(0, 10 - position) * 0.5

        # Length: titles are usually neither too short nor too long
        if 10 <= len(text) <= 150:
            score += 3
        elif len(text) < 10:
            score -= 2
        elif len(text) > 150:
            score -= 3

        # Formatting
        if text.isupper():  # all uppercase may be a title
            score += 2
        if re.match(r'^[A-Z]', text):  # starts with a capital letter
            score += 1
        if ':' in text:  # titles often contain a colon
            score += 1.5

        # Content
        if re.search(r'\b(scaling|learning|model|approach|method|system|framework|analysis)\b', text.lower()):
            score += 2  # contains common academic-paper keywords

        # Avoid misclassification
        if re.match(r'^\d+$', text):  # pure digits
            score -= 10
        if re.search(r'^(http|www|doi)', text.lower()):  # URL or DOI
            score -= 5
        if len(text.split()) <= 2 and len(text) < 15:  # very short phrases
            score -= 3

        # Element metadata (when available)
        if hasattr(element, 'metadata') and element.metadata:
            # Fix: handle ElementMetadata objects correctly
            try:
                # Try to read the attributes safely with getattr
                font_size = getattr(element.metadata, 'font_size', None)
                if font_size is not None and font_size > 14:  # assume the standard font size is 12
                    score += 3

                font_weight = getattr(element.metadata, 'font_weight', None)
                if font_weight == 'bold':
                    score += 2  # bonus for bold text
            except (AttributeError, TypeError):
                # If that access pattern fails, try other possible ones
                try:
                    metadata_dict = element.metadata.__dict__ if hasattr(element.metadata, '__dict__') else {}
                    if 'font_size' in metadata_dict and metadata_dict['font_size'] > 14:
                        score += 3
                    if 'font_weight' in metadata_dict and metadata_dict['font_weight'] == 'bold':
                        score += 2
                except Exception:
                    # If everything fails, skip the metadata handling
                    pass

        return score

    def _extract_abstract_and_keywords(self, elements, metadata: PaperMetadata) -> None:
        """Extract the abstract and keywords from the document."""
        abstract_found = False
        keywords_found = False
        abstract_text = []

        for i, element in enumerate(elements):
            element_text = str(element).strip().lower()

            # Look for the abstract section
            if not abstract_found and (
                isinstance(element, Title) and
                re.search(self.SECTION_PATTERNS['abstract'], element_text, re.IGNORECASE)
            ):
                abstract_found = True
                continue

            # Once inside the abstract, collect content until the keywords section or a new section starts
            if abstract_found and not keywords_found:
                # Check whether we hit the keywords section or a new section
                if (
                    isinstance(element, Title) or
                    re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE) or
                    re.match(r'\b(introduction|引言|method|方法)\b', element_text, re.IGNORECASE)
                ):
                    keywords_found = bool(re.search(self.SECTION_PATTERNS['keywords'], element_text, re.IGNORECASE))
                    abstract_found = False  # stop collecting the abstract
                else:
                    # Collect abstract text
                    if isinstance(element, (Text, NarrativeText)) and element_text:
                        abstract_text.append(element_text)

            # When the keywords section is found, extract the keywords
            if keywords_found and not abstract_found and not metadata.keywords:
                if isinstance(element, (Text, NarrativeText)):
                    # Strip a possible "关键词:"/"Keywords:" prefix
                    cleaned_text = re.sub(r'^\s*(关键词|keywords|key\s+words)\s*[::]\s*', '', element_text, flags=re.IGNORECASE)

                    # Try splitting on different separators
                    for separator in [';', ';', ',', ',']:
                        if separator in cleaned_text:
                            metadata.keywords = [k.strip() for k in cleaned_text.split(separator) if k.strip()]
                            break

                    # If the text could not be split, treat it as a single keyword
                    if not metadata.keywords and cleaned_text:
                        metadata.keywords = [cleaned_text]

                    keywords_found = False  # keywords extracted, stop processing

        # Set the abstract text
        if abstract_text:
            metadata.abstract = self.config.paragraph_separator.join(abstract_text)

    def _extract_additional_metadata(self, elements, metadata: PaperMetadata) -> None:
        """Extract the remaining metadata."""
        for element in elements[:30]:  # only inspect the front of the document
            element_text = str(element).strip()

            # Try to match a DOI
            doi_match = re.search(r'(doi|DOI):\s*(10\.\d{4,}\/[a-zA-Z0-9.-]+)', element_text)
            if doi_match and not metadata.doi:
                metadata.doi = doi_match.group(2)

            # Try to match a date
            date_match = re.search(r'(published|received|accepted|submitted):\s*(\d{1,2}\s+[a-zA-Z]+\s+\d{4}|\d{4}[-/]\d{1,2}[-/]\d{1,2})', element_text, re.IGNORECASE)
            if date_match and not metadata.date:
                metadata.date = date_match.group(2)

            # Try to match a year
            year_match = re.search(r'\b(19|20)\d{2}\b', element_text)
            if year_match and not metadata.year:
                metadata.year = year_match.group(0)

            # Try to match a journal/conference name
            journal_match = re.search(r'(journal|conference):\s*([^,;.]+)', element_text, re.IGNORECASE)
            if journal_match:
                if "journal" in journal_match.group(1).lower() and not metadata.journal:
                    metadata.journal = journal_match.group(2).strip()
                elif not metadata.conference:
                    metadata.conference = journal_match.group(2).strip()


def main():
    """Entry point: demonstrates usage."""
    # Create the extractor
    extractor = PaperMetadataExtractor()

    # Usage example
    try:
        # Replace with an actual file path
        sample_file = '/Users/boyin.liu/Documents/示例文档/论文/3.pdf'
        if Path(sample_file).exists():
            metadata = extractor.extract_metadata(sample_file)
            print("Extracted metadata:")
            print(f"Title: {metadata.title}")
            print(f"Authors: {', '.join(metadata.authors)}")
            print(f"Affiliations: {', '.join(metadata.affiliations)}")
            print(f"Abstract: {metadata.abstract[:200]}...")
            print(f"Keywords: {', '.join(metadata.keywords)}")
            print(f"DOI: {metadata.doi}")
            print(f"Date: {metadata.date}")
            print(f"Year: {metadata.year}")
            print(f"Journal: {metadata.journal}")
            print(f"Conference: {metadata.conference}")
        else:
            print(f"Sample file {sample_file} does not exist")

        print("\nSupported formats:", extractor.get_supported_formats())

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
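The SECTION_PATTERNS table above is what drives the abstract/keyword detection; the patterns are deliberately multilingual. A small sketch of how the abstract pattern behaves (the sample lines are made up):

    import re
    abstract_pat = r'\b(摘要|abstract|summary|概要|résumé|zusammenfassung|аннотация)\b'
    for line in ['Abstract', '摘要', '1 Introduction']:
        print(line, '->', bool(re.search(abstract_pat, line.lower(), re.IGNORECASE)))
    # the first two lines match, the third does not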
File diff suppressed because it is too large
@@ -0,0 +1,86 @@
from pathlib import Path
from crazy_functions.doc_fns.read_fns.unstructured_all.paper_structure_extractor import PaperStructureExtractor

def extract_and_save_as_markdown(paper_path, output_path=None):
    """
    Extract the paper structure and save it in Markdown format.

    Args:
        paper_path: paper file path
        output_path: output Markdown file path; when not given, the input file name with an .md extension is used

    Returns:
        the path of the saved Markdown file
    """
    # Create the extractor
    extractor = PaperStructureExtractor()

    # Resolve the file path
    paper_path = Path(paper_path)

    # When no output path is given, use the same file name with an .md extension
    if output_path is None:
        output_path = paper_path.with_suffix('.md')
    else:
        output_path = Path(output_path)

    # Make sure the output directory exists
    output_path.parent.mkdir(parents=True, exist_ok=True)

    print(f"Processing paper: {paper_path}")

    try:
        # Extract the paper structure
        paper = extractor.extract_paper_structure(paper_path)

        # Generate the Markdown content
        markdown_content = extractor.generate_markdown(paper)

        # Save to a file
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        print(f"Markdown file saved: {output_path}")

        # Print summary information
        print("\nPaper summary:")
        print(f"Title: {paper.metadata.title}")
        print(f"Authors: {', '.join(paper.metadata.authors)}")
        print(f"Keywords: {', '.join(paper.keywords)}")
        print(f"Sections: {len(paper.sections)}")
        print(f"Figures: {len(paper.figures)}")
        print(f"Tables: {len(paper.tables)}")
        print(f"Formulas: {len(paper.formulas)}")
        print(f"References: {len(paper.references)}")

        return output_path

    except Exception as e:
        print(f"Error while processing the paper: {e}")
        import traceback
        traceback.print_exc()
        return None

# Usage example
if __name__ == "__main__":
    # Replace with an actual paper file path
    sample_paper = "crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf"

    # An output path may be given explicitly, or the default may be used
    # output_file = "/path/to/output/paper_structure.md"
    # extract_and_save_as_markdown(sample_paper, output_file)

    # Use the default output path (same name as the input file, with an .md extension)
    extract_and_save_as_markdown(sample_paper)

    # # Example of batch-processing multiple papers
    # paper_dir = Path("/path/to/papers/folder")
    # output_dir = Path("/path/to/output/folder")
    #
    # # Make sure the output directory exists
    # output_dir.mkdir(parents=True, exist_ok=True)
    #
    # # Process every PDF file in the directory
    # for paper_file in paper_dir.glob("*.pdf"):
    #     output_file = output_dir / f"{paper_file.stem}.md"
    #     extract_and_save_as_markdown(paper_file, output_file)
@@ -0,0 +1,275 @@
from __future__ import annotations

from pathlib import Path
from typing import Optional, Set, Dict, Union, List
from dataclasses import dataclass, field
import logging
import os

from unstructured.partition.auto import partition
from unstructured.documents.elements import (
    Text, Title, NarrativeText, ListItem, Table,
    Footer, Header, PageBreak, Image, Address
)


@dataclass
class TextExtractorConfig:
    """Configuration for the generic document extractor.

    Attributes:
        extract_headers_footers: whether to extract page headers and footers
        extract_tables: whether to extract table content
        extract_lists: whether to extract list content
        extract_titles: whether to extract titles
        paragraph_separator: separator between paragraphs
        text_cleanup: dictionary of text-cleanup options
    """
    extract_headers_footers: bool = False
    extract_tables: bool = True
    extract_lists: bool = True
    extract_titles: bool = True
    paragraph_separator: str = '\n\n'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class UnstructuredTextExtractor:
    """Generic document text extractor.

    Uses the unstructured library to extract text from many document formats,
    behind a uniform interface with configuration options.
    """

    SUPPORTED_EXTENSIONS: Set[str] = {
        # Documents
        '.pdf', '.docx', '.doc', '.txt',
        # Presentations
        '.ppt', '.pptx',
        # Spreadsheets
        '.xlsx', '.xls', '.csv',
        # Images
        '.png', '.jpg', '.jpeg', '.tiff',
        # Email
        '.eml', '.msg', '.p7s',
        # Markdown
        ".md",
        # Org Mode
        ".org",
        # Open Office
        ".odt",
        # reStructured Text
        ".rst",
        # Rich Text
        ".rtf",
        # TSV
        ".tsv",
        # EPUB
        '.epub',
        # Other formats
        '.html', '.xml', '.json',
    }

    def __init__(self, config: Optional[TextExtractorConfig] = None):
        """Initialize the extractor.

        Args:
            config: extractor configuration object; defaults are used when None
        """
        self.config = config or TextExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure the logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Add a file handler
        fh = logging.FileHandler('text_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_file(self, file_path: Union[str, Path], max_size_mb: int = 100) -> Path:
        """Validate a file.

        Args:
            file_path: file path
            max_size_mb: maximum allowed file size (MB)

        Returns:
            Path: the validated Path object

        Raises:
            ValueError: file missing, unsupported format, or size over limit
            PermissionError: no read permission
        """
        path = Path(file_path).resolve()

        if not path.exists():
            raise ValueError(f"File not found: {path}")

        if not path.is_file():
            raise ValueError(f"Not a file: {path}")

        if not os.access(path, os.R_OK):
            raise PermissionError(f"No read permission: {path}")

        file_size_mb = path.stat().st_size / (1024 * 1024)
        if file_size_mb > max_size_mb:
            raise ValueError(
                f"File size ({file_size_mb:.1f}MB) exceeds limit of {max_size_mb}MB"
            )

        if path.suffix.lower() not in self.SUPPORTED_EXTENSIONS:
            raise ValueError(
                f"Unsupported format: {path.suffix}. "
                f"Supported: {', '.join(sorted(self.SUPPORTED_EXTENSIONS))}"
            )

        return path

    def _cleanup_text(self, text: str) -> str:
        """Clean up text.

        Args:
            text: raw text

        Returns:
            str: the cleaned text
        """
        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    def _should_extract_element(self, element) -> bool:
        """Decide whether an element should be extracted.

        Args:
            element: a document element

        Returns:
            bool: whether to extract it
        """
        if isinstance(element, (Text, NarrativeText)):
            return True

        if isinstance(element, Title) and self.config.extract_titles:
            return True

        if isinstance(element, ListItem) and self.config.extract_lists:
            return True

        if isinstance(element, Table) and self.config.extract_tables:
            return True

        if isinstance(element, (Header, Footer)) and self.config.extract_headers_footers:
            return True

        return False

    @staticmethod
    def get_supported_formats() -> List[str]:
        """Return the list of supported file formats."""
        return sorted(UnstructuredTextExtractor.SUPPORTED_EXTENSIONS)

    def extract_text(
        self,
        file_path: Union[str, Path],
        strategy: str = "fast"
    ) -> str:
        """Extract text.

        Args:
            file_path: file path
            strategy: extraction strategy ("fast" or "accurate")

        Returns:
            str: the extracted text content

        Raises:
            Exception: errors during extraction
        """
        try:
            path = self._validate_file(file_path)
            self.logger.info(f"Processing: {path}")

            # Pass nlp=False here to disable NLTK
            elements = partition(
                str(path),
                strategy=strategy,
                include_metadata=True,
                nlp=False,
            )

            # The rest is unchanged
            text_parts = []
            for element in elements:
                if self._should_extract_element(element):
                    text = str(element)
                    cleaned_text = self._cleanup_text(text)
                    if cleaned_text:
                        if isinstance(element, (Header, Footer)):
                            prefix = "[Header] " if isinstance(element, Header) else "[Footer] "
                            text_parts.append(f"{prefix}{cleaned_text}")
                        else:
                            text_parts.append(cleaned_text)

            return self.config.paragraph_separator.join(text_parts)

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise



def main():
    """Entry point: demonstrates usage."""
    # Configuration
    config = TextExtractorConfig(
        extract_headers_footers=True,
        extract_tables=True,
        extract_lists=True,
        extract_titles=True,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # Create the extractor
    extractor = UnstructuredTextExtractor(config)

    # Usage example
    try:
        # Replace with an actual file path
        sample_file = './crazy_functions/doc_fns/read_fns/paper/2501.12599v1.pdf'
        if Path(sample_file).exists():
            text = extractor.extract_text(sample_file)
            print("Extracted text:")
            print(text)
        else:
            print(f"Sample file {sample_file} does not exist")

        print("\nSupported formats:", extractor.get_supported_formats())

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
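Because element filtering is driven entirely by TextExtractorConfig and _should_extract_element, the same extractor can be narrowed to specific element types without touching the extraction code. A brief sketch, assuming a local sample.docx (placeholder path):

    # keep only narrative text and tables, dropping titles, lists, headers and footers
    config = TextExtractorConfig(extract_titles=False, extract_lists=False,
                                 extract_headers_footers=False, extract_tables=True)
    extractor = UnstructuredTextExtractor(config)
    print(extractor.extract_text('sample.docx'))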
219  crazy_functions/doc_fns/read_fns/web_reader.py  Normal file
@@ -0,0 +1,219 @@
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Dict, Optional, Union
from urllib.parse import urlparse
import logging
import trafilatura
import requests
from pathlib import Path


@dataclass
class WebExtractorConfig:
    """Configuration for the web-page content extractor.

    Attributes:
        extract_comments: whether to extract comments
        extract_tables: whether to extract tables
        extract_links: whether to keep link information
        paragraph_separator: paragraph separator
        timeout: network request timeout (seconds)
        max_retries: maximum number of retries
        user_agent: custom User-Agent
        text_cleanup: text-cleanup options
    """
    extract_comments: bool = False
    extract_tables: bool = True
    extract_links: bool = False
    paragraph_separator: str = '\n\n'
    timeout: int = 10
    max_retries: int = 3
    user_agent: str = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    text_cleanup: Dict[str, bool] = field(default_factory=lambda: {
        'remove_extra_spaces': True,
        'normalize_whitespace': True,
        'remove_special_chars': False,
        'lowercase': False
    })


class WebTextExtractor:
    """Web-page text extractor.

    Uses the trafilatura library to extract the main text of a web page,
    dropping ads, navigation and other irrelevant content.
    """

    def __init__(self, config: Optional[WebExtractorConfig] = None):
        """Initialize the extractor.

        Args:
            config: extractor configuration object; defaults are used when None
        """
        self.config = config or WebExtractorConfig()
        self._setup_logging()

    def _setup_logging(self) -> None:
        """Configure the logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Add a file handler
        fh = logging.FileHandler('web_extractor.log')
        fh.setLevel(logging.ERROR)
        self.logger.addHandler(fh)

    def _validate_url(self, url: str) -> bool:
        """Check whether a URL is well formed.

        Args:
            url: web-page URL

        Returns:
            bool: whether the URL is valid
        """
        try:
            result = urlparse(url)
            return all([result.scheme, result.netloc])
        except Exception:
            return False

    def _download_webpage(self, url: str) -> Optional[str]:
        """Download a web page.

        Args:
            url: web-page URL

        Returns:
            Optional[str]: the page's HTML content, or None on failure

        Raises:
            Exception: raised when the download fails
        """
        headers = {'User-Agent': self.config.user_agent}

        for attempt in range(self.config.max_retries):
            try:
                response = requests.get(
                    url,
                    headers=headers,
                    timeout=self.config.timeout
                )
                response.raise_for_status()
                return response.text
            except requests.RequestException as e:
                self.logger.warning(f"Attempt {attempt + 1} failed: {e}")
                if attempt == self.config.max_retries - 1:
                    raise Exception(f"Failed to download webpage after {self.config.max_retries} attempts: {e}")
        return None

    def _cleanup_text(self, text: str) -> str:
        """Clean up text.

        Args:
            text: raw text

        Returns:
            str: the cleaned text
        """
        if not text:
            return ""

        if self.config.text_cleanup['remove_extra_spaces']:
            text = ' '.join(text.split())

        if self.config.text_cleanup['normalize_whitespace']:
            text = text.replace('\t', ' ').replace('\r', '\n')

        if self.config.text_cleanup['lowercase']:
            text = text.lower()

        return text.strip()

    def extract_text(self, url: str) -> str:
        """Extract the text content of a web page.

        Args:
            url: web-page URL

        Returns:
            str: the extracted text content

        Raises:
            ValueError: raised when the URL is invalid
            Exception: raised when extraction fails
        """
        try:
            if not self._validate_url(url):
                raise ValueError(f"Invalid URL: {url}")

            self.logger.info(f"Processing URL: {url}")

            # Download the page
            html_content = self._download_webpage(url)
            if not html_content:
                raise Exception("Failed to download webpage")

            # Configure trafilatura's extraction options
            extract_config = {
                'include_comments': self.config.extract_comments,
                'include_tables': self.config.extract_tables,
                'include_links': self.config.extract_links,
                'no_fallback': False,  # allow the fallback extractors
            }

            # Extract the text
            extracted_text = trafilatura.extract(
                html_content,
                **extract_config
            )

            if not extracted_text:
                raise Exception("No content could be extracted")

            # Clean up the text
            cleaned_text = self._cleanup_text(extracted_text)

            return cleaned_text

        except Exception as e:
            self.logger.error(f"Extraction failed: {e}")
            raise


def main():
    """Entry point: demonstrates usage."""
    # Configuration
    config = WebExtractorConfig(
        extract_comments=False,
        extract_tables=True,
        extract_links=False,
        timeout=10,
        text_cleanup={
            'remove_extra_spaces': True,
            'normalize_whitespace': True,
            'remove_special_chars': False,
            'lowercase': False
        }
    )

    # Create the extractor
    extractor = WebTextExtractor(config)

    # Usage example
    try:
        # Replace with an actual URL
        sample_url = 'https://arxiv.org/abs/2412.00036'
        text = extractor.extract_text(sample_url)
        print("Extracted text:")
        print(text)

    except Exception as e:
        print(f"Error: {e}")


if __name__ == "__main__":
    main()
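For slow or flaky pages, the retry behaviour is controlled entirely by WebExtractorConfig rather than by the call site. A short sketch (the URL is a placeholder):

    config = WebExtractorConfig(timeout=30, max_retries=5, extract_links=True)
    extractor = WebTextExtractor(config)
    text = extractor.extract_text('https://example.com/article')  # retried up to 5 times
    print(text[:200])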
2  main.py
@@ -31,7 +31,7 @@ def encode_plugin_info(k, plugin)->str:

 def main():
     import gradio as gr
-    if gr.__version__ not in ['3.32.12']:
+    if gr.__version__ not in ['3.32.15']:
         raise ModuleNotFoundError("Use the project's bundled Gradio for the best experience! Run `pip install -r requirements.txt` to install the bundled Gradio and the other dependencies; see requirements.txt for details.")

     # Some basic tools
@@ -80,6 +80,7 @@ ollama_endpoint = "http://localhost:11434/api/chat"
 yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
 deepseekapi_endpoint = "https://api.deepseek.com/v1/chat/completions"
 grok_model_endpoint = "https://api.x.ai/v1/chat/completions"
+volcengine_endpoint = "https://ark.cn-beijing.volces.com/api/v3/chat/completions"

 if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
 azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'

@@ -102,6 +103,7 @@ if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollam
 if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
 if deepseekapi_endpoint in API_URL_REDIRECT: deepseekapi_endpoint = API_URL_REDIRECT[deepseekapi_endpoint]
 if grok_model_endpoint in API_URL_REDIRECT: grok_model_endpoint = API_URL_REDIRECT[grok_model_endpoint]
+if volcengine_endpoint in API_URL_REDIRECT: volcengine_endpoint = API_URL_REDIRECT[volcengine_endpoint]

 # Get the tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")

@@ -280,6 +282,7 @@ model_info = {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
         "endpoint": openai_endpoint,
+        "can_multi_thread": True,
         "max_token": 128000,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,

@@ -312,6 +315,50 @@ model_info = {
         "openai_force_temperature_one": True,
     },

+    "gpt-4.1":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "has_multimodal_capacity": True,
+        "endpoint": openai_endpoint,
+        "max_token": 828000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
+    "gpt-4.1-mini":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "has_multimodal_capacity": True,
+        "endpoint": openai_endpoint,
+        "max_token": 828000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
+    "o3":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "has_multimodal_capacity": True,
+        "endpoint": openai_endpoint,
+        "max_token": 828000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+        "openai_disable_system_prompt": True,
+        "openai_disable_stream": True,
+        "openai_force_temperature_one": True,
+    },
+
+    "o4-mini":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "has_multimodal_capacity": True,
+        "can_multi_thread": True,
+        "endpoint": openai_endpoint,
+        "max_token": 828000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
     "gpt-4-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,

@@ -528,6 +575,15 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "gemini-2.0-flash": {
+        "fn_with_ui": genai_ui,
+        "fn_without_ui": genai_noui,
+        "endpoint": gemini_endpoint,
+        "has_multimodal_capacity": True,
+        "max_token": 1024 * 204800,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },

     # cohere
     "cohere-command-r-plus": {

@@ -811,8 +867,12 @@ if "qwen-local" in AVAIL_LLM_MODELS:
         })
     except:
         logger.error(trimmed_format_exc())
-# -=-=-=-=-=-=- Tongyi online models -=-=-=-=-=-=-
-qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus"]
+# -=-=-=-=-=-=- Alibaba Cloud Bailian (Tongyi) online models -=-=-=-=-=-=-
+qwen_models = ["qwen-max-latest", "qwen-max-2025-01-25","qwen-max","qwen-turbo","qwen-plus",
+               "dashscope-deepseek-r1","dashscope-deepseek-v3",
+               "dashscope-qwen3-14b", "dashscope-qwen3-235b-a22b", "dashscope-qwen3-32b",
+               ]
 if any(item in qwen_models for item in AVAIL_LLM_MODELS):
     try:
         from .bridge_qwen import predict_no_ui_long_connection as qwen_noui

@@ -862,10 +922,58 @@ if any(item in qwen_models for item in AVAIL_LLM_MODELS):
                 "max_token": 30720,
                 "tokenizer": tokenizer_gpt35,
                 "token_cnt": get_token_num_gpt35,
             },
+            "dashscope-deepseek-r1": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "enable_reasoning": True,
+                "can_multi_thread": True,
+                "endpoint": None,
+                "max_token": 57344,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            },
+            "dashscope-deepseek-v3": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "can_multi_thread": True,
+                "endpoint": None,
+                "max_token": 57344,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            },
+            "dashscope-qwen3-14b": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "enable_reasoning": True,
+                "can_multi_thread": True,
+                "endpoint": None,
+                "max_token": 129024,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            },
+            "dashscope-qwen3-235b-a22b": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "can_multi_thread": True,
+                "endpoint": None,
+                "max_token": 129024,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            },
+            "dashscope-qwen3-32b": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "can_multi_thread": True,
+                "endpoint": None,
+                "max_token": 129024,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            }
         })
     except:
         logger.error(trimmed_format_exc())

 # -=-=-=-=-=-=- Yi (01.AI) models -=-=-=-=-=-=-
 yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"]
 if any(item in yi_models for item in AVAIL_LLM_MODELS):

@@ -1089,8 +1197,10 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
         })
     except:
         logger.error(trimmed_format_exc())

 # -=-=-=-=-=-=- DeepSeek (High-Flyer) online model API -=-=-=-=-=-=-
-if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS or "deepseek-reasoner" in AVAIL_LLM_MODELS:
+deepseek_models = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"]
+if any(item in deepseek_models for item in AVAIL_LLM_MODELS):
     try:
         deepseekapi_noui, deepseekapi_ui = get_predict_function(
             api_key_conf_name="DEEPSEEK_API_KEY", max_output_token=4096, disable_proxy=False

@@ -1127,6 +1237,60 @@
         })
     except:
         logger.error(trimmed_format_exc())

+# -=-=-=-=-=-=- Volcengine (Ark) support -=-=-=-=-=-=-
+for model in [m for m in AVAIL_LLM_MODELS if m.startswith("volcengine-")]:
+    # This interface allows flexible access to the Volcengine multi-model console, e.g.:
+    #   AVAIL_LLM_MODELS = ["volcengine-deepseek-r1-250120(max_token=6666)"]
+    # where
+    #   "volcengine-" is the prefix (required)
+    #   "deepseek-r1-250120" is the model name (required)
+    #   "(max_token=6666)" is the configuration (optional)
+    model_info_extend = dict(model_info)  # copy, so the known-model table does not pollute the global model_info
+    model_info_extend.update({
+        "deepseek-r1-250120": {
+            "max_token": 16384,
+            "enable_reasoning": True,
+            "can_multi_thread": True,
+            "endpoint": volcengine_endpoint,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+        "deepseek-v3-241226": {
+            "max_token": 16384,
+            "enable_reasoning": False,
+            "can_multi_thread": True,
+            "endpoint": volcengine_endpoint,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
+    try:
+        origin_model_name, max_token_tmp = read_one_api_model_name(model)
+        # If it is a known model, try to fetch its info
+        original_model_info = model_info_extend.get(origin_model_name.replace("volcengine-", "", 1), None)
+    except:
+        logger.error(f"The max_token configuration of volcengine model {model} is not an integer; please check the configuration file.")
+        continue
+
+    volcengine_noui, volcengine_ui = get_predict_function(api_key_conf_name="ARK_API_KEY", max_output_token=8192, disable_proxy=True, model_remove_prefix = ["volcengine-"])
+
+    this_model_info = {
+        "fn_with_ui": volcengine_ui,
+        "fn_without_ui": volcengine_noui,
+        "endpoint": volcengine_endpoint,
+        "can_multi_thread": True,
+        "max_token": 64000,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    }
+
+    # Sync the remaining attributes of known models
+    attribute = "has_multimodal_capacity"
+    if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
+    attribute = "enable_reasoning"
+    if original_model_info is not None and original_model_info.get(attribute, None) is not None: this_model_info.update({attribute: original_model_info.get(attribute, None)})
+    model_info.update({model: this_model_info})

 # -=-=-=-=-=-=- one-api support -=-=-=-=-=-=-
 for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
     # This interface allows flexible access to the one-api multi-model console, e.g.: AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
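The "volcengine-<model>(max_token=N)" naming convention above is parsed by read_one_api_model_name; the shape of that parsing can be sketched as follows. This is an illustrative re-implementation for clarity, not the project's actual helper:

    import re
    def parse_model_entry(entry):
        # 'volcengine-deepseek-r1-250120(max_token=6666)' -> ('volcengine-deepseek-r1-250120', 6666)
        m = re.match(r'^(.*?)(?:\(max_token=(\d+)\))?$', entry)
        name, max_token = m.group(1), m.group(2)
        return name, int(max_token) if max_token else None

    print(parse_model_entry('volcengine-deepseek-r1-250120(max_token=6666)'))
    print(parse_model_entry('volcengine-deepseek-v3-241226'))  # no suffix -> (name, None)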
@@ -368,12 +368,12 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
                     log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
                 break # conformant endpoints can break here
             else:
-                continue # non-conformant crap endpoints must continue here
+                continue # non-conformant endpoints must continue here
         # From this point on we can assume that `choices` must be present
         try:
             status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
         except:
-            logger.error(f"Some garbage third-party endpoints produce this error; tolerate it: {chunk_decoded}")
+            logger.error(f"Some third-party endpoints produce this error; tolerate it: {chunk_decoded}")
         # Main body of the stream handling
         if has_content:
             # Normal case

@@ -382,9 +382,9 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
             # Some third-party endpoints produce this error; tolerate it
             continue
         else:
-            # Beyond the range a conformant endpoint should reach; some garbage third-party endpoints produce this error
+            # Beyond the range a conformant endpoint should reach; some third-party endpoints produce this error
             if chunkjson['choices'][0]["delta"].get("content", None) is None:
-                logger.error(f"Some garbage third-party endpoints produce this error; tolerate it: {chunk_decoded}")
+                logger.error(f"Some third-party endpoints produce this error; tolerate it: {chunk_decoded}")
                 continue
             gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
@@ -3,6 +3,7 @@ from toolbox import get_conf
 import threading

 timeout_bot_msg = '[Local Message] Request timeout. Network error.'
+model_prefix_to_remove = 'dashscope-'

 class QwenRequestInstance():
     def __init__(self):

@@ -20,6 +21,13 @@ class QwenRequestInstance():
             raise RuntimeError('Please configure DASHSCOPE_API_KEY')
         dashscope.api_key = get_conf("DASHSCOPE_API_KEY")

+    def format_reasoning(self, reasoning_content:str, main_content:str):
+        if reasoning_content:
+            reasoning_content_paragraphs = ''.join([f'<p style="margin: 1.25em 0;">{line}</p>' for line in reasoning_content.split('\n')])
+            formatted_reasoning_content = f'<div class="reasoning_process">{reasoning_content_paragraphs}</div>\n\n---\n\n'
+            return formatted_reasoning_content + main_content
+        else:
+            return main_content
+
     def generate(self, inputs, llm_kwargs, history, system_prompt):
         # import _thread as thread

@@ -28,9 +36,13 @@ class QwenRequestInstance():
         if top_p == 0: top_p += 1e-5
         if top_p == 1: top_p -= 1e-5

+        model_name = llm_kwargs['llm_model']
+        if model_name.startswith(model_prefix_to_remove): model_name = model_name[len(model_prefix_to_remove):]
+
+        self.reasoning_buf = ""
         self.result_buf = ""
         responses = Generation.call(
-            model=llm_kwargs['llm_model'],
+            model=model_name,
             messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
             top_p=top_p,
             temperature=llm_kwargs.get('temperature', 1.0),

@@ -46,18 +58,24 @@ class QwenRequestInstance():
                     self.result_buf += response.output.choices[0].message.content
                 except:
                     pass
-                yield self.result_buf
+                yield self.format_reasoning(self.reasoning_buf, self.result_buf)
                 break
             elif response.output.choices[0].finish_reason == 'length':
                 self.result_buf += "[Local Message] The generated output is too long; the rest was truncated"
-                yield self.result_buf
+                yield self.format_reasoning(self.reasoning_buf, self.result_buf)
                 break
             else:
+                try:
+                    contain_reasoning = hasattr(response.output.choices[0].message, 'reasoning_content')
+                except:
+                    contain_reasoning = False
+                if contain_reasoning:
+                    self.reasoning_buf += response.output.choices[0].message.reasoning_content
                 self.result_buf += response.output.choices[0].message.content
-                yield self.result_buf
+                yield self.format_reasoning(self.reasoning_buf, self.result_buf)
         else:
             self.result_buf += f"[Local Message] Request error: status code {response.status_code}, error code {response.code}, message {response.message}"
-            yield self.result_buf
+            yield self.format_reasoning(self.reasoning_buf, self.result_buf)
             break

 # Exhaust the generator to avoid errors
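The format_reasoning helper above wraps the model's reasoning trace in an HTML block that precedes the visible answer. A rough sketch of the output shape (the reasoning_process class is assumed to be styled by the project's frontend CSS, which is not part of this diff):

    reasoning = "step one\nstep two"
    paragraphs = ''.join(f'<p style="margin: 1.25em 0;">{line}</p>' for line in reasoning.split('\n'))
    print(f'<div class="reasoning_process">{paragraphs}</div>\n\n---\n\nfinal answer')
    # -> <div class="reasoning_process"><p ...>step one</p><p ...>step two</p></div>, a rule, then the answer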
@@ -57,7 +57,7 @@ def decode_chunk(chunk):
             finish_reason = chunk["error"]["code"]
         except:
             finish_reason = "API_ERROR"
-        return response, reasoning_content, finish_reason
+        return response, reasoning_content, finish_reason, str(chunk)

     try:
         if chunk["choices"][0]["delta"]["content"] is not None:

@@ -122,7 +122,8 @@ def generate_message(input, model, key, history, max_output_token, system_prompt
 def get_predict_function(
     api_key_conf_name,
     max_output_token,
-    disable_proxy = False
+    disable_proxy = False,
+    model_remove_prefix = [],
 ):
     """
     Generate the response functions for an OpenAI-style API; the parameters are:

@@ -137,6 +138,16 @@ def get_predict_function(

     APIKEY = get_conf(api_key_conf_name)

+    def remove_prefix(model_name):
+        # Strip the model-name prefix: given volcengine-deepseek-r1-250120, returns deepseek-r1-250120
+        if not model_remove_prefix:
+            return model_name
+        model_without_prefix = model_name
+        for prefix in model_remove_prefix:
+            if model_without_prefix.startswith(prefix):
+                model_without_prefix = model_without_prefix[len(prefix):]
+        return model_without_prefix
+
     def predict_no_ui_long_connection(
         inputs,
         llm_kwargs,

@@ -164,9 +175,11 @@ def get_predict_function(
             raise RuntimeError(f"The API key is empty; check the {api_key_conf_name} entry of the configuration file")
         if inputs == "":
             inputs = "Hello👋"


         headers, payload = generate_message(
             input=inputs,
-            model=llm_kwargs["llm_model"],
+            model=remove_prefix(llm_kwargs["llm_model"]),
             key=APIKEY,
             history=history,
             max_output_token=max_output_token,

@@ -302,7 +315,7 @@ def get_predict_function(

         headers, payload = generate_message(
             input=inputs,
-            model=llm_kwargs["llm_model"],
+            model=remove_prefix(llm_kwargs["llm_model"]),
             key=APIKEY,
             history=history,
             max_output_token=max_output_token,
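The remove_prefix closure added above is what lets one request function serve prefixed model aliases such as "volcengine-...". A quick sketch of its behaviour, written as a standalone function for illustration (it mirrors the closure, it is not the closure itself):

    def remove_prefix(model_name, model_remove_prefix=["volcengine-"]):
        for prefix in model_remove_prefix:
            if model_name.startswith(prefix):
                model_name = model_name[len(prefix):]
        return model_name

    print(remove_prefix("volcengine-deepseek-r1-250120"))  # -> deepseek-r1-250120
    print(remove_prefix("gpt-4o"))                         # unchanged -> gpt-4o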
@@ -1,4 +1,4 @@
-https://public.agent-matrix.com/publish/gradio-3.32.12-py3-none-any.whl
+https://public.agent-matrix.com/publish/gradio-3.32.15-py3-none-any.whl
 fastapi==0.110
 gradio-client==0.8
 pypdf2==2.12.1
@@ -3,7 +3,8 @@ import re
 import os
 import math
 import html
+import base64
+import gzip
 from loguru import logger
 from textwrap import dedent
 from functools import lru_cache
@@ -325,6 +326,14 @@ def markdown_convertion_for_file(txt):
     # cat them together
     return pre + convert_stage_5 + suf

+def compress_string(s):
+    compressed = gzip.compress(s.encode('utf-8'))
+    return base64.b64encode(compressed).decode()
+
+def decompress_string(s):
+    decoded_string = base64.b64decode(s)
+    return gzip.decompress(decoded_string).decode('utf-8')
+
 @lru_cache(maxsize=128)  # 使用 lru缓存 加快转换速度
 def markdown_convertion(txt):
     """
@@ -336,6 +345,12 @@ def markdown_convertion(txt):
         # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
         return txt  # 已经被转化过,不需要再次转化

+    # 在文本中插入一个base64编码的原始文本,以便在复制时能够获得原始文本
+    raw_text_encoded = compress_string(txt)
+    raw_text_node = f'<div class="raw_text" style="display:none">{raw_text_encoded}</div><div class="message_tail" style="display:none"/>'
+    suf = raw_text_node + "</div>"
+
     # 用于查找数学公式的正则表达式
     find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'

     txt = fix_markdown_indent(txt)
@@ -493,6 +508,7 @@ def simple_markdown_convertion(text):
     suf = "</div>"
     if text.startswith(pre) and text.endswith(suf):
         return text  # 已经被转化过,不需要再次转化
+    text = compat_non_markdown_input(text)  # 兼容非markdown输入
     text = markdown.markdown(
         text,
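compress_string / decompress_string are a plain gzip-plus-base64 round trip; a minimal self-contained sketch of the invariant the hidden raw_text node relies on:

import base64, gzip

def compress_string(s):
    return base64.b64encode(gzip.compress(s.encode('utf-8'))).decode()

def decompress_string(s):
    return gzip.decompress(base64.b64decode(s)).decode('utf-8')

raw = "# markdown source with $x^2$ math"
assert decompress_string(compress_string(raw)) == raw
# The encoded blob is ASCII-safe, so it can be embedded in the hidden
# <div class="raw_text"> node and recovered verbatim by the front end.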
296 shared_utils/context_clip_policy.py (new file)
@@ -0,0 +1,296 @@
import copy
from shared_utils.config_loader import get_conf


def get_token_num(txt, tokenizer):
    return len(tokenizer.encode(txt, disallowed_special=()))


def get_model_info():
    from request_llms.bridge_all import model_info
    return model_info


def clip_history(inputs, history, tokenizer, max_token_limit):
    """
    reduce the length of history by clipping.
    this function searches for the longest entries to clip, little by little,
    until the number of tokens of history is reduced under the threshold.

    通过裁剪来缩短历史记录的长度。
    此函数逐渐地搜索最长的条目进行剪辑,
    直到历史记录的标记数量降低到阈值以下。

    被动触发裁剪
    """
    import numpy as np

    input_token_num = get_token_num(inputs, tokenizer)

    if max_token_limit < 5000:
        output_token_expect = 256  # 4k & 2k models
    elif max_token_limit < 9000:
        output_token_expect = 512  # 8k models
    else:
        output_token_expect = 1024  # 16k & 32k models

    if input_token_num < max_token_limit * 3 / 4:
        # 当输入部分的token占比小于限制的3/4时,裁剪时
        # 1. 把input的余量留出来
        max_token_limit = max_token_limit - input_token_num
        # 2. 把输出用的余量留出来
        max_token_limit = max_token_limit - output_token_expect
        # 3. 如果余量太小了,直接清除历史
        if max_token_limit < output_token_expect:
            history = []
            return history
    else:
        # 当输入部分的token占比 > 限制的3/4时,直接清除历史
        history = []
        return history

    everything = [""]
    everything.extend(history)
    n_token = get_token_num("\n".join(everything), tokenizer)
    everything_token = [get_token_num(e, tokenizer) for e in everything]

    # 截断时的颗粒度
    delta = max(everything_token) // 16

    while n_token > max_token_limit:
        where = np.argmax(everything_token)
        encoded = tokenizer.encode(everything[where], disallowed_special=())
        clipped_encoded = encoded[: len(encoded) - delta]
        everything[where] = tokenizer.decode(clipped_encoded)[
            :-1
        ]  # -1 to remove the may-be illegal char
        everything_token[where] = get_token_num(everything[where], tokenizer)
        n_token = get_token_num("\n".join(everything), tokenizer)

    history = everything[1:]
    return history
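A quick way to see the passive policy in action is to drive clip_history with a toy whitespace tokenizer (a hypothetical stand-in for the real tiktoken tokenizer; assumes numpy and the module above are importable):

class ToyTokenizer:
    # Hypothetical stand-in: one token per whitespace-separated word.
    def encode(self, txt, disallowed_special=()):
        return txt.split()
    def decode(self, tokens):
        return " ".join(tokens) + " "  # trailing char absorbs the [:-1] trim

history = ["short reply", "a " * 1200 + "very long entry", "another short reply"]
clipped = clip_history(inputs="new question", history=history,
                       tokenizer=ToyTokenizer(), max_token_limit=1000)
# The longest entry is trimmed step by step (delta tokens per pass)
# until the whole history fits the remaining budget.
print([len(h.split()) for h in clipped])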
def auto_context_clip_each_message(current, history):
    """
    clip_history 是被动触发的

    主动触发裁剪
    """
    context = history + [current]
    trigger_clip_token_len = get_conf('AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN')
    model_info = get_model_info()
    tokenizer = model_info['gpt-4']['tokenizer']
    # 只保留最近的128条记录,无论token长度,防止计算token时计算过长的时间
    max_round = get_conf('AUTO_CONTEXT_MAX_ROUND')
    char_len = sum([len(h) for h in context])
    if char_len < trigger_clip_token_len * 2:
        # 不需要裁剪
        history = context[:-1]
        current = context[-1]
        return current, history
    if len(context) > max_round:
        context = context[-max_round:]
    # 计算各个历史记录的token长度
    context_token_num = [get_token_num(h, tokenizer) for h in context]
    context_token_num_old = copy.copy(context_token_num)
    total_token_num = total_token_num_old = sum(context_token_num)
    if total_token_num < trigger_clip_token_len:
        # 不需要裁剪
        history = context[:-1]
        current = context[-1]
        return current, history
    clip_token_len = trigger_clip_token_len * 0.85
    # 越长越先被裁,越靠后越先被裁
    max_clip_ratio: list[float] = get_conf('AUTO_CONTEXT_MAX_CLIP_RATIO')
    max_clip_ratio = list(reversed(max_clip_ratio))
    if len(context) > len(max_clip_ratio):
        # give up the oldest context
        context = context[-len(max_clip_ratio):]
        context_token_num = context_token_num[-len(max_clip_ratio):]
    if len(context) < len(max_clip_ratio):
        # match the length of the two arrays
        max_clip_ratio = max_clip_ratio[-len(context):]

    # compute rank
    clip_prior_weight = [(token_num / clip_token_len + (len(context) - index) * 0.1) for index, token_num in enumerate(context_token_num)]
    # print('clip_prior_weight', clip_prior_weight)
    # get sorted index of context_token_num, from largest to smallest
    sorted_index = sorted(range(len(context_token_num)), key=lambda k: clip_prior_weight[k], reverse=True)

    # pre-compute space yield
    for index in sorted_index:
        print('index', index, f'current total {total_token_num}, target {clip_token_len}')
        if total_token_num < clip_token_len:
            # no need to clip
            break
        # clip room left
        clip_room_left = total_token_num - clip_token_len
        # get the clip ratio
        allowed_token_num_this_entry = max_clip_ratio[index] * clip_token_len
        if context_token_num[index] < allowed_token_num_this_entry:
            print('index', index, '[allowed] before', context_token_num[index], 'allowed', allowed_token_num_this_entry)
            continue

        token_to_clip = context_token_num[index] - allowed_token_num_this_entry
        if token_to_clip * 0.85 > clip_room_left:
            print('index', index, '[careful clip] token_to_clip', token_to_clip, 'clip_room_left', clip_room_left)
            token_to_clip = clip_room_left

        token_percent_to_clip = token_to_clip / context_token_num[index]
        char_percent_to_clip = token_percent_to_clip
        text_this_entry = context[index]
        char_num_to_clip = int(len(text_this_entry) * char_percent_to_clip)
        if char_num_to_clip < 500:
            # 如果裁剪的字符数小于500,则不裁剪
            print('index', index, 'before', context_token_num[index], 'allowed', allowed_token_num_this_entry)
            continue
        char_num_to_clip += 200  # 稍微多加一点
        char_to_preserve = len(text_this_entry) - char_num_to_clip
        _half = int(char_to_preserve / 2)
        # 前半 + ... (content clipped because token overflows) ... + 后半
        text_this_entry_clip = text_this_entry[:_half] + \
            " ... (content clipped because token overflows) ... " \
            + text_this_entry[-_half:]
        context[index] = text_this_entry_clip
        post_clip_token_cnt = get_token_num(text_this_entry_clip, tokenizer)
        print('index', index, 'before', context_token_num[index], 'allowed', allowed_token_num_this_entry, 'after', post_clip_token_cnt)
        context_token_num[index] = post_clip_token_cnt
        total_token_num = sum(context_token_num)
    context_token_num_final = [get_token_num(h, tokenizer) for h in context]
    print('context_token_num_old', context_token_num_old)
    print('context_token_num_final', context_token_num_final)
    print('token change from', total_token_num_old, 'to', sum(context_token_num_final), 'target', clip_token_len)
    history = context[:-1]
    current = context[-1]
    return current, history
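The ranking heuristic is easy to sanity-check in isolation; a minimal sketch with made-up token counts:

context_token_num = [1200, 300, 2500, 400]  # hypothetical per-message token counts
clip_token_len = 2000

# Weight = relative length + a small positional bonus that grows toward the
# front of the list, so among comparably long entries the older ones rank first.
clip_prior_weight = [tok / clip_token_len + (len(context_token_num) - i) * 0.1
                     for i, tok in enumerate(context_token_num)]
order = sorted(range(len(context_token_num)),
               key=lambda k: clip_prior_weight[k], reverse=True)
print(order)  # [2, 0, 1, 3] -> the 2500-token entry is clipped first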
def auto_context_clip_search_optimal(current, history, promote_latest_long_message=False):
    """
    current: 当前消息
    history: 历史消息列表
    promote_latest_long_message: 是否特别提高最后一条长message的权重,避免过度裁剪

    主动触发裁剪
    """
    context = history + [current]
    trigger_clip_token_len = get_conf('AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN')
    model_info = get_model_info()
    tokenizer = model_info['gpt-4']['tokenizer']
    # 只保留最近的128条记录,无论token长度,防止计算token时计算过长的时间
    max_round = get_conf('AUTO_CONTEXT_MAX_ROUND')
    char_len = sum([len(h) for h in context])
    if char_len < trigger_clip_token_len:
        # 不需要裁剪
        history = context[:-1]
        current = context[-1]
        return current, history
    if len(context) > max_round:
        context = context[-max_round:]
    # 计算各个历史记录的token长度
    context_token_num = [get_token_num(h, tokenizer) for h in context]
    context_token_num_old = copy.copy(context_token_num)
    total_token_num = total_token_num_old = sum(context_token_num)
    if total_token_num < trigger_clip_token_len:
        # 不需要裁剪
        history = context[:-1]
        current = context[-1]
        return current, history
    clip_token_len = trigger_clip_token_len * 0.90
    max_clip_ratio: list[float] = get_conf('AUTO_CONTEXT_MAX_CLIP_RATIO')
    max_clip_ratio = list(reversed(max_clip_ratio))
    if len(context) > len(max_clip_ratio):
        # give up the oldest context
        context = context[-len(max_clip_ratio):]
        context_token_num = context_token_num[-len(max_clip_ratio):]
    if len(context) < len(max_clip_ratio):
        # match the length of the two arrays
        max_clip_ratio = max_clip_ratio[-len(context):]

    _scale = _scale_init = 1.25
    token_percent_arr = [(token_num / clip_token_len) for index, token_num in enumerate(context_token_num)]

    # promote the last long message, avoid clipping it too much
    if promote_latest_long_message:
        promote_weight_constant = 1.6
        promote_index = -1
        threshold = 0.50
        for index, token_percent in enumerate(token_percent_arr):
            if token_percent > threshold:
                promote_index = index
        if promote_index >= 0:
            max_clip_ratio[promote_index] = promote_weight_constant

    max_clip_ratio_arr = max_clip_ratio
    step = 0.05
    for i in range(int(_scale_init / step) - 1):
        _take = 0
        for max_clip, token_r in zip(max_clip_ratio_arr, token_percent_arr):
            _take += min(max_clip * _scale, token_r)
        if _take < 1.0:
            break
        _scale -= 0.05

    # print('optimal scale', _scale)
    # print([_scale * max_clip for max_clip in max_clip_ratio_arr])
    # print([token_r for token_r in token_percent_arr])
    # print([min(token_r, _scale * max_clip) for token_r, max_clip in zip(token_percent_arr, max_clip_ratio_arr)])
    eps = 0.05
    max_clip_ratio = [_scale * max_clip + eps for max_clip in max_clip_ratio_arr]

    # compute rank
    # clip_prior_weight_old = [(token_num/clip_token_len + (len(context) - index)*0.1) for index, token_num in enumerate(context_token_num)]
    clip_prior_weight = [token_r / max_clip for max_clip, token_r in zip(max_clip_ratio_arr, token_percent_arr)]

    # sorted_index_old = sorted(range(len(context_token_num)), key=lambda k: clip_prior_weight_old[k], reverse=True)
    # print('sorted_index_old', sorted_index_old)
    sorted_index = sorted(range(len(context_token_num)), key=lambda k: clip_prior_weight[k], reverse=True)
    # print('sorted_index', sorted_index)

    # pre-compute space yield
    for index in sorted_index:
        # print('index', index, f'current total {total_token_num}, target {clip_token_len}')
        if total_token_num < clip_token_len:
            # no need to clip
            break
        # clip room left
        clip_room_left = total_token_num - clip_token_len
        # get the clip ratio
        allowed_token_num_this_entry = max_clip_ratio[index] * clip_token_len
        if context_token_num[index] < allowed_token_num_this_entry:
            # print('index', index, '[allowed] before', context_token_num[index], 'allowed', allowed_token_num_this_entry)
            continue

        token_to_clip = context_token_num[index] - allowed_token_num_this_entry
        if token_to_clip * 0.85 > clip_room_left:
            # print('index', index, '[careful clip] token_to_clip', token_to_clip, 'clip_room_left', clip_room_left)
            token_to_clip = clip_room_left

        token_percent_to_clip = token_to_clip / context_token_num[index]
        char_percent_to_clip = token_percent_to_clip
        text_this_entry = context[index]
        char_num_to_clip = int(len(text_this_entry) * char_percent_to_clip)
        if char_num_to_clip < 500:
            # 如果裁剪的字符数小于500,则不裁剪
            # print('index', index, 'before', context_token_num[index], 'allowed', allowed_token_num_this_entry)
            continue
        eps = 200
        char_num_to_clip = char_num_to_clip + eps  # 稍微多加一点
        char_to_preserve = len(text_this_entry) - char_num_to_clip
        _half = int(char_to_preserve / 2)
        # 前半 + ... (content clipped because token overflows) ... + 后半
        text_this_entry_clip = text_this_entry[:_half] + \
            " ... (content clipped because token overflows) ... " \
            + text_this_entry[-_half:]
        context[index] = text_this_entry_clip
        post_clip_token_cnt = get_token_num(text_this_entry_clip, tokenizer)
        # print('index', index, 'before', context_token_num[index], 'allowed', allowed_token_num_this_entry, 'after', post_clip_token_cnt)
        context_token_num[index] = post_clip_token_cnt
        total_token_num = sum(context_token_num)
    context_token_num_final = [get_token_num(h, tokenizer) for h in context]
    # print('context_token_num_old', context_token_num_old)
    # print('context_token_num_final', context_token_num_final)
    # print('token change from', total_token_num_old, 'to', sum(context_token_num_final), 'target', clip_token_len)
    history = context[:-1]
    current = context[-1]
    return current, history
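The "search optimal" part is just the loop that shrinks a global multiplier until the capped total fits the budget; a minimal sketch of that search on made-up numbers:

clip_token_len = 2000
token_percent_arr = [0.9, 0.5, 1.4]   # hypothetical message sizes as a fraction of the budget
max_clip_ratio_arr = [0.4, 0.4, 0.8]  # hypothetical per-slot allowances

scale, step = 1.25, 0.05
for _ in range(int(1.25 / step) - 1):
    # Budget actually consumed if every entry is capped at scale * allowance.
    take = sum(min(mc * scale, tr) for mc, tr in zip(max_clip_ratio_arr, token_percent_arr))
    if take < 1.0:  # capped total fits into one budget unit -> stop shrinking
        break
    scale -= step
print(round(scale, 2))  # largest scale whose capped total fits (0.6 for these numbers)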
622 shared_utils/fastapi_stream_server.py (new file)
@@ -0,0 +1,622 @@
"""
|
||||
FastAPI WebSocket 流式服务器模块
|
||||
|
||||
这个模块实现了一个基于FastAPI的WebSocket服务器,用于处理前端和后端之间的实时双向通信。
|
||||
主要功能包括:
|
||||
1. 维护WebSocket连接
|
||||
2. 处理消息队列
|
||||
3. 支持实时和阻塞式消息传输
|
||||
4. 提供事件管理机制
|
||||
|
||||
主要组件:
|
||||
- FutureEvent: 用于事件管理的线程事件类
|
||||
- UserInterfaceMsg: 定义前后端通信的消息格式
|
||||
- PythonMethod_AsyncConnectionMaintainer_AgentcraftInterface: 连接维护器
|
||||
- MasterMindWebSocketServer: WebSocket服务器主类
|
||||
"""
|
||||
|
||||
import uuid
|
||||
import time
|
||||
import json
|
||||
import platform
|
||||
import pickle
|
||||
import asyncio
|
||||
import threading
|
||||
import traceback
|
||||
from fastapi.websockets import WebSocketState
|
||||
from loguru import logger
|
||||
from queue import Queue
|
||||
from fastapi import FastAPI, WebSocket, File, UploadFile, Form
|
||||
from fastapi.responses import JSONResponse
|
||||
from datetime import datetime
|
||||
from typing import List, Optional, Any, Union
|
||||
from pydantic import BaseModel, Field
|
||||
import shutil
|
||||
import os
|
||||
from enum import auto, Enum
|
||||
from typing import List
|
||||
from toolbox import ChatBotWithCookies
|
||||
import starlette
|
||||
|
||||
|
||||
|
||||
class UserInterfaceMsg(BaseModel):
    """
    用户界面消息模型,定义了前后端通信的数据结构

    这个类使用Pydantic BaseModel定义了所有可能的消息字段,
    包括插件功能、输入内容、LLM参数、聊天记录等。
    """
    function: str = Field(default="chat", description="使用哪个插件")
    main_input: str = Field(default="", description="主要输入内容,通常是用户的问题")
    llm_kwargs: dict = Field(default_factory=dict, description="围绕LLM的各种参数")
    # llm_kwargs = {
    #     "api_key": cookies["api_key"],
    #     "llm_model": cookies["llm_model"],
    #     "top_p": 1.0,
    #     "max_length": None,
    #     "temperature": 1.0,
    #     "user_name": "default_user",  # Default user name
    # }
    plugin_kwargs: dict = Field(default_factory=dict, description="围绕该function的各种参数")
    chatbot: list[list[Union[str, None]]] = Field(default_factory=list, description="聊天记录(给人类看的)。格式为 [ [user_msg, bot_msg], [user_msg_2, bot_msg_2], ...],双层列表,第一层是每一轮对话,第二层是用户和机器人的消息。")
    chatbot_cookies: dict = Field(default_factory=dict, description="其他新前端涉及的参数")
    history: list[str] = Field(default_factory=list, description="聊天记录(给模型看的)。单层列表")
    system_prompt: str = Field(default="", description="系统提示词")
    user_request: dict = Field(default_factory=dict, description="用户相关的参数,如用户名")
    special_kwargs: dict = Field(default_factory=dict, description="其他新前端涉及的参数")
    special_state: dict = Field(default_factory=dict, description="特殊状态传递,例如对话结束。")


TERMINATE_MSG = UserInterfaceMsg(function="TERMINATE", special_state={"stop": True})
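Everything crossing the socket is one of these Pydantic models; a minimal sketch of the JSON round trip both ends rely on (pydantic v2 API, as used throughout this file):

msg = UserInterfaceMsg(function="chat", main_input="hello",
                       llm_kwargs={"llm_model": "gpt-4"})
wire = msg.model_dump_json()                       # what send_bytes() ships
back = UserInterfaceMsg.model_validate_json(wire)  # what receive_text() parses
assert back.main_input == "hello"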
def setup_initial_com(initial_msg: UserInterfaceMsg):
    """
    设置插件参数

    从初始消息中提取各种参数并构建插件执行所需的参数字典。

    参数:
        initial_msg: 初始的用户消息

    返回:
        dict: 包含插件执行所需所有参数的字典
    """
    from toolbox import get_plugin_default_kwargs

    com = get_plugin_default_kwargs()
    com["main_input"] = initial_msg.main_input
    # 设置LLM相关参数
    if initial_msg.llm_kwargs.get('api_key', None): com["llm_kwargs"]['api_key'] = initial_msg.llm_kwargs.get('api_key')
    if initial_msg.llm_kwargs.get('llm_model', None): com["llm_kwargs"]['llm_model'] = initial_msg.llm_kwargs.get('llm_model')
    if initial_msg.llm_kwargs.get('top_p', None): com["llm_kwargs"]['top_p'] = initial_msg.llm_kwargs.get('top_p')
    if initial_msg.llm_kwargs.get('max_length', None): com["llm_kwargs"]['max_length'] = initial_msg.llm_kwargs.get('max_length')
    if initial_msg.llm_kwargs.get('temperature', None): com["llm_kwargs"]['temperature'] = initial_msg.llm_kwargs.get('temperature')
    if initial_msg.llm_kwargs.get('user_name', None): com["llm_kwargs"]['user_name'] = initial_msg.llm_kwargs.get('user_name')
    if initial_msg.llm_kwargs.get('embed_model', None): com["llm_kwargs"]['embed_model'] = initial_msg.llm_kwargs.get('embed_model')

    initial_msg.chatbot_cookies.update({
        'api_key': com["llm_kwargs"]['api_key'],
        'top_p': com["llm_kwargs"]['top_p'],
        'llm_model': com["llm_kwargs"]['llm_model'],
        'embed_model': com["llm_kwargs"]['embed_model'],
        'temperature': com["llm_kwargs"]['temperature'],
        'user_name': com["llm_kwargs"]['user_name'],
        'customize_fn_overwrite': {},
    })
    chatbot_with_cookies = ChatBotWithCookies(initial_msg.chatbot_cookies)
    chatbot_with_cookies.write_list(initial_msg.chatbot)
    # 设置其他参数
    com["plugin_kwargs"] = initial_msg.plugin_kwargs
    com["chatbot_with_cookie"] = chatbot_with_cookies
    com["history"] = initial_msg.history
    com["system_prompt"] = initial_msg.system_prompt
    com["user_request"] = initial_msg.user_request

    return com


class DummyRequest(object):
    """最小的请求替身,只携带 on_file_uploaded 所需的 username 属性"""
    def __init__(self, username=None):
        self.username = username
def task_executor(initial_msg: UserInterfaceMsg, queue_blocking_from_client: asyncio.Queue, queue_back_to_client: asyncio.Queue):
    """
    initial_msg: 初始的用户消息 ( <---- begin_contact_websocket_server:initial_message )
    queue_blocking_from_client: 从客户端接收阻塞消息的队列
    queue_back_to_client: 发送消息回客户端的队列
    """
    from toolbox import get_plugin_handle
    from toolbox import get_plugin_default_kwargs
    from toolbox import on_file_uploaded

    def update_ui_websocket(chatbot: List[List[str]], history: List[str], chatbot_cookies: dict, special_state: dict):
        send_obj = UserInterfaceMsg(chatbot=chatbot, history=history, chatbot_cookies=chatbot_cookies, special_state=special_state)
        queue_back_to_client.put_nowait(send_obj)

    com = setup_initial_com(initial_msg)

    main_input = com["main_input"]
    plugin_kwargs = com["plugin_kwargs"]
    chatbot_with_cookies = com["chatbot_with_cookie"]
    history = com["history"]
    system_prompt = com["system_prompt"]
    user_request = com["user_request"]
    llm_kwargs = com["llm_kwargs"]

    # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    # '''''''''''''''''''''''''''''''' 调用插件 '''''''''''''''''''''''''''''''''
    # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    if initial_msg.function == "chat":
        plugin = get_plugin_handle("crazy_functions.Chat->Chat")
        my_working_plugin = (plugin)(**com)
        for cookies, chatbot, hist_json, msg in my_working_plugin:
            history = json.loads(hist_json)
            special_state = {'msg': msg}
            update_ui_websocket(chatbot=chatbot, history=history, chatbot_cookies=cookies, special_state=special_state)  # ----> receive_callback_fn

    if initial_msg.function == "basic":
        from request_llms.bridge_all import predict
        additional_fn = initial_msg.special_kwargs.get("core_function")
        for cookies, chatbot, hist_json, msg in \
                predict(main_input, llm_kwargs, plugin_kwargs, chatbot_with_cookies, history=history, system_prompt=system_prompt, stream=True, additional_fn=additional_fn):
            history = json.loads(hist_json)
            special_state = {'msg': msg}
            update_ui_websocket(chatbot=chatbot, history=history, chatbot_cookies=cookies, special_state=special_state)  # ----> receive_callback_fn

    # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    # '''''''''''''''''''''''''''''''' 上传文件 '''''''''''''''''''''''''''''''''
    # ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
    if initial_msg.function == "upload":
        def wait_upload_done():
            print('Waiting for file upload to complete...')
            file_upload_done_msg = queue_blocking_from_client.get()
            print('file upload complete...')
            if file_upload_done_msg.function != "upload_done":
                raise ValueError("Expected 'upload_done' function, got: {}".format(file_upload_done_msg.function))
            if 'files' not in file_upload_done_msg.special_kwargs:
                raise ValueError("Expected 'files' in special_kwargs, got: {}".format(file_upload_done_msg.special_kwargs))
            return file_upload_done_msg.special_kwargs['files']

        request = DummyRequest(username="default_user")
        chatbot = initial_msg.chatbot
        history = initial_msg.history
        chatbot_cookies = initial_msg.chatbot_cookies
        special_state = {'msg': '等待上传'}
        chatbot += [[initial_msg.main_input, None]]
        update_ui_websocket(chatbot=chatbot, history=history, chatbot_cookies=chatbot_cookies, special_state=special_state)  # ----> receive_callback_fn

        # 等待上传完成
        files = wait_upload_done()

        # 读取文件并预览
        chatbot += [['正在处理', None]]
        update_ui_websocket(chatbot=chatbot, history=history, chatbot_cookies=chatbot_cookies, special_state=special_state)  # ----> receive_callback_fn
        chatbot, _, _, chatbot_cookies = on_file_uploaded(
            request=request, files=files, chatbot=chatbot,
            txt="", txt2="", checkboxes="", cookies=chatbot_cookies
        )
        special_state = {'msg': '完成上传'}
        # 更新前端
        update_ui_websocket(chatbot=chatbot, history=history, chatbot_cookies=chatbot_cookies, special_state=special_state)  # ----> receive_callback_fn
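task_executor's core pattern — drain a generator-based plugin and forward each UI frame into the outbound queue — works the same outside the server; a minimal sketch with a fake plugin standing in for the real ones:

import json, queue

outbound = queue.Queue()

def fake_plugin():
    # Hypothetical plugin: yields (cookies, chatbot, history_json, msg) frames.
    for i in range(3):
        yield {}, [["hi", f"partial answer {i}"]], json.dumps(["hi"]), "generating"

for cookies, chatbot, hist_json, msg in fake_plugin():
    outbound.put_nowait({"chatbot": chatbot, "history": json.loads(hist_json), "msg": msg})

print(outbound.qsize())  # 3 frames queued for the websocket sender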
class FutureEvent(threading.Event):
    """
    扩展的线程事件类,用于异步操作的结果获取

    这个类扩展了threading.Event,添加了返回值存储功能,
    使得可以在事件完成时同时传递结果数据。
    """
    def __init__(self) -> None:
        super().__init__()
        self.return_value = None

    def terminate(self, return_value):
        """
        终止事件并设置返回值

        参数:
            return_value: 事件完成时要返回的值
        """
        self.return_value = return_value
        self.set()

    def wait_and_get_result(self):
        """
        等待事件完成并获取结果

        返回:
            任意类型: 事件完成时设置的返回值
        """
        self.wait()
        return self.return_value


class AtomicQueue:

    def __init__(self):
        self.queue = Queue()

    def put(self, item):
        self.queue.put(item)

    def put_nowait(self, item):
        self.queue.put(item)

    def get(self, timeout=600):
        while self.queue.empty() and timeout > 0:
            time.sleep(1)
            timeout -= 1
        if self.queue.empty():
            raise TimeoutError("Queue get operation timed out")
        return self.queue.get()
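FutureEvent gives one thread a synchronous handle on a result produced elsewhere; a minimal usage sketch:

import threading

evt = FutureEvent()

def worker():
    # Produce a result on another thread, then release the waiter.
    evt.terminate(return_value=42)

threading.Thread(target=worker, daemon=True).start()
print(evt.wait_and_get_result())  # blocks until terminate(), prints 42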
class PythonMethod_AsyncConnectionMaintainer_AgentcraftInterface():
    """
    异步连接维护器接口类

    负责维护WebSocket连接的核心类,处理消息队列的创建和管理,
    以及维护与客户端的长连接通信。
    """

    def make_queue(self):
        """
        创建消息队列

        创建三个队列用于不同类型的消息传输:
        1. queue_back_to_client: 发送给客户端的消息队列
        2. queue_realtime_from_client: 实时接收的客户端消息队列
        3. queue_blocking_from_client: 阻塞式接收的客户端消息队列

        返回:
            tuple: 包含三个队列和一个终止事件的元组
        """
        queue_back_to_client = asyncio.Queue()
        queue_realtime_from_client = asyncio.Queue()
        queue_blocking_from_client = AtomicQueue()
        terminate_event = asyncio.Event()
        return queue_back_to_client, queue_realtime_from_client, queue_blocking_from_client, terminate_event

    async def maintain_connection_forever(self, initial_msg: UserInterfaceMsg, websocket: WebSocket, client_id: str):
        """
        永久维护WebSocket连接

        处理与客户端的持续连接,包括消息的发送和接收。
        创建独立的任务处理消息发送和接收,并启动聊天处理线程。

        参数:
            initial_msg: 初始消息
            websocket: WebSocket连接对象
            client_id: 客户端标识符
        """

        async def wait_message_to_send(queue_back_to_client: asyncio.Queue, terminate_event: asyncio.Event):
            """
            等待并发送消息到客户端

            持续监听消息队列,将消息序列化后发送给客户端。

            参数:
                queue_back_to_client: 发送给客户端的消息队列
            """
            # 🕜 wait message to send away -> front end
            msg_cnt = 0
            try:
                while True:
                    ################
                    # get message and check terminate
                    while True:
                        try:
                            if terminate_event.is_set():
                                msg = TERMINATE_MSG
                                break
                            else:
                                msg: UserInterfaceMsg = await asyncio.wait_for(queue_back_to_client.get(), timeout=0.25)
                                break
                        except asyncio.TimeoutError:
                            continue  # 继续检查条件
                    if msg.function == TERMINATE_MSG.function:
                        logger.info("Received terminate message, skip this message and stop wait_message_to_send.")
                        break
                    ################

                    msg_cnt += 1
                    if websocket.application_state != WebSocketState.CONNECTED:
                        break
                    msg.special_kwargs['uuid'] = uuid.uuid4().hex
                    print(msg)
                    await websocket.send_bytes(msg.model_dump_json())
            except Exception as e:
                logger.exception(f"Error in wait_message_to_send: {e}")
                raise e

        async def receive_forever(queue_realtime_from_client: asyncio.Queue, queue_blocking_from_client: asyncio.Queue, queue_back_to_client: asyncio.Queue, terminate_event: asyncio.Event):
            """
            永久接收客户端消息

            持续监听WebSocket连接,接收客户端消息并根据消息类型分发到不同队列。

            参数:
                queue_realtime_from_client: 实时消息队列
                queue_blocking_from_client: 阻塞消息队列
                queue_back_to_client: 发送回客户端的消息队列
            """
            # 🕜 keep listening traffic <- front end
            msg_cnt: int = 0
            try:
                while True:
                    ################
                    # get message and check terminate
                    while True:
                        try:
                            if terminate_event.is_set():
                                msg = TERMINATE_MSG
                                break
                            else:
                                message = await asyncio.wait_for(websocket.receive_text(), timeout=0.25)
                                msg: UserInterfaceMsg = UserInterfaceMsg.model_validate_json(message)
                                break
                        except asyncio.TimeoutError:
                            continue  # 继续检查条件
                    if msg.function == TERMINATE_MSG.function:
                        logger.info("Received terminate message, stopping receive_forever")
                        break
                    ################
                    msg_cnt += 1
                    logger.info(f"Received message {msg_cnt}: {msg}")
                    queue_blocking_from_client.put_nowait(msg)

            except Exception as e:
                logger.exception(f"Error in receive_forever: {e}")
                raise e

        queue_back_to_client, queue_realtime_from_client, queue_blocking_from_client, terminate_event = self.make_queue()

        def terminate_callback():
            terminate_event.set()

        def ensure_last_message_sent(queue_back_to_client):
            """
            确保最后一条消息被发送到客户端
            """
            queue_back_to_client.put_nowait(TERMINATE_MSG)
            tic = time.time()
            termination_timeout = 5  # seconds
            while not queue_back_to_client.empty():
                time.sleep(0.25)
                if queue_back_to_client.empty():
                    break
                if time.time() - tic > termination_timeout:
                    break

        def task_wrapper(func):
            def wrapper(*args, **kwargs):
                res = func(*args, **kwargs)
                ensure_last_message_sent(queue_back_to_client)
                terminate_callback()
                return res
            return wrapper

        t_x = asyncio.create_task(wait_message_to_send(queue_back_to_client, terminate_event))
        t_r = asyncio.create_task(receive_forever(queue_realtime_from_client, queue_blocking_from_client, queue_back_to_client, terminate_event))
        task_thread = threading.Thread(target=task_wrapper(task_executor), args=(initial_msg, queue_blocking_from_client, queue_back_to_client), daemon=True)
        task_thread.start()

        await t_x
        await t_r
        await websocket.close()
class MasterMindWebSocketServer(PythonMethod_AsyncConnectionMaintainer_AgentcraftInterface):
    """
    WebSocket服务器主类

    继承自异步连接维护器接口,实现了完整的WebSocket服务器功能。
    负责处理客户端连接、事件管理和消息路由。
    """

    def __init__(self, host, port) -> None:
        """
        初始化WebSocket服务器

        参数:
            host: 服务器主机地址
            port: 服务器端口号
        """
        self.websocket_connections = {}
        self.agentcraft_interface_websocket_connections = {}
        self._event_hub = {}
        self.host = host
        self.port = port

    def create_event(self, event_name: str):
        """
        创建一个新的事件

        参数:
            event_name: 事件名称

        返回:
            FutureEvent: 新创建的事件对象
        """
        self._event_hub[event_name] = FutureEvent()
        return self._event_hub[event_name]

    def terminate_event(self, event_name: str, msg: UserInterfaceMsg):
        """
        终止指定的事件并设置返回消息

        参数:
            event_name: 要终止的事件名称
            msg: 要返回的用户界面消息
        """
        self._event_hub[event_name].terminate(return_value=msg)
        return

    async def long_task_01_wait_incoming_connection(self):
        """
        等待传入连接的长期任务

        启动FastAPI服务器并等待WebSocket连接。
        处理新的连接请求,建立WebSocket通信。
        """
        # task 1 wait incoming agent connection
        logger.info("task 1 wait incoming agent connection")

        async def launch_websocket_server():
            """
            启动WebSocket服务器

            创建FastAPI应用并配置WebSocket路由,
            设置服务器参数并启动uvicorn服务器。
            """
            app = FastAPI()

            class UserInput(BaseModel):
                main_input: str

            @app.post("/predict_user_input")
            async def predict_user_input(main_input: UserInput):
                def predict_future_input(main_input):
                    from request_llms.bridge_all import predict_no_ui_long_connection
                    from toolbox import get_plugin_default_kwargs
                    from textwrap import dedent
                    com = get_plugin_default_kwargs()
                    llm_kwargs = com['llm_kwargs']
                    llm_kwargs['llm_model'] = 'one-api-glm-4-flash'
                    completion_system_prompt = dedent("""Predict the next input that the user might type.
                        1. Do not repeat the <current_input>. The <future_input> should be a continuation of <current_input>.
                        2. Use the same language as the input.
                        3. Do not predict too far ahead.
                        4. Use punctuation when needed. Sometimes <future_input> has to begin with some punctuation.

                        Example:
                        <current_input>北京发布暴</current_input>
                        <future_input>雨红色预警</future_input>

                        <current_input>世界是你们的,也是我们</current_input>
                        <future_input>的,但是归根结底是你们的</future_input>

                        <current_input>多年以后,面对行刑队</current_input>
                        <future_input>,奥雷里亚诺·布恩迪亚上校将会回想起父亲带他去见识冰块的那个遥远的下午。</future_input>

                        Format:
                        <future_input>... the predicted input ...</future_input>
                        """)
                    main_input = "<current_input>" + main_input + "</current_input>"
                    result = predict_no_ui_long_connection(
                        inputs=main_input, llm_kwargs=llm_kwargs, sys_prompt=completion_system_prompt, history=[], console_silence=True
                    )
                    print(result)
                    if "<future_input>" not in result or "</future_input>" not in result:
                        raise ValueError("The response does not contain the expected future input format.")
                    result = result.split("<future_input>")[-1].split("</future_input>")[0].strip()
                    return result

                return JSONResponse(content={'future': predict_future_input(main_input.main_input)})

            @app.post("/core_functional")
            async def core_functional_endpoint():
                import core_functional, importlib
                importlib.reload(core_functional)  # 热更新prompt
                core_functionals = core_functional.get_core_functions()
                for k in list(core_functionals.keys()):
                    v = core_functionals[k]
                    # remove PreProcess if any
                    if "PreProcess" in v:
                        v.pop("PreProcess")
                    if "Visible" in v and not v["Visible"]:
                        core_functionals.pop(k)
                return JSONResponse(content=core_functionals)

            @app.post("/upload")
            async def upload_files(files: List[UploadFile] = File(...)):
                """上传文件接口,支持多文件上传并显示进度"""
                from toolbox import on_file_uploaded
                results = []
                upload_dir = "uploads"

                # 确保上传目录存在
                if not os.path.exists(upload_dir):
                    os.makedirs(upload_dir)

                for file in files:
                    try:
                        # 构建文件保存路径
                        file_path = os.path.join(upload_dir, file.filename)
                        processed_size = 0

                        # 分块读取并保存文件,同时计算进度
                        with open(file_path, "wb") as buffer:
                            while chunk := await file.read(8192):  # 8KB chunks
                                buffer.write(chunk)
                                processed_size += len(chunk)
                                if file.size:  # 如果文件大小可用
                                    progress = (processed_size / file.size) * 100
                                    logger.info(f"Uploading {file.filename}: {progress:.2f}%")

                        results.append({
                            "filename": file.filename,
                            "size": processed_size,
                            "status": "success",
                            "path": file_path
                        })
                    except Exception as e:
                        results.append({
                            "filename": file.filename,
                            "status": "error",
                            "error": str(e)
                        })
                        logger.error(f"Error uploading {file.filename}: {str(e)}")

                return JSONResponse(content={
                    "message": "Files uploaded successfully",
                    "files": results
                })

            @app.websocket("/main")
            async def main(websocket: WebSocket):
                """
                WebSocket连接的主处理函数

                处理新的WebSocket连接请求,接收初始消息并建立持久连接。

                参数:
                    websocket: WebSocket连接对象
                """
                try:
                    await websocket.accept()
                    logger.info(f"WebSocket connection established: {websocket.client.host}:{websocket.client.port}")
                    msg: UserInterfaceMsg = UserInterfaceMsg.model_validate_json(await websocket.receive_text())
                    logger.info(msg)
                    await self.maintain_connection_forever(msg, websocket, "client_id")
                except Exception:
                    logger.exception("Error in WebSocket connection handler")
                    await websocket.close()

            import uvicorn
            config = uvicorn.Config(app, host=self.host, port=self.port, log_level="error", ws_ping_interval=300, ws_ping_timeout=600)
            server = uvicorn.Server(config)
            logger.info(f"uvicorn begin, serving at ws://{self.host}:{self.port}/main")
            await server.serve()

        await launch_websocket_server()
        logger.info("uvicorn terminated")


if __name__ == "__main__":
    mmwss = MasterMindWebSocketServer(host="0.0.0.0", port=38000)
    asyncio.run(mmwss.long_task_01_wait_incoming_connection())
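A minimal client for the /main endpoint, sketched with the third-party websockets package (not part of this repo; any WebSocket client would do). It illustrates the handshake the server expects: send one UserInterfaceMsg as JSON, then stream frames until the TERMINATE message arrives:

import asyncio, json
import websockets  # pip install websockets (assumption: this client library is acceptable)

async def demo():
    async with websockets.connect("ws://127.0.0.1:38000/main") as ws:
        await ws.send(json.dumps({"function": "basic", "main_input": "hello"}))
        while True:
            frame = json.loads(await ws.recv())
            if frame.get("function") == "TERMINATE":
                break
            print(frame.get("chatbot"))  # streaming UI updates

asyncio.run(demo())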
@@ -79,6 +79,14 @@ def what_keys(keys):

     return f"检测到: OpenAI Key {avail_key_list['OpenAI Key']} 个, Azure Key {avail_key_list['Azure Key']} 个, API2D Key {avail_key_list['API2D Key']} 个"

+def is_o_family_for_openai(llm_model):
+    if not llm_model.startswith('o'):
+        return False
+    if llm_model in ['o1', 'o2', 'o3', 'o4', 'o5', 'o6', 'o7', 'o8']:
+        return True
+    if llm_model[:3] in ['o1-', 'o2-', 'o3-', 'o4-', 'o5-', 'o6-', 'o7-', 'o8-']:
+        return True
+    return False

 def select_api_key(keys, llm_model):
     import random
@@ -86,7 +94,7 @@ def select_api_key(keys, llm_model):
     key_list = keys.split(',')

     if llm_model.startswith('gpt-') or llm_model.startswith('chatgpt-') or \
-        llm_model.startswith('one-api-') or llm_model == 'o1' or llm_model.startswith('o1-'):
+        llm_model.startswith('one-api-') or is_o_family_for_openai(llm_model):
        for k in key_list:
            if is_openai_api_key(k): avail_key_list.append(k)
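The exact-name / dashed-prefix split in is_o_family_for_openai matters because a bare startswith('o') would also match unrelated model names; a quick check (standalone copy of the helper, for illustration only):

def is_o_family_for_openai(llm_model):
    if not llm_model.startswith('o'):
        return False
    if llm_model in ['o1', 'o2', 'o3', 'o4', 'o5', 'o6', 'o7', 'o8']:
        return True
    if llm_model[:3] in ['o1-', 'o2-', 'o3-', 'o4-', 'o5-', 'o6-', 'o7-', 'o8-']:
        return True
    return False

assert is_o_family_for_openai('o1') and is_o_family_for_openai('o3-mini')
assert not is_o_family_for_openai('ollama-llama3')  # the 'o' prefix alone is not enough
assert not is_o_family_for_openai('one-api-gpt-4')  # handled by its own startswith branch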
@@ -332,3 +332,77 @@
     text-wrap: wrap;
     opacity: 0.8;
 }

+.raw_text {
+    display: none;
+}
+
+.message_tail {
+    justify-content: center;
+    align-items: center;
+}
+.message_tail_stop {
+    border: dotted !important;
+    margin-top: 5px !important;
+    padding-left: 8px !important;
+    padding-top: 1px !important;
+    padding-bottom: 1px !important;
+    padding-right: 12px !important;
+}
+
+/* ant 开始 */
+
+/* 通用按钮样式 */
+.ant-btn {
+    display: inline-flex;
+    align-items: center;
+    justify-content: center;
+    padding: 8px 16px;
+    font-size: 14px;
+    font-weight: bold;
+    border-radius: 4px;
+    cursor: pointer;
+    transition: background-color 0.3s;
+}
+
+/* 按钮颜色和状态 */
+.ant-btn-primary {
+    background-color: #1890ff;
+    color: white;
+    border: none;
+}
+
+.ant-btn-primary:hover {
+    background-color: #40a9ff;
+}
+
+.ant-btn-loading {
+    pointer-events: none;
+    opacity: 0.7;
+}
+
+/* 图标样式 */
+.ant-btn-icon {
+    display: inline-flex;
+    margin-right: 8px;
+}
+
+.anticon {
+    width: 1em;
+    height: 1em;
+    fill: currentColor;
+}
+
+.anticon-spin {
+    animation: spin 1s linear infinite;
+}
+
+/* 动画效果 */
+@keyframes spin {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
130 themes/common.js
@@ -259,7 +259,24 @@ function cancel_loading_status() {
 // -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 // 第 2 部分: 复制按钮
 // -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// 解压缩函数
+function decompressString(compressedString) {
+    // 第1步:Base64解码
+    const binaryString = atob(compressedString);
+
+    // 第2步:将二进制字符串转换为Uint8Array
+    const bytes = new Uint8Array(binaryString.length);
+    for (let i = 0; i < binaryString.length; i++) {
+        bytes[i] = binaryString.charCodeAt(i);
+    }
+
+    // 第3步:使用DecompressionStream (基于Web Streams API)进行gzip解压缩
+    const ds = new DecompressionStream('gzip');
+    const decompressedStream = new Response(new Blob([bytes])).body.pipeThrough(ds);
+
+    // 第4步:获取解压后的数据并转换为字符串
+    return new Response(decompressedStream).text();
+}
+
 var allow_auto_read_continously = true;
 var allow_auto_read_tts_flag = false;
@@ -272,7 +289,7 @@ function addCopyButton(botElement, index, is_last_in_arr) {
     const audioIcon = '<span><svg t="1713628577799" fill="currentColor" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4587" width=".9em" height=".9em"><path d="M113.7664 540.4672c0-219.9552 178.2784-398.2336 398.2336-398.2336S910.2336 320.512 910.2336 540.4672v284.4672c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V597.2992c0-31.4368 25.4976-56.9344 56.9344-56.9344h56.9344c0-188.5184-152.7808-341.2992-341.2992-341.2992S170.7008 351.9488 170.7008 540.4672h56.9344c31.4368 0 56.9344 25.4976 56.9344 56.9344v227.5328c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V540.4672z" p-id="4588"></path></svg></span>';
     // const cancelAudioIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';

-    // 此功能没准备好
+    // audio functionality
     if (allow_auto_read_continously && is_last_in_arr && allow_auto_read_tts_flag) {
         process_latest_text_output(botElement.innerText, index);
     }
@@ -283,19 +300,21 @@ function addCopyButton(botElement, index, is_last_in_arr) {
         return;
     }

-    var copyButton = document.createElement('button');
-    copyButton.classList.add('copy-bot-btn');
-    copyButton.setAttribute('aria-label', 'Copy');
-    copyButton.innerHTML = copyIcon;
-    copyButton.addEventListener('click', async () => {
-        const textToCopy = botElement.innerText;
+    // 原始文本拷贝
+    var copyButtonOrig = document.createElement('button');
+    copyButtonOrig.classList.add('copy-bot-btn');
+    copyButtonOrig.setAttribute('aria-label', 'Copy');
+    copyButtonOrig.innerHTML = copyIcon;
+    copyButtonOrig.addEventListener('click', async () => {
         try {
+            const base64gzipcode = botElement.getElementsByClassName('raw_text')[0].innerText;
+            const textToCopy = await decompressString(base64gzipcode);
             // push_text_to_audio(textToCopy).catch(console.error);
             if ("clipboard" in navigator) {
                 await navigator.clipboard.writeText(textToCopy);
-                copyButton.innerHTML = copiedIcon;
+                copyButtonOrig.innerHTML = copiedIcon;
                 setTimeout(() => {
-                    copyButton.innerHTML = copyIcon;
+                    copyButtonOrig.innerHTML = copyIcon;
                 }, 1500);
             } else {
                 const textArea = document.createElement("textarea");
@@ -304,9 +323,9 @@ function addCopyButton(botElement, index, is_last_in_arr) {
                 textArea.select();
                 try {
                     document.execCommand('copy');
-                    copyButton.innerHTML = copiedIcon;
+                    copyButtonOrig.innerHTML = copiedIcon;
                     setTimeout(() => {
-                        copyButton.innerHTML = copyIcon;
+                        copyButtonOrig.innerHTML = copyIcon;
                     }, 1500);
                 } catch (error) {
                     console.error("Copy failed: ", error);
@@ -345,7 +364,8 @@ function addCopyButton(botElement, index, is_last_in_arr) {

     var messageBtnColumn = document.createElement('div');
     messageBtnColumn.classList.add('message-btn-row');
-    messageBtnColumn.appendChild(copyButton);
+    // messageBtnColumn.appendChild(copyButton);
+    messageBtnColumn.appendChild(copyButtonOrig);
     if (enable_tts) {
         messageBtnColumn.appendChild(audioButton);
     }
@@ -381,7 +401,6 @@ function do_something_but_not_too_frequently(min_interval, func) {


 function chatbotContentChanged(attempt = 1, force = false) {
-    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
     for (var i = 0; i < attempt; i++) {
         setTimeout(() => {
             const messages = gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot');
@@ -392,6 +411,9 @@ function chatbotContentChanged(attempt = 1, force = false) {
             // Now pass both the message element and the is_last_in_arr boolean to addCopyButton
             addCopyButton(message, index, is_last_in_arr);

+            // if last message, add stop btn link
+            addStopButton(message, index, is_last_in_arr);
+
             // save_conversation_history
             save_conversation_history_slow_down();
         });
@@ -402,6 +424,79 @@ function chatbotContentChanged(attempt = 1, force = false) {

 }

+function addStopButton(botElement, index, is_last_in_arr) {
+    function is_generating() {
+        var statePanelElement = document.getElementById("state-panel");
+        var generatingElement = statePanelElement.querySelector(".generating");
+        if (generatingElement) {
+            return true;
+        } else {
+            return false;
+        }
+    }
+    function on_stop_btn_click() {
+        let stopButton = document.getElementById("elem_stop");
+        stopButton.click();
+    }
+    function remove_stop_generate_btn(messageTailElement) {
+        // remove all child elements of messageTailElement
+        while (messageTailElement.firstChild) {
+            messageTailElement.removeChild(messageTailElement.firstChild);
+        }
+        messageTailElement.style.display = 'none';
+        messageTailElement.classList.add('removed');
+    }
+    function add_stop_generate_btn() {
+        // add a stop btn `bottomElement` as child; when clicked, execute on_stop_btn_click
+        console.log("get_gradio_component")
+        const bottomElement = document.createElement('button');
+        bottomElement.className = 'ant-btn ant-btn-primary';
+        bottomElement.innerHTML = `
+            <span class="ant-btn-icon">
+                <svg viewBox="64 64 896 896" focusable="false" data-icon="sync" width="1em" height="1em" fill="currentColor" aria-hidden="true" class="anticon anticon-sync anticon-spin">
+                    <path d="M168 504.2c1-43.7 10-86.1 26.9-126 17.3-41 42.1-77.7 73.7-109.4S337 212.3 378 195c42.4-17.9 87.4-27 133.9-27s91.5 9.1 133.8 27A341.5 341.5 0 01755 268.8c9.9 9.9 19.2 20.4 27.8 31.4l-60.2 47a8 8 0 003 14.1l175.7 43c5 1.2 9.9-2.6 9.9-7.7l.8-180.9c0-6.7-7.7-10.5-12.9-6.3l-56.4 44.1C765.8 155.1 646.2 92 511.8 92 282.7 92 96.3 275.6 92 503.8a8 8 0 008 8.2h60c4.4 0 7.9-3.5 8-7.8zm756 7.8h-60c-4.4 0-7.9 3.5-8 7.8-1 43.7-10 86.1-26.9 126-17.3 41-42.1 77.8-73.7 109.4A342.45 342.45 0 01512.1 856a342.24 342.24 0 01-243.2-100.8c-9.9-9.9-19.2-20.4-27.8-31.4l60.2-47a8 8 0 00-3-14.1l-175.7-43c-5-1.2-9.9 2.6-9.9 7.7l-.7 181c0 6.7 7.7 10.5 12.9 6.3l56.4-44.1C258.2 868.9 377.8 932 512.2 932c229.2 0 415.5-183.7 419.8-411.8a8 8 0 00-8-8.2z"></path>
+                </svg>
+            </span>
+            <span>终止</span>
+        `;
+        bottomElement.classList.add('message_tail_stop');
+        bottomElement.addEventListener('click', on_stop_btn_click);
+        messageTailElement.appendChild(bottomElement);
+    }
+
+    // find a sub element of class `message_tail`
+    const messageTailElement = botElement.querySelector('.message_tail');
+    // if not is_last_in_arr, hide this elem (display none)
+    if (!messageTailElement) {
+        return;
+    }
+    if (messageTailElement.classList.contains('removed')) {
+        return;
+    }
+    if (!is_last_in_arr) {
+        remove_stop_generate_btn(messageTailElement);
+        return;
+    }
+    messageTailElement.style.display = 'flex';
+    const messageTailStopElem = messageTailElement.querySelector('.message_tail_stop');
+    if (!is_generating()) {
+        setTimeout(() => {
+            if (!is_generating()) {
+                remove_stop_generate_btn(messageTailElement);
+                return;
+            } else {
+                if (!messageTailStopElem) {
+                    add_stop_generate_btn()
+                }
+            }
+        }, 500);
+    } else {
+        if (!messageTailStopElem) {
+            add_stop_generate_btn()
+        }
+    }
+}
+

 // -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
@@ -1089,6 +1184,15 @@ async function clear_conversation(a, b, c) {
     update_conversation_metadata();
+    let stopButton = document.getElementById("elem_stop");
+    stopButton.click();
     // Save back to local storage
+    try {
+        const EVENT_NAME = "gptac_reset_btn_clicked";
+        window.dispatchEvent(
+            new CustomEvent(EVENT_NAME, {
+                detail: ""
+            })
+        );
+    } catch (e) {}
     return reset_conversation(a, b);
 }
72 toolbox.py
@@ -37,6 +37,9 @@ from shared_utils.handle_upload import html_local_file
 from shared_utils.handle_upload import html_local_img
 from shared_utils.handle_upload import file_manifest_filter_type
 from shared_utils.handle_upload import extract_archive
+from shared_utils.context_clip_policy import clip_history
+from shared_utils.context_clip_policy import auto_context_clip_each_message
+from shared_utils.context_clip_policy import auto_context_clip_search_optimal
 from typing import List
 pj = os.path.join
 default_user_name = "default_user"
@@ -133,6 +136,9 @@ def ArgsGeneralWrapper(f):
         if len(args) == 0:  # 插件通道
             yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, request)
         else:  # 对话通道,或者基础功能通道
+            # 基础对话通道,或者基础功能通道
+            if get_conf('AUTO_CONTEXT_CLIP_ENABLE'):
+                txt_passon, history = auto_context_clip(txt_passon, history)
             yield from f(txt_passon, llm_kwargs, plugin_kwargs, chatbot_with_cookie, history, system_prompt, *args)
     else:
         # 处理少数情况下的特殊插件的锁定状态
@@ -588,7 +594,7 @@ def on_report_generated(cookies:dict, files:List[str], chatbot:ChatBotWithCookies):
     file_links += (
         f'<br/><a href="file={os.path.abspath(f)}" target="_blank">{f}</a>'
     )
-    chatbot.append(["报告如何远程获取?", f"报告已经添加到右侧“文件下载区”(可能处于折叠状态),请查收。{file_links}"])
+    chatbot.append([None, f"已经添加到右侧“文件下载区”(可能处于折叠状态),请查收。您也可以点击以下链接直接下载:{file_links}"])
     return cookies, report_files, chatbot


@@ -712,66 +718,14 @@ def run_gradio_in_subpath(demo, auth, port, custom_path):
     app = gr.mount_gradio_app(app, demo, path=custom_path)
     uvicorn.run(app, host="0.0.0.0", port=port)  # , auth=auth


-def clip_history(inputs, history, tokenizer, max_token_limit):
-    """
-    reduce the length of history by clipping.
-    this function search for the longest entries to clip, little by little,
-    until the number of token of history is reduced under threshold.
-    通过裁剪来缩短历史记录的长度。
-    此函数逐渐地搜索最长的条目进行剪辑,
-    直到历史记录的标记数量降低到阈值以下。
-    """
-    import numpy as np
-    from request_llms.bridge_all import model_info
-
-    def get_token_num(txt):
-        return len(tokenizer.encode(txt, disallowed_special=()))
-
-    input_token_num = get_token_num(inputs)
-
-    if max_token_limit < 5000:
-        output_token_expect = 256  # 4k & 2k models
-    elif max_token_limit < 9000:
-        output_token_expect = 512  # 8k models
+def auto_context_clip(current, history, policy='search_optimal'):
+    if policy == 'each_message':
+        return auto_context_clip_each_message(current, history)
+    elif policy == 'search_optimal':
+        return auto_context_clip_search_optimal(current, history)
     else:
-        output_token_expect = 1024  # 16k & 32k models
+        raise RuntimeError(f"未知的自动上下文裁剪策略: {policy}。")
-
-    if input_token_num < max_token_limit * 3 / 4:
-        # 当输入部分的token占比小于限制的3/4时,裁剪时
-        # 1. 把input的余量留出来
-        max_token_limit = max_token_limit - input_token_num
-        # 2. 把输出用的余量留出来
-        max_token_limit = max_token_limit - output_token_expect
-        # 3. 如果余量太小了,直接清除历史
-        if max_token_limit < output_token_expect:
-            history = []
-            return history
-    else:
-        # 当输入部分的token占比 > 限制的3/4时,直接清除历史
-        history = []
-        return history
-
-    everything = [""]
-    everything.extend(history)
-    n_token = get_token_num("\n".join(everything))
-    everything_token = [get_token_num(e) for e in everything]
-
-    # 截断时的颗粒度
-    delta = max(everything_token) // 16
-
-    while n_token > max_token_limit:
-        where = np.argmax(everything_token)
-        encoded = tokenizer.encode(everything[where], disallowed_special=())
-        clipped_encoded = encoded[: len(encoded) - delta]
-        everything[where] = tokenizer.decode(clipped_encoded)[
-            :-1
-        ]  # -1 to remove the may-be illegal char
-        everything_token[where] = get_token_num(everything[where])
-        n_token = get_token_num("\n".join(everything))
-
-    history = everything[1:]
-    return history
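auto_context_clip is just a policy dispatcher; a sketch of a hypothetical call site mirroring ArgsGeneralWrapper (assumes the repo's config, including AUTO_CONTEXT_CLIP_TRIGGER_TOKEN_LEN and friends, is loaded):

# Clip the pending input + history before handing them to the LLM bridge.
current, history = "summarize this very long document ...", ["earlier turn"] * 8

current, history = auto_context_clip(current, history)                         # default: 'search_optimal'
current, history = auto_context_clip(current, history, policy='each_message')  # per-message caps
# Unknown policies fail fast:
# auto_context_clip(current, history, policy='bogus')  -> RuntimeError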
"""
|
||||
|
||||
Reference in New Issue
Block a user