Merge branch 'master' into huggingfacelocal
docs/GithubAction+AllCapacityBeta (new file, 53 lines)
@@ -0,0 +1,53 @@
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacity --network=host --build-arg http_proxy=http://localhost:10881 --build-arg https_proxy=http://localhost:10881 .
# docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host .
# docker run -it --net=host gpt-academic-all-capacity bash

# Build from an NVIDIA base image to enable GPU support (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
FROM fuqingxu/11.3.1-runtime-ubuntu20.04-with-texlive:latest

# use python3 as the system default python
WORKDIR /gpt
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

# # Optional step: switch to a pip mirror (the following three lines can be deleted)
# RUN echo '[global]' > /etc/pip.conf && \
# echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
# echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf

# Install PyTorch
RUN python3 -m pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
# Install pip dependencies
RUN python3 -m pip install openai numpy arxiv rich
RUN python3 -m pip install colorama Markdown pygments pymupdf
RUN python3 -m pip install python-docx moviepy pdfminer
RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
RUN python3 -m pip install rarfile py7zr
RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
# Clone the project and the MOSS dependency
WORKDIR /gpt
RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
WORKDIR /gpt/gpt_academic
RUN git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss

RUN python3 -m pip install -r requirements.txt
RUN python3 -m pip install -r request_llms/requirements_moss.txt
RUN python3 -m pip install -r request_llms/requirements_qwen.txt
RUN python3 -m pip install -r request_llms/requirements_chatglm.txt
RUN python3 -m pip install -r request_llms/requirements_newbing.txt
RUN python3 -m pip install nougat-ocr

# Warm up the Tiktoken module
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Install extra dependencies for the knowledge-base plugin
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'
RUN rm -rf /usr/local/lib/python3.8/dist-packages/tests


# COPY .cache /root/.cache
# COPY config_private.py config_private.py
# Launch
CMD ["python3", "-u", "main.py"]
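For reference, a minimal build-and-run sketch assembled from the header comments of this Dockerfile; the proxy build-args shown in the first comment are only needed when building behind a local proxy:

# Build the all-capacity image
docker build -t gpt-academic-all-capacity -f docs/GithubAction+AllCapacityBeta --network=host .
# Open an interactive shell in the container
docker run -it --net=host gpt-academic-all-capacity bash
# Inside the container, start the web UI (this is what the image's CMD runs by default)
python3 -u main.py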
docs/GithubAction+NoLocal+Vectordb (new file, 26 lines)
@@ -0,0 +1,26 @@
# This Dockerfile builds an environment without local models. If you need local models such as chatglm, see docs/Dockerfile+ChatGLM
# How to build: first edit `config.py`, then run: docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
# How to run: docker run --rm -it --net=host gpt-academic-nolocal-vs
FROM python:3.11

# Set the working directory
WORKDIR /gpt

# Copy the project files
COPY . .

# Install dependencies
RUN pip3 install -r requirements.txt

# Install extra dependencies for the knowledge-base plugin
RUN apt-get update && apt-get install libgl1 -y
RUN pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cpu
RUN pip3 install transformers protobuf langchain sentence-transformers faiss-cpu nltk beautifulsoup4 bitsandbytes tabulate icetk --upgrade
RUN pip3 install unstructured[all-docs] --upgrade
RUN python3 -c 'from check_proxy import warm_up_vectordb; warm_up_vectordb()'

# Optional step: warm up modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Launch
CMD ["python3", "-u", "main.py"]
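A minimal usage sketch for this image, assuming it is built from the repository root as the comments above describe; the last line is an assumption that settings such as API_KEY can also be supplied through environment variables instead of editing config.py before the build:

# Build after optionally editing config.py
docker build -t gpt-academic-nolocal-vs -f docs/GithubAction+NoLocal+Vectordb .
# Run with host networking so the web UI is reachable on the configured port
docker run --rm -it --net=host gpt-academic-nolocal-vs
# Assumed alternative: pass the API key at run time instead of baking it into config.py
docker run --rm -it --net=host -e API_KEY=sk-xxxx gpt-academic-nolocal-vs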
@@ -923,7 +923,7 @@
"的第": "The",
"个片段": "fragment",
"总结文章": "Summarize the article",
"根据以上的对话": "According to the above dialogue",
"根据以上的对话": "According to the conversation above",
"的主要内容": "The main content of",
"所有文件都总结完成了吗": "Are all files summarized?",
"如果是.doc文件": "If it is a .doc file",
@@ -1501,7 +1501,7 @@
"发送请求到OpenAI后": "After sending the request to OpenAI",
"上下布局": "Vertical Layout",
"左右布局": "Horizontal Layout",
"对话窗的高度": "Height of the Dialogue Window",
"对话窗的高度": "Height of the Conversation Window",
"重试的次数限制": "Retry Limit",
"gpt4现在只对申请成功的人开放": "GPT-4 is now only open to those who have successfully applied",
"提高限制请查询": "Please check for higher limits",
@@ -2183,9 +2183,8 @@
"找不到合适插件执行该任务": "Cannot find a suitable plugin to perform this task",
"接驳VoidTerminal": "Connect to VoidTerminal",
"**很好": "**Very good",
"对话|编程": "Conversation|Programming",
"对话|编程|学术": "Conversation|Programming|Academic",
"4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
"对话|编程": "Conversation&ImageGenerating|Programming",
"对话|编程|学术": "Conversation&ImageGenerating|Programming|Academic", "4. 建议使用 GPT3.5 或更强的模型": "4. It is recommended to use GPT3.5 or a stronger model",
"「请调用插件翻译PDF论文": "Please call the plugin to translate the PDF paper",
"3. 如果您使用「调用插件xxx」、「修改配置xxx」、「请问」等关键词": "3. If you use keywords such as 'call plugin xxx', 'modify configuration xxx', 'please', etc.",
"以下是一篇学术论文的基本信息": "The following is the basic information of an academic paper",
@@ -2630,7 +2629,7 @@
"已经被记忆": "Already memorized",
"默认用英文的": "Default to English",
"错误追踪": "Error tracking",
"对话|编程|学术|智能体": "Dialogue|Programming|Academic|Intelligent agent",
"对话&编程|编程|学术|智能体": "Conversation&ImageGenerating|Programming|Academic|Intelligent agent",
"请检查": "Please check",
"检测到被滞留的缓存文档": "Detected cached documents being left behind",
"还有哪些场合允许使用代理": "What other occasions allow the use of proxies",
@@ -2864,7 +2863,7 @@
"加载API_KEY": "Loading API_KEY",
"协助您编写代码": "Assist you in writing code",
"我可以为您提供以下服务": "I can provide you with the following services",
"排队中请稍后 ...": "Please wait in line ...",
"排队中请稍候 ...": "Please wait in line ...",
"建议您使用英文提示词": "It is recommended to use English prompts",
"不能支撑AutoGen运行": "Cannot support AutoGen operation",
"帮助您解决编程问题": "Help you solve programming problems",
@@ -2903,5 +2902,107 @@
"高优先级": "High priority",
"请配置ZHIPUAI_API_KEY": "Please configure ZHIPUAI_API_KEY",
"单个azure模型": "Single Azure model",
"预留参数 context 未实现": "Reserved parameter 'context' not implemented"
}
"预留参数 context 未实现": "Reserved parameter 'context' not implemented",
"在输入区输入临时API_KEY后提交": "Submit after entering temporary API_KEY in the input area",
"鸟": "Bird",
"图片中需要修改的位置用橡皮擦擦除为纯白色": "Erase the areas in the image that need to be modified with an eraser to pure white",
"└── PDF文档精准解析": "└── Accurate parsing of PDF documents",
"└── ALLOW_RESET_CONFIG 是否允许通过自然语言描述修改本页的配置": "└── ALLOW_RESET_CONFIG Whether to allow modifying the configuration of this page through natural language description",
"等待指令": "Waiting for instructions",
"不存在": "Does not exist",
"选择游戏": "Select game",
"本地大模型示意图": "Local large model diagram",
"无视此消息即可": "You can ignore this message",
"即RGB=255": "That is, RGB=255",
"如需追问": "If you have further questions",
"也可以是具体的模型路径": "It can also be a specific model path",
"才会起作用": "Will take effect",
"下载失败": "Download failed",
"网页刷新后失效": "Invalid after webpage refresh",
"crazy_functions.互动小游戏-": "crazy_functions.Interactive mini game-",
"右对齐": "Right alignment",
|
||||
"您可以调用下拉菜单中的“LoadConversationHistoryArchive”还原当下的对话": "You can use the 'LoadConversationHistoryArchive' in the drop-down menu to restore the current conversation",
|
||||
"左对齐": "Left alignment",
|
||||
"使用默认的 FP16": "Use default FP16",
|
||||
"一小时": "One hour",
|
||||
"从而方便内存的释放": "Thus facilitating memory release",
|
||||
"如何临时更换API_KEY": "How to temporarily change API_KEY",
|
||||
"请输入 1024x1024-HD": "Please enter 1024x1024-HD",
|
||||
"使用 INT8 量化": "Use INT8 quantization",
|
||||
"3. 输入修改需求": "3. Enter modification requirements",
|
||||
"刷新界面 由于请求gpt需要一段时间": "Refreshing the interface takes some time due to the request for gpt",
|
||||
"随机小游戏": "Random mini game",
|
||||
"那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型": "So please specify the specific model in QWEN_MODEL_SELECTION below",
|
||||
"表值": "Table value",
|
||||
"我画你猜": "I draw, you guess",
|
||||
"狗": "Dog",
|
||||
"2. 输入分辨率": "2. Enter resolution",
|
||||
"鱼": "Fish",
|
||||
"尚未完成": "Not yet completed",
|
||||
"表头": "Table header",
|
||||
"填localhost或者127.0.0.1": "Fill in localhost or 127.0.0.1",
|
||||
"请上传jpg格式的图片": "Please upload images in jpg format",
|
||||
"API_URL_REDIRECT填写格式是错误的": "The format of API_URL_REDIRECT is incorrect",
|
||||
"├── RWKV的支持见Wiki": "Support for RWKV is available in the Wiki",
|
||||
"如果中文Prompt效果不理想": "If the Chinese prompt is not effective",
|
||||
"/SEAFILE_LOCAL/50503047/我的资料库/学位/paperlatex/aaai/Fu_8368_with_appendix": "/SEAFILE_LOCAL/50503047/My Library/Degree/paperlatex/aaai/Fu_8368_with_appendix",
|
||||
"只有当AVAIL_LLM_MODELS包含了对应本地模型时": "Only when AVAIL_LLM_MODELS contains the corresponding local model",
|
||||
"选择本地模型变体": "Choose the local model variant",
|
||||
"如果您确信自己没填错": "If you are sure you haven't made a mistake",
|
||||
"PyPDF2这个库有严重的内存泄露问题": "PyPDF2 library has serious memory leak issues",
|
||||
"整理文件集合 输出消息": "Organize file collection and output message",
|
||||
"没有检测到任何近期上传的图像文件": "No recently uploaded image files detected",
|
||||
"游戏结束": "Game over",
|
||||
"调用结束": "Call ended",
|
||||
"猫": "Cat",
|
||||
"请及时切换模型": "Please switch models in time",
|
||||
"次中": "In the meantime",
|
||||
"如需生成高清图像": "If you need to generate high-definition images",
|
||||
"CPU 模式": "CPU mode",
|
||||
"项目目录": "Project directory",
|
||||
"动物": "Animal",
|
||||
"居中对齐": "Center alignment",
|
||||
"请注意拓展名需要小写": "Please note that the extension name needs to be lowercase",
|
||||
"重试第": "Retry",
|
||||
"实验性功能": "Experimental feature",
|
||||
"猜错了": "Wrong guess",
|
||||
"打开你的代理软件查看代理协议": "Open your proxy software to view the proxy agreement",
|
||||
"您不需要再重复强调该文件的路径了": "You don't need to emphasize the file path again",
|
||||
"请阅读": "Please read",
|
||||
"请直接输入您的问题": "Please enter your question directly",
|
||||
"API_URL_REDIRECT填错了": "API_URL_REDIRECT is filled incorrectly",
|
||||
"谜底是": "The answer is",
|
||||
"第一个模型": "The first model",
|
||||
"你猜对了!": "You guessed it right!",
|
||||
"已经接收到您上传的文件": "The file you uploaded has been received",
|
||||
"您正在调用“图像生成”插件": "You are calling the 'Image Generation' plugin",
|
||||
"刷新界面 界面更新": "Refresh the interface, interface update",
|
||||
"如果之前已经初始化了游戏实例": "If the game instance has been initialized before",
|
||||
"文件": "File",
|
||||
"老鼠": "Mouse",
|
||||
"列2": "Column 2",
|
||||
"等待图片": "Waiting for image",
|
||||
"使用 INT4 量化": "Use INT4 quantization",
|
||||
"from crazy_functions.互动小游戏 import 随机小游戏": "TranslatedText",
|
||||
"游戏主体": "TranslatedText",
|
||||
"该模型不具备上下文对话能力": "TranslatedText",
|
||||
"列3": "TranslatedText",
|
||||
"清理": "TranslatedText",
|
||||
"检查量化配置": "TranslatedText",
|
||||
"如果游戏结束": "TranslatedText",
|
||||
"蛇": "TranslatedText",
|
||||
"则继续该实例;否则重新初始化": "TranslatedText",
|
||||
"e.g. cat and 猫 are the same thing": "TranslatedText",
|
||||
"第三个模型": "TranslatedText",
|
||||
"如果你选择Qwen系列的模型": "TranslatedText",
|
||||
"列4": "TranslatedText",
|
||||
"输入“exit”获取答案": "TranslatedText",
|
||||
"把它放到子进程中运行": "TranslatedText",
|
||||
"列1": "TranslatedText",
|
||||
"使用该模型需要额外依赖": "TranslatedText",
|
||||
"再试试": "TranslatedText",
|
||||
"1. 上传图片": "TranslatedText",
|
||||
"保存状态": "TranslatedText",
|
||||
"GPT-Academic对话存档": "TranslatedText",
|
||||
"Arxiv论文精细翻译": "TranslatedText"
|
||||
}
|
||||
|
||||
@@ -1043,9 +1043,9 @@
"jittorllms响应异常": "jittorllms response exception",
"在项目根目录运行这两个指令": "Run these two commands in the project root directory",
"获取tokenizer": "Get tokenizer",
"chatbot 为WebUI中显示的对话列表": "chatbot is the list of dialogues displayed in WebUI",
"chatbot 为WebUI中显示的对话列表": "chatbot is the list of conversations displayed in WebUI",
"test_解析一个Cpp项目": "test_parse a Cpp project",
"将对话记录history以Markdown格式写入文件中": "Write the dialogue record history to a file in Markdown format",
"将对话记录history以Markdown格式写入文件中": "Write the conversations record history to a file in Markdown format",
"装饰器函数": "Decorator function",
"玫瑰色": "Rose color",
"将单空行": "刪除單行空白",
@@ -2270,4 +2270,4 @@
"标注节点的行数范围": "標註節點的行數範圍",
"默认 True": "默認 True",
"将两个PDF拼接": "將兩個PDF拼接"
}
}