# This Dockerfile builds an environment WITHOUT local models. If you need local models such as ChatGLM, see docs/Dockerfile+ChatGLM
# - 1. Edit `config.py`
# - 2. Build: docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
# - 3. Run:   docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex

FROM menghuan1918/python3_11_uv_texlive:latest
ENV DEBIAN_FRONTEND=noninteractive
SHELL ["/bin/bash", "-c"]

WORKDIR /gpt

COPY . .

# Create a uv virtual environment and install Python dependencies
RUN source ~/.bashrc \
    && uv venv --seed \
    && source .venv/bin/activate \
    && uv pip install openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer nougat-ocr \
    && uv pip install -r requirements.txt

# Optional step: warm up modules
RUN .venv/bin/python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Launch
CMD [".venv/bin/python3", "-u", "main.py"]