# This Dockerfile builds an environment with no local models. If you need local models such as ChatGLM, see docs/Dockerfile+ChatGLM instead.
# - 1 Edit `config.py` (an illustrative example follows this list)
# - 2 Build:  docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
# - 3 Run:    docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex
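#
# Illustrative config.py snippet for step 1 (an assumption, not part of this Dockerfile: the key
# names API_KEY and LLM_MODEL follow the project's own config.py, and the values below are
# placeholders to replace with your own):
#   API_KEY   = "sk-..."
#   LLM_MODEL = "gpt-3.5-turbo"
#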
FROM menghuan1918/ubuntu_uv_ctex:latest
ENV DEBIAN_FRONTEND=noninteractive
SHELL ["/bin/bash", "-c"]
WORKDIR /gpt
COPY . .
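# Create a seeded uv virtual environment, install the extra runtime packages and requirements.txt
# into it, then clean up uv's cache to keep the image small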
RUN /root/.cargo/bin/uv venv --seed \
    && source .venv/bin/activate \
    && /root/.cargo/bin/uv pip install openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer \
    && /root/.cargo/bin/uv pip install -r requirements.txt \
    && /root/.cargo/bin/uv clean
# Make the system python3/python point at the virtualenv interpreter
RUN rm -f /usr/bin/python3 && ln -s /gpt/.venv/bin/python /usr/bin/python3
RUN rm -f /usr/bin/python && ln -s /gpt/.venv/bin/python /usr/bin/python
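# (Sanity check, if desired: `readlink /usr/bin/python3` inside the built image should print /gpt/.venv/bin/python)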
# Optional step: warm up modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
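# (Assumption: warm_up_modules only pre-loads caches, so dropping this RUN shortens the build at the
# cost of doing that work lazily on first use)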
# Launch
CMD ["python3", "-u", "main.py"]
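#
# Example of overriding the UI port at run time (an assumption: config values such as WEB_PORT can be
# supplied as environment variables, as in the project's docker-compose examples):
#   docker run --rm -it -e WEB_PORT=12345 -p 12345:12345 gpt-academic-nolocal-latex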