Merge branch 'master' into frontier

This commit is contained in:
binary-husky
2024-10-15 09:10:36 +00:00
Commit 0121cacc84
6 changed files with 156 additions and 53 deletions

View file

@@ -0,0 +1,51 @@
# https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
name: build-with-latex-arm

on:
  push:
    branches:
      - "master"

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}_with_latex_arm

jobs:
  build-and-push-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      - name: Build and push Docker image
        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          platforms: linux/arm64
          file: docs/GithubAction+NoLocal+Latex+Arm
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
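A note on the tagging step: docker/metadata-action@v4 derives the image tags and labels from the Git ref, so a push to master produces a branch-named tag under ghcr.io (and, for the default branch, conventionally a latest tag as well). A rough Python sketch of that kind of derivation, for intuition only; the action's real rules are richer, and derive_tags and the placeholder image name are mine:

    # Illustration only: approximate how a branch push maps to image tags.
    def derive_tags(registry: str, image: str, git_ref: str) -> list[str]:
        tags = []
        if git_ref.startswith("refs/heads/"):
            branch = git_ref[len("refs/heads/"):]
            tags.append(f"{registry}/{image}:{branch}")
            if branch == "master":  # default branch also gets 'latest' by convention
                tags.append(f"{registry}/{image}:latest")
        return tags

    # e.g. ['ghcr.io/<owner>/<repo>_with_latex_arm:master', 'ghcr.io/<owner>/<repo>_with_latex_arm:latest']
    print(derive_tags("ghcr.io", "<owner>/<repo>_with_latex_arm", "refs/heads/master"))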

View file

@@ -145,8 +145,8 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
         # <-------------- download arxiv source file ------------->
         def fix_url_and_download():
-            for url_tar in [url_.replace('/abs/', '/e-print/'), url_.replace('/abs/', '/src/')]:
-            # for url_tar in [url_.replace('/abs/', '/src/'), url_.replace('/abs/', '/e-print/')]:
+            # for url_tar in [url_.replace('/abs/', '/e-print/'), url_.replace('/abs/', '/src/')]:
+            for url_tar in [url_.replace('/abs/', '/src/'), url_.replace('/abs/', '/e-print/')]:
                 proxies = get_conf('proxies')
                 r = requests.get(url_tar, proxies=proxies)
                 if r.status_code == 200:
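The only change in this hunk is the preferred mirror order: the /src/ URL is now tried before /e-print/. A minimal self-contained sketch of the resulting fallback loop (the function name fetch_arxiv_source is mine; url_, get_conf and proxies come from the surrounding code):

    import requests

    def fetch_arxiv_source(url_: str, proxies: dict = None) -> bytes:
        # Try the /src/ endpoint first, then fall back to /e-print/;
        # the first HTTP 200 response wins.
        for url_tar in [url_.replace('/abs/', '/src/'), url_.replace('/abs/', '/e-print/')]:
            r = requests.get(url_tar, proxies=proxies)
            if r.status_code == 200:
                return r.content
        return None  # both endpoints failed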

View file

@@ -697,15 +697,6 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                 ),
                 0,
             )
-            if "/Annots" in page1:
-                page1_annot_id = [annot.idnum for annot in page1["/Annots"]]
-            else:
-                page1_annot_id = []
-            if "/Annots" in page2:
-                page2_annot_id = [annot.idnum for annot in page2["/Annots"]]
-            else:
-                page2_annot_id = []
             if "/Annots" in new_page:
                 annotations = new_page["/Annots"]
                 for i, annot in enumerate(annotations):
@@ -720,7 +711,8 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                     if "/S" in action and action["/S"] == "/GoTo":
                         # internal link: jump to a page inside this document
                         dest = action.get("/D")  # target page or position
-                        if dest and annot.idnum in page2_annot_id:
+                        # if dest and annot.idnum in page2_annot_id:
+                        if dest in pdf2_reader.named_destinations:
                             # fetch the jump info, including the target page, from the original file
                             destination = pdf2_reader.named_destinations[
                                 dest
@@ -732,6 +724,7 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                             )
                             # update the jump target: the corresponding page, coordinates (100, 150), zoom 100%
                             # "/D": [10, '/XYZ', 100, 100, 0]
+                            if destination.dest_array[1] == "/XYZ":
                                 annot_obj["/A"].update(
                                     {
                                         NameObject("/D"): ArrayObject(
@@ -739,7 +732,9 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                                             NumberObject(page_number),
                                             destination.dest_array[1],
                                             FloatObject(
-                                                destination.dest_array[2]
+                                                destination.dest_array[
+                                                    2
+                                                ]
                                                 + int(
                                                     page1.mediaBox.getWidth()
                                                 )
@@ -750,6 +745,18 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                                         )  # make sure keys and values are PdfObject
                                     }
                                 )
+                            else:
+                                annot_obj["/A"].update(
+                                    {
+                                        NameObject("/D"): ArrayObject(
+                                            [
+                                                NumberObject(page_number),
+                                                destination.dest_array[1],
+                                            ]
+                                        )  # make sure keys and values are PdfObject
+                                    }
+                                )
                             rect = annot_obj.get("/Rect")
                             # update the click coordinates
                             rect = ArrayObject(
@@ -773,7 +780,9 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                                 ): rect  # make sure keys and values are PdfObject
                             }
                         )
-                        if dest and annot.idnum in page1_annot_id:
+                        # if dest and annot.idnum in page1_annot_id:
+                        if dest in pdf1_reader.named_destinations:
                             # fetch the jump info, including the target page, from the original file
                             destination = pdf1_reader.named_destinations[
                                 dest
@@ -785,6 +794,7 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                             )
                             # update the jump target: the corresponding page, coordinates (100, 150), zoom 100%
                             # "/D": [10, '/XYZ', 100, 100, 0]
+                            if destination.dest_array[1] == "/XYZ":
                                 annot_obj["/A"].update(
                                     {
                                         NameObject("/D"): ArrayObject(
@@ -792,7 +802,9 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                                             NumberObject(page_number),
                                             destination.dest_array[1],
                                             FloatObject(
-                                                destination.dest_array[2]
+                                                destination.dest_array[
+                                                    2
+                                                ]
                                             ),
                                             destination.dest_array[3],
                                             destination.dest_array[4],
@@ -800,6 +812,18 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                                         )  # make sure keys and values are PdfObject
                                     }
                                 )
+                            else:
+                                annot_obj["/A"].update(
+                                    {
+                                        NameObject("/D"): ArrayObject(
+                                            [
+                                                NumberObject(page_number),
+                                                destination.dest_array[1],
+                                            ]
+                                        )  # make sure keys and values are PdfObject
+                                    }
+                                )
                             rect = annot_obj.get("/Rect")
                             rect = ArrayObject(
                                 [
@@ -820,14 +844,12 @@ def _merge_pdfs_ng(pdf1_path, pdf2_path, output_path):
                     elif "/S" in action and action["/S"] == "/URI":
                         # external link: jump to a URI
                         uri = action.get("/URI")
         output_writer.addPage(new_page)
 
     # Save the merged PDF file
     with open(output_path, "wb") as output_file:
         output_writer.write(output_file)
 
 
 def _merge_pdfs_legacy(pdf1_path, pdf2_path, output_path):
     import PyPDF2  # PyPDF2 has a serious memory-leak problem; run it in a subprocess so the memory can be reclaimed
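Net effect of the annotation changes above: the precomputed idnum lists are gone, each /GoTo destination is instead looked up directly in the source reader's named_destinations, and destinations whose fit type is not /XYZ now get a shortened /D array without coordinates. A condensed sketch of the new branching (the helper name remap_goto_destination is mine; per the diff, links originating from pdf2 are shifted right by page1's width):

    from PyPDF2.generic import ArrayObject, FloatObject, NameObject, NumberObject

    def remap_goto_destination(annot_obj, destination, page_number, x_offset=0):
        # /XYZ destinations carry explicit x/y/zoom values: keep them, shifting
        # x by the width of the left-hand page for links that came from pdf2.
        if destination.dest_array[1] == "/XYZ":
            new_dest = ArrayObject([
                NumberObject(page_number),
                destination.dest_array[1],
                FloatObject(destination.dest_array[2] + x_offset),
                destination.dest_array[3],
                destination.dest_array[4],
            ])
        else:
            # Other fit types (/Fit, /FitH, ...) carry no usable coordinates:
            # rewrite them with just the target page and the fit type.
            new_dest = ArrayObject([
                NumberObject(page_number),
                destination.dest_array[1],
            ])
        annot_obj["/A"].update({NameObject("/D"): new_dest})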

View file

@@ -0,0 +1,25 @@
# This Dockerfile builds the "no local model" environment; to use local models such as ChatGLM, see docs/Dockerfile+ChatGLM
# - 1 Edit `config.py`
# - 2 Build:  docker build -t gpt-academic-nolocal-latex -f docs/GithubAction+NoLocal+Latex .
# - 3 Run:    docker run -v /home/fuqingxu/arxiv_cache:/root/arxiv_cache --rm -it --net=host gpt-academic-nolocal-latex

FROM menghuan1918/ubuntu_uv_ctex:latest
ENV DEBIAN_FRONTEND=noninteractive
SHELL ["/bin/bash", "-c"]
WORKDIR /gpt
COPY . .

RUN /root/.cargo/bin/uv venv --seed \
    && source .venv/bin/activate \
    && /root/.cargo/bin/uv pip install openai numpy arxiv rich colorama Markdown pygments pymupdf python-docx pdfminer \
    && /root/.cargo/bin/uv pip install -r requirements.txt \
    && /root/.cargo/bin/uv clean

# Make python3/python resolve to the venv interpreter
RUN rm -f /usr/bin/python3 && ln -s /gpt/.venv/bin/python /usr/bin/python3
RUN rm -f /usr/bin/python && ln -s /gpt/.venv/bin/python /usr/bin/python

# Optional step: pre-warm modules
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Start
CMD ["python3", "-u", "main.py"]
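The two symlink RUN lines make the uv-created venv the system-wide python3, so the warm-up step and the final CMD resolve to the same interpreter and see the packages installed above. The warm-up function itself lives in the repository's check_proxy.py; the sketch below only illustrates what such a build-time warm-up typically does, with an assumed import path, and is not the project's actual code:

    # Illustrative warm-up: import heavy modules and exercise the tokenizer once
    # at image-build time so the first real request avoids the startup cost.
    def warm_up_modules():
        from request_llms.bridge_all import model_info  # assumed module path
        tokenizer = model_info["gpt-4"]["tokenizer"]
        tokenizer.encode("warm up", disallowed_special=())  # touch the encoder once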

View file

@@ -256,6 +256,8 @@ model_info = {
         "max_token": 128000,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
+        "openai_disable_system_prompt": True,
+        "openai_disable_stream": True,
     },
     "o1-mini": {
         "fn_with_ui": chatgpt_ui,
@@ -264,6 +266,8 @@ model_info = {
         "max_token": 128000,
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
+        "openai_disable_system_prompt": True,
+        "openai_disable_stream": True,
     },
     "gpt-4-turbo": {
@@ -1281,4 +1285,3 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot,
     # update the llm_kwargs parameters, otherwise a parameter mismatch occurs
     yield from method(inputs, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, stream, additional_fn)
-
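The two new flags mark models (the o1 family here) that reject system messages and streamed responses. How the consuming payload builder honors them is not shown in this diff; the sketch below is one plausible shape, assuming nothing beyond the flag names added above:

    def build_payload(model: str, model_info: dict, system_prompt: str, user_msg: str) -> dict:
        info = model_info[model]
        messages = []
        if info.get("openai_disable_system_prompt", False):
            # The model rejects the system role: fold the prompt into the user turn.
            user_msg = f"{system_prompt}\n\n{user_msg}"
        else:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_msg})
        return {
            "model": model,
            "messages": messages,
            # The model rejects streaming: request a single, complete response body.
            "stream": not info.get("openai_disable_stream", False),
        }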

View file

@@ -202,10 +202,13 @@ def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[],
                 if (time.time()-observe_window[1]) > watch_dog_patience:
                     raise RuntimeError("The user cancelled the program.")
         else: raise RuntimeError("Unexpected JSON structure: "+delta)
-    if json_data and json_data['finish_reason'] == 'content_filter':
-        raise RuntimeError("The request was filtered by Azure for containing non-compliant content.")
-    if json_data and json_data['finish_reason'] == 'length':
+    finish_reason = json_data.get('finish_reason', None) if json_data else None
+    if finish_reason == 'content_filter':
+        raise RuntimeError("The request was filtered for containing non-compliant content.")
+    if finish_reason == 'length':
         raise ConnectionAbortedError("Finished normally, but the token budget was exhausted, leaving the output incomplete; please reduce the amount of text per request.")
     return result
@@ -536,4 +539,3 @@ def generate_payload(inputs:str, llm_kwargs:dict, history:list, system_prompt:st
     return headers,payload
-
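The point of the finish_reason refactor: the old code indexed json_data['finish_reason'] directly, which raised a KeyError on chunks that omit the field, before the intended error handling could run. The new single .get read degrades to None instead. A tiny demonstration of the difference:

    chunk = {"delta": {"content": "hi"}}  # a streamed chunk without 'finish_reason'

    # Old pattern: chunk['finish_reason'] would raise KeyError here.
    # New pattern: read once, defensively; a missing key or a None chunk both yield None.
    finish_reason = chunk.get('finish_reason', None) if chunk else None
    assert finish_reason is None  # falls through both error checks harmlessly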