Mirrored from
https://github.com/binary-husky/gpt_academic.git
Synced 2025-12-06 06:26:47 +00:00
Fix a loading bug for local models on Windows
@@ -5,7 +5,7 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
@@ -52,14 +52,15 @@ class GetInternlmHandle(LocalLLMHandle):
         import torch
         from transformers import AutoModelForCausalLM, AutoTokenizer
         device = get_conf('LOCAL_MODEL_DEVICE')
-        if self._model is None:
-            tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
-            if device=='cpu':
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
-            else:
-                model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
+        with ProxyNetworkActivate('Download_LLM'):
+            if self._model is None:
+                tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True)
+                if device=='cpu':
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16)
+                else:
+                    model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda()
 
-        model = model.eval()
+            model = model.eval()
         return model, tokenizer
 
     def llm_stream_generator(self, **kwargs):
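For context: the change wraps the Hugging Face download calls in the ProxyNetworkActivate context manager imported from the repository's toolbox module, so the downloads run while the user's proxy settings are active. The project's real implementation lives in toolbox.py; the code below is only a minimal sketch of the general pattern, assuming it works by temporarily exporting proxy environment variables. The names proxy_network_activate and proxies are hypothetical.

# Minimal sketch of a proxy-activation context manager (an assumption about
# the pattern, NOT the project's actual ProxyNetworkActivate code): export
# proxy environment variables for the duration of the block, so network
# calls inside it (e.g. model downloads) go through the configured proxy,
# then restore the previous environment on exit.
import os
from contextlib import contextmanager

@contextmanager
def proxy_network_activate(task_name, proxies=None):
    """Temporarily route traffic through `proxies`, a hypothetical dict like
    {'http': 'http://127.0.0.1:7890', 'https': 'http://127.0.0.1:7890'}."""
    saved = {k: os.environ.get(k) for k in ('HTTP_PROXY', 'HTTPS_PROXY')}
    try:
        if proxies:
            os.environ['HTTP_PROXY'] = proxies.get('http', '')
            os.environ['HTTPS_PROXY'] = proxies.get('https', '')
        yield
    finally:
        # Restore the previous values, removing any keys we introduced.
        for k, v in saved.items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v

Because the environment is restored in a finally clause, the proxy settings cannot leak past the block even if the download raises, which matches how the diff scopes the proxy to the model-loading step only.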