镜像自地址
https://github.com/binary-husky/gpt_academic.git
已同步 2025-12-06 14:36:48 +00:00
修正internlm输入设备bug
这个提交包含在:
@@ -94,8 +94,9 @@ class GetInternlmHandle(LocalLLMHandle):
         inputs = tokenizer([prompt], padding=True, return_tensors="pt")
         input_length = len(inputs["input_ids"][0])
+        device = get_conf('LOCAL_MODEL_DEVICE')
         for k, v in inputs.items():
-            inputs[k] = v.cuda()
+            inputs[k] = v.to(device)
         input_ids = inputs["input_ids"]
         batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
         if generation_config is None:
在新工单中引用
屏蔽一个用户