Merge branch 'update-for-qwen' of https://github.com/alphaply/gpt_academic into alphaply-update-for-qwen

这个提交包含在:
binary-husky
2023-12-04 10:09:21 +08:00
当前提交 2cef81abbe
共有 2 个文件被更改,包括 6 次插入和 5 次删除

查看文件

@@ -1,4 +1,4 @@
-model_name = "Qwen"
+model_name = "Qwen-7B"
 cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
@@ -30,7 +30,7 @@ class GetQwenLMHandle(LocalLLMHandle):
 from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
 with ProxyNetworkActivate('Download_LLM'):
-model_id = 'qwen/Qwen-7B-Chat'
+model_id = 'qwen/Qwen-7B-Chat' #在这里更改路径,如果你已经下载好了的话,同时,别忘记tokenizer
 self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
 # use fp16
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
@@ -51,7 +51,7 @@ class GetQwenLMHandle(LocalLLMHandle):
 query, max_length, top_p, temperature, history = adaptor(kwargs)
-for response in self._model.chat(self._tokenizer, query, history=history, stream=True):
+for response in self._model.chat_stream(self._tokenizer, query, history=history):
 yield response
 def try_to_import_special_deps(self, **kwargs):