Mirrored from https://github.com/SCIR-HI/Huatuo-Llama-Med-Chinese.git
Synced 2025-12-06 14:36:49 +00:00
add test
.gitignore (vendored) | 3

@@ -12,4 +12,5 @@ lora-**
 *ckpt
 wandb
 todo.txt
 .vscode/
+*tmp*
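The new *tmp* pattern presumably keeps the infer_result/*_tmp.txt files written by the scripts/test.sh added below out of version control.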
infer.py | 5

@@ -31,7 +31,7 @@ def main(
     instruct_dir: str = "",
     use_lora: bool = True,
     lora_weights: str = "tloen/alpaca-lora-7b",
-    # The prompt template to use, will default to alpaca.
+    # The prompt template to use, will default to med_template.
     prompt_template: str = "med_template",
 ):
     prompter = Prompter(prompt_template)
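For context, here is a minimal sketch of how these three flags presumably fit together at load time. The helper name load_model is hypothetical and the dtype/device settings are illustrative; the repo's actual infer.py may differ.

    import torch
    from peft import PeftModel
    from transformers import LlamaForCausalLM, LlamaTokenizer

    def load_model(base_model: str,
                   use_lora: bool = True,
                   lora_weights: str = "tloen/alpaca-lora-7b"):
        # Hypothetical helper: load the LLaMA base model and tokenizer.
        tokenizer = LlamaTokenizer.from_pretrained(base_model)
        model = LlamaForCausalLM.from_pretrained(
            base_model, torch_dtype=torch.float16, device_map="auto"
        )
        if use_lora:
            # Layer the LoRA adapter on top; lora_weights may be a hub id
            # (tloen/alpaca-lora-7b) or a local directory (./lora-llama-med).
            model = PeftModel.from_pretrained(model, lora_weights,
                                              torch_dtype=torch.float16)
        model.eval()
        return tokenizer, model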
@@ -111,9 +111,10 @@ def main(
         infer_from_json(instruct_dir)
     else:
         for instruction in [
-            "一位50岁女性出现不适、厌油腻、肝囊肿等症状,检查后发现为胆囊癌,并且病情十分严重,应该如何进行治疗?",
+            "我感冒了,怎么治疗",
             "一个患有肝衰竭综合征的病人,除了常见的临床表现外,还有哪些特殊的体征?",
             "急性阑尾炎和缺血性心脏病的多发群体有何不同?",
+            "小李最近出现了心动过速的症状,伴有轻度胸痛。体检发现P-R间期延长,伴有T波低平和ST段异常",
         ]:
             print("Instruction:", instruction)
             print("Response:", evaluate(instruction))
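The commit swaps the first test prompt for a much simpler one ("I have a cold, how should it be treated?") and appends a new case-style prompt (tachycardia with mild chest pain; prolonged P-R interval, flattened T waves, and ST-segment abnormalities on examination). For reference, here is a hedged sketch of what the evaluate() called above typically looks like in alpaca-lora derived code: it assumes the Prompter API (generate_prompt / get_response) and the prompter, tokenizer, and model objects set up earlier in main(), and the generation settings are illustrative rather than the repo's exact values.

    import torch

    def evaluate(instruction, input=None, max_new_tokens=256):
        # Render the instruction into the active template (med_template here).
        prompt = prompter.generate_prompt(instruction, input)
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            generated = model.generate(**inputs, max_new_tokens=max_new_tokens)
        # Decode, then strip the prompt scaffolding, keeping only the response.
        output = tokenizer.decode(generated[0], skip_special_tokens=True)
        return prompter.get_response(output)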
scripts/test.sh (new file) | 33

@@ -0,0 +1,33 @@
+#!/bin/sh
+
+# If inferring with the llama model, set 'use_lora' to 'False' and 'prompt_template' to 'ori_template'.
+# If inferring with the default alpaca model, set 'use_lora' to 'True', 'lora_weights' to 'tloen/alpaca-lora-7b', and 'prompt_template' to 'alpaca'.
+# If inferring with the llama-med model, download the LORA weights and set 'lora_weights' to './lora-llama-med' (or the exact directory of LORA weights) and 'prompt_template' to 'med_template'.
+
+BASE_MODEL="decapoda-research/llama-7b-hf"
+# original llama
+o_cmd="python infer.py \
+    --base_model ${BASE_MODEL} \
+    --use_lora False \
+    --prompt_template 'ori_template'"
+
+# Alpaca
+a_cmd="python infer.py \
+    --base_model ${BASE_MODEL} \
+    --use_lora True \
+    --lora_weights "tloen/alpaca-lora-7b" \
+    --prompt_template 'alpaca'"
+
+# llama-med
+m_cmd="python infer.py \
+    --base_model ${BASE_MODEL} \
+    --use_lora True \
+    --lora_weights "lora-llama-med" \
+    --prompt_template 'med_template'"
+
+echo "ori"
+eval $o_cmd > infer_result/o_tmp.txt
+echo "alpaca"
+eval $a_cmd > infer_result/a_tmp.txt
+echo "med"
+eval $m_cmd > infer_result/m_tmp.txt
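Two caveats worth noting when running this script. First, the redirections assume an infer_result/ directory already exists; create it beforehand or the shell will fail to open the output files:

    mkdir -p infer_result
    sh scripts/test.sh

Second, the inner double quotes around the --lora_weights values sit inside an already double-quoted string, so they close and reopen it; because the commands are re-parsed by eval the script still behaves as intended, but single quotes (as used for --prompt_template) would be the safer choice.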