更改了一些注释

This commit is contained in:
Alpha 2023-12-04 12:51:41 +08:00
parent 0cd3274d04
commit b0c627909a
2 changed files with 3 additions and 1 deletions

View File

@ -101,6 +101,8 @@ MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
# 选择本地模型变体只有当AVAIL_LLM_MODELS包含了对应本地模型时才会起作用
# 如果你选择Qwen系列的模型那么请在下面的QWEN_MODEL_SELECTION中指定具体的模型
# 也可以是具体的模型路径
QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"

View File

@ -22,7 +22,7 @@ class GetQwenLMHandle(LocalLLMHandle):
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
with ProxyNetworkActivate('Download_LLM'):
model_id = get_conf('QWEN_MODEL_SELECTION')
self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
# use fp16
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()