Changed some comments
This commit is contained in:
parent
0cd3274d04
commit
b0c627909a
@@ -101,6 +101,8 @@ MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
 
 
 # Select a local model variant (only takes effect if AVAIL_LLM_MODELS contains the corresponding local model)
+# If you choose a Qwen-series model, specify the exact model in QWEN_MODEL_SELECTION below
+# It can also be a concrete model path
 QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"
 
 
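The two new comment lines document that QWEN_MODEL_SELECTION accepts either a Hugging Face repo id or a path to an already-downloaded checkpoint. A minimal sketch of both forms (the local path below is a hypothetical example, not part of the commit):

QWEN_MODEL_SELECTION = "Qwen/Qwen-1_8B-Chat-Int8"       # Hugging Face repo id (the default)
# QWEN_MODEL_SELECTION = "/models/Qwen-1_8B-Chat-Int8"  # or a concrete local model path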
@@ -22,7 +22,7 @@ class GetQwenLMHandle(LocalLLMHandle):
         from transformers import AutoModelForCausalLM, AutoTokenizer
         from transformers.generation import GenerationConfig
         with ProxyNetworkActivate('Download_LLM'):
-            model_id = get_conf('QWEN_MODEL_SELECTION') # change the path here if you have already downloaded the model; also, don't forget the tokenizer
+            model_id = get_conf('QWEN_MODEL_SELECTION')
             self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
             # use fp16
             model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
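Outside of gpt_academic, the same load path can be exercised standalone. A minimal sketch, assuming the model id is hard-coded instead of going through get_conf and ProxyNetworkActivate; model.chat() and GenerationConfig.from_pretrained() are the interfaces exposed by Qwen's trust_remote_code implementation, not shown in this diff:

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

# Assumption: stand-in for get_conf('QWEN_MODEL_SELECTION'); may also be a local checkpoint directory
model_id = "Qwen/Qwen-1_8B-Chat-Int8"

# Tokenizer and model are loaded the same way as in GetQwenLMHandle above
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)

# Qwen's remote code provides a chat() helper that returns the reply and updated history
response, history = model.chat(tokenizer, "Hello", history=None)
print(response)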