upgrade chatglm to chatglm2

binary-husky 2023-06-30 12:06:28 +08:00
parent 37172906ef
commit 403667aec1
2 changed files with 13 additions and 4 deletions


@@ -152,7 +152,7 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
-    # chatglm
+    # chatglm (now aligned directly to chatglm2)
     "chatglm": {
         "fn_with_ui": chatglm_ui,
         "fn_without_ui": chatglm_noui,
@@ -161,6 +161,15 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "chatglm2": {
+        "fn_with_ui": chatglm_ui,
+        "fn_without_ui": chatglm_noui,
+        "endpoint": None,
+        "max_token": 1024,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
     # newbing
     "newbing": {
         "fn_with_ui": newbing_ui,

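For context, entries in this model_info registry are looked up by model name when a request is dispatched; the new "chatglm2" entry reuses the existing ChatGLM handlers with a local, endpoint-less configuration. The sketch below only illustrates that lookup pattern: the trimmed-down registry and the dispatch() helper are illustrative assumptions, not the project's actual dispatcher.

def chatglm_ui(query, history, max_token):
    # Stand-in for the real ChatGLM UI handler; just echoes its inputs.
    return f"[chatglm2, max_token={max_token}] {query}"

model_info = {
    "chatglm2": {
        "fn_with_ui": chatglm_ui,   # same handler is shared by chatglm and chatglm2
        "endpoint": None,           # local model, no remote endpoint
        "max_token": 1024,
    },
}

def dispatch(model_name, query, history=()):
    # Look up the entry registered under the requested model name and call
    # its UI handler with that model's token budget.
    entry = model_info[model_name]
    return entry["fn_with_ui"](query, list(history), entry["max_token"])

print(dispatch("chatglm2", "Hello"))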

@@ -40,12 +40,12 @@ class GetGLMHandle(Process):
         while True:
             try:
                 if self.chatglm_model is None:
-                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
+                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
                     device, = get_conf('LOCAL_MODEL_DEVICE')
                     if device=='cpu':
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).float()
                     else:
-                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
+                        self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).half().cuda()
                     self.chatglm_model = self.chatglm_model.eval()
                     break
                 else:
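For readers who want to try the upgraded model outside the project, here is a minimal standalone sketch of the same loading pattern the diff applies: half precision on GPU, full float32 on CPU. It assumes the transformers and torch packages are installed and that the THUDM/chatglm2-6b weights can be downloaded; the chat() call is the helper shipped with the model's remote code.

import torch
from transformers import AutoModel, AutoTokenizer

model_id = "THUDM/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)

if torch.cuda.is_available():
    # GPU path: half precision roughly halves the memory footprint.
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True).half().cuda()
else:
    # CPU path: fall back to full float32 weights.
    model = AutoModel.from_pretrained(model_id, trust_remote_code=True).float()

model = model.eval()

# chat() is provided by the model's remote code; history is a list of
# (query, response) pairs carried across turns.
response, history = model.chat(tokenizer, "Hello", history=[])
print(response)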