diff --git a/config.py b/config.py
index fcad051..87f736c 100644
--- a/config.py
+++ b/config.py
@@ -91,7 +91,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-prev
                     "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
                     'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm3", "moss", "claude-2", "deepseekcoder"]
+                    "chatglm3", "moss", "claude-2"]
 # P.S. Other available models also include ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                         "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
@@ -114,9 +114,6 @@ CHATGLM_PTUNING_CHECKPOINT = "" # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b
 LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
 LOCAL_MODEL_QUANT = "FP16" # default "FP16"; "INT4" enables the INT4 quantized version, "INT8" enables the INT8 quantized version
 
-# Maximum number of input tokens for deepseekcoder at runtime (more than 4096 is pointless); lower it if the conversation runs out of GPU memory
-MAX_INPUT_TOKEN_LENGTH = 2048
-
 # Number of parallel gradio threads (no need to change)
 CONCURRENT_COUNT = 100
 
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 8dece54..dcfeba9 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -552,7 +552,7 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
                 "fn_with_ui": deepseekcoder_ui,
                 "fn_without_ui": deepseekcoder_noui,
                 "endpoint": None,
-                "max_token": 4096,
+                "max_token": 2048,
                 "tokenizer": tokenizer_gpt35,
                 "token_cnt": get_token_num_gpt35,
             }
diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py
index 09bd0b3..89964ab 100644
--- a/request_llms/bridge_deepseekcoder.py
+++ b/request_llms/bridge_deepseekcoder.py
@@ -8,7 +8,6 @@ from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 import torch
 
-MAX_INPUT_TOKEN_LENGTH = get_conf("MAX_INPUT_TOKEN_LENGTH")
 def download_huggingface_model(model_name, max_retry, local_dir):
     from huggingface_hub import snapshot_download
     for i in range(1, max_retry):
@@ -94,8 +93,8 @@ class GetCoderLMHandle(LocalLLMHandle):
         history.append({ 'role': 'user', 'content': query})
         messages = history
         inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
-        if inputs.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-            inputs = inputs[:, -MAX_INPUT_TOKEN_LENGTH:]
+        if inputs.shape[1] > max_length:
+            inputs = inputs[:, -max_length:]
         inputs = inputs.to(self._model.device)
         generation_kwargs = dict(
             inputs=inputs,
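For reference, a minimal sketch of the truncation behaviour the last hunk relies on: with the global `MAX_INPUT_TOKEN_LENGTH` removed, the limit now comes from `max_length`, which the diff assumes is already in scope inside the generator (presumably fed from the `"max_token": 2048` entry in `bridge_all.py`), and over-long chat-template input is clipped to its most recent tokens. The dummy tensor and the 3000-token length below are made up for illustration; only the slicing pattern and the 2048 limit come from the diff.

```python
# Illustrative sketch (not part of this diff): keep-the-tail truncation as used
# in GetCoderLMHandle once `max_length` replaces MAX_INPUT_TOKEN_LENGTH.
import torch

max_length = 2048                          # mirrors the new "max_token": 2048 entry
inputs = torch.arange(3000).unsqueeze(0)   # stand-in for apply_chat_template output, shape [1, 3000]

if inputs.shape[1] > max_length:
    # keep only the most recent `max_length` tokens, dropping the oldest history
    inputs = inputs[:, -max_length:]

print(inputs.shape)   # torch.Size([1, 2048])
print(inputs[0, 0])   # tensor(952) -> the head of the conversation was discarded
```

Keeping the tail rather than the head preserves the latest user turn at the cost of the oldest history, which is why the slice is `[:, -max_length:]` rather than `[:, :max_length]`.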