From 498598624398ec75e231804e35c576583f12cd70 Mon Sep 17 00:00:00 2001
From: jlw463195935 <463195395@qq.com>
Date: Fri, 1 Dec 2023 16:11:44 +0800
Subject: [PATCH 1/2] Add INT4/INT8 quantization with FP16 as the default
 loading mode (INT4 and INT8 require extra libraries); fix GPU memory
 exhaustion from unbounded token growth in multi-turn conversations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md                            |  8 +++++
 config.py                            |  4 ++-
 request_llms/bridge_deepseekcoder.py | 48 ++++++++++++++++++++++++++--
 3 files changed, 56 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index 54bf7c1..e8893d6 100644
--- a/README.md
+++ b/README.md
@@ -166,6 +166,14 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
 # [Optional step IV] Make sure AVAIL_LLM_MODELS in config.py contains the models you want; all currently supported models are listed below (the jittorllms series is currently only supported via the docker deployment):
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+
+# [Optional step V] Enable INT8/INT4 quantization of local models (the model itself is not a pre-quantized build; currently supported for deepseek-coder, more models will be added after testing)
+pip install bitsandbytes
+# Windows users should install bitsandbytes from the bitsandbytes-windows-webui index below instead
+python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
+pip install -U git+https://github.com/huggingface/transformers.git
+pip install -U git+https://github.com/huggingface/accelerate.git
+pip install peft
 ```
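The extra packages above are only needed for the INT8/INT4 paths; FP16 loading works without them. A minimal sanity check, assuming the commands were run inside the project's active environment (the version prints are informational only):

```python
# Sanity check for the optional quantization dependencies (assumes they were
# installed into the active environment as shown above).
import torch
import bitsandbytes
import transformers
import accelerate
import peft

print("bitsandbytes :", bitsandbytes.__version__)
print("transformers :", transformers.__version__)
print("accelerate   :", accelerate.__version__)
print("peft         :", peft.__version__)
# INT8/INT4 loading requires a CUDA device; CPU mode falls back to bfloat16.
print("CUDA available:", torch.cuda.is_available())
```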

diff --git a/config.py b/config.py
index f170a2b..fcad051 100644
--- a/config.py
+++ b/config.py
@@ -91,7 +91,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-prev
                     "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm3", "moss", "claude-2"]
+                    "chatglm3", "moss", "claude-2", "deepseekcoder"]
 # P.S. Other available models include ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                                      "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
@@ -114,6 +114,8 @@ CHATGLM_PTUNING_CHECKPOINT = "" # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b
 LOCAL_MODEL_DEVICE = "cpu" # or "cuda"
 LOCAL_MODEL_QUANT = "FP16" # "FP16" (default), "INT4" to enable the INT4-quantized path, "INT8" to enable the INT8-quantized path
+# Maximum number of input tokens fed to deepseekcoder at runtime (values above 4096 are pointless); lower it if GPU memory runs out during a conversation
+MAX_INPUT_TOKEN_LENGTH = 2048

 # Number of parallel gradio threads (no need to change)
 CONCURRENT_COUNT = 100

diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py
index 2242eec..09bd0b3 100644
--- a/request_llms/bridge_deepseekcoder.py
+++ b/request_llms/bridge_deepseekcoder.py
@@ -6,7 +6,9 @@ from toolbox import ProxyNetworkActivate
 from toolbox import get_conf
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
+import torch

+MAX_INPUT_TOKEN_LENGTH = get_conf("MAX_INPUT_TOKEN_LENGTH")
 def download_huggingface_model(model_name, max_retry, local_dir):
     from huggingface_hub import snapshot_download
     for i in range(1, max_retry):
@@ -36,9 +38,46 @@ class GetCoderLMHandle(LocalLLMHandle):
         # tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
         self._streamer = TextIteratorStreamer(tokenizer)
-        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+        device_map = {
+            "transformer.word_embeddings": 0,
+            "transformer.word_embeddings_layernorm": 0,
+            "lm_head": 0,
+            "transformer.h": 0,
+            "transformer.ln_f": 0,
+            "model.embed_tokens": 0,
+            "model.layers": 0,
+            "model.norm": 0,
+        }
+
+        # Check the quantization setting
+        quantization_type = get_conf('LOCAL_MODEL_QUANT')
+
         if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
-            model = model.cuda()
+            if quantization_type == "INT8":
+                from transformers import BitsAndBytesConfig
+                # Load with INT8 quantization
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
+                                                             device_map=device_map)
+            elif quantization_type == "INT4":
+                from transformers import BitsAndBytesConfig
+                # Load with INT4 (NF4) quantization
+                bnb_config = BitsAndBytesConfig(
+                    load_in_4bit=True,
+                    bnb_4bit_use_double_quant=True,
+                    bnb_4bit_quant_type="nf4",
+                    bnb_4bit_compute_dtype=torch.bfloat16
+                )
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                             quantization_config=bnb_config, device_map=device_map)
+            else:
+                # Default: no quantization, load the weights in bfloat16 half precision
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                             torch_dtype=torch.bfloat16, device_map=device_map)
+        else:
+            # CPU mode: load in bfloat16 on the CPU
+            model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                         torch_dtype=torch.bfloat16)
+
         return model, tokenizer

     def llm_stream_generator(self, **kwargs):
@@ -54,7 +93,10 @@ class GetCoderLMHandle(LocalLLMHandle):
         query, max_length, top_p, temperature, history = adaptor(kwargs)
         history.append({ 'role': 'user', 'content': query})
         messages = history
-        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt").to(self._model.device)
+        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
+        if inputs.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+            inputs = inputs[:, -MAX_INPUT_TOKEN_LENGTH:]
+        inputs = inputs.to(self._model.device)
         generation_kwargs = dict(
             inputs=inputs,
             max_new_tokens=max_length,
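For reference, the INT4 branch added above follows the standard bitsandbytes NF4 recipe from transformers. A minimal standalone sketch of that loading path, assuming a CUDA device is available; the model id and device_map="auto" are illustrative stand-ins, not the explicit per-layer device_map the handler above builds:

```python
# Standalone sketch of the INT4 (NF4) loading path used in the patch above.
# The model id below is a hypothetical example; the handler receives its model
# name from the project's configuration instead.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"  # illustrative example id

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize weights to 4 bit at load time
    bnb_4bit_use_double_quant=True,         # also quantize the quantization constants
    bnb_4bit_quant_type="nf4",              # NormalFloat4 weight format
    bnb_4bit_compute_dtype=torch.bfloat16,  # run matmuls in bfloat16
)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    quantization_config=bnb_config,
    device_map="auto",
)
```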
From 552219fd5a7a30e924d042b78f29547ced8c333c Mon Sep 17 00:00:00 2001
From: jlw463195935 <463195395@qq.com>
Date: Fri, 1 Dec 2023 16:17:30 +0800
Subject: [PATCH 2/2] Add INT4/INT8 quantization with FP16 as the default
 loading mode (INT4 and INT8 require extra libraries; currently only tested
 with the deepseek-coder model, more will be added after further testing);
 fix GPU memory exhaustion from unbounded token growth in deepseek-coder
 multi-turn conversations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e8893d6..fd0ec5c 100644
--- a/README.md
+++ b/README.md
@@ -167,7 +167,7 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
 # [Optional step IV] Make sure AVAIL_LLM_MODELS in config.py contains the models you want; all currently supported models are listed below (the jittorllms series is currently only supported via the docker deployment):
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

-# [Optional step V] Enable INT8/INT4 quantization of local models (the model itself is not a pre-quantized build; currently supported for deepseek-coder, more models will be added after testing)
+# [Optional step V] Enable INT8/INT4 quantization of local models (the model referred to here is not itself a pre-quantized build; currently supported for deepseek-coder, more models will be added after testing)
 pip install bitsandbytes
 # Windows users should install bitsandbytes from the bitsandbytes-windows-webui index below instead
 python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
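The memory fix itself is the truncation added to llm_stream_generator in patch 1: only the most recent MAX_INPUT_TOKEN_LENGTH tokens of the rendered chat history are kept, so the prompt can no longer grow without bound across turns. A short usage sketch, assuming `tokenizer` and `model` come from the loading sketch above; the limit of 2048 mirrors the default added to config.py:

```python
# Sketch of the truncation rule: keep only the most recent MAX_INPUT_TOKEN_LENGTH
# tokens of the rendered chat history before generation. `tokenizer` and `model`
# are assumed to be the objects loaded in the previous sketch.
MAX_INPUT_TOKEN_LENGTH = 2048

messages = [{"role": "user", "content": "Write a quicksort function in Python."}]
inputs = tokenizer.apply_chat_template(messages, return_tensors="pt")
if inputs.shape[1] > MAX_INPUT_TOKEN_LENGTH:
    inputs = inputs[:, -MAX_INPUT_TOKEN_LENGTH:]  # drop the oldest tokens, keep the tail
inputs = inputs.to(model.device)

output_ids = model.generate(inputs=inputs, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][inputs.shape[1]:], skip_special_tokens=True))
```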