Add INT4/INT8 quantization and default FP16 loading for local models (INT4 and INT8 require extra libraries to be installed)
Fix unbounded token growth in multi-turn conversations blowing up GPU memory
commit 4985986243
parent d8958da8cd
@@ -166,6 +166,14 @@ git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss #
 
 # [Optional Step IV] Make sure AVAIL_LLM_MODELS in the config.py configuration file includes the desired models; all currently supported models are listed below (the jittorllms series currently only supports the docker option):
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+
+# [Optional Step V] Support INT8/INT4 quantization of local models (the base model itself is not a quantized version; currently deepseek-coder is supported, more quantized models will be added after testing)
+pip install bitsandbytes
+# Windows users need to install bitsandbytes from the bitsandbytes-windows-webui index below
+python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui
+pip install -U git+https://github.com/huggingface/transformers.git
+pip install -U git+https://github.com/huggingface/accelerate.git
+pip install peft
 ```
 
 </p>
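Before switching LOCAL_MODEL_QUANT away from "FP16", it can help to confirm that the freshly installed bitsandbytes stack actually works on the target GPU. A minimal smoke test, assuming a CUDA device is available and using a small public model purely for illustration (not the model this project serves):

```
# Sanity check for bitsandbytes-backed 8-bit loading.
# Assumption: a CUDA GPU is visible; "facebook/opt-125m" is only a tiny stand-in model.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

assert torch.cuda.is_available(), "INT8/INT4 quantization via bitsandbytes requires a CUDA device"

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)
print(f"loaded OK, memory footprint ~ {model.get_memory_footprint() / 1e6:.1f} MB")
```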
@@ -91,7 +91,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-prev
                     "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                     "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                    "chatglm3", "moss", "claude-2"]
+                    "chatglm3", "moss", "claude-2", "deepseekcoder"]
 # P.S. Other available models also include ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #                  "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
@@ -114,6 +114,8 @@ CHATGLM_PTUNING_CHECKPOINT = "" # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b
 LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
 LOCAL_MODEL_QUANT = "FP16" # default "FP16"; "INT4" enables the INT4-quantized version, "INT8" enables the INT8-quantized version
 
+# Maximum number of input tokens for deepseekcoder at runtime (values above 4096 are pointless); lower this if the conversation runs out of GPU memory
+MAX_INPUT_TOKEN_LENGTH = 2048
 
 # Number of concurrent gradio threads (no need to modify)
 CONCURRENT_COUNT = 100
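The new MAX_INPUT_TOKEN_LENGTH value caps how much conversation history is handed to the model each turn: the bridge keeps only the most recent tokens, which is what bounds GPU memory as the dialogue grows. A standalone sketch of that left-truncation, with an illustrative helper name that is not part of the project's API:

```
import torch

MAX_INPUT_TOKEN_LENGTH = 2048  # mirrors the new default in config.py

def truncate_left(input_ids: torch.Tensor, limit: int = MAX_INPUT_TOKEN_LENGTH) -> torch.Tensor:
    """Keep only the most recent `limit` tokens of a (batch, seq_len) tensor of token ids."""
    if input_ids.shape[1] > limit:
        input_ids = input_ids[:, -limit:]
    return input_ids

history_ids = torch.randint(0, 32000, (1, 5000))  # pretend a long multi-turn history
print(truncate_left(history_ids).shape)           # torch.Size([1, 2048])
```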
@@ -6,7 +6,9 @@ from toolbox import ProxyNetworkActivate
 from toolbox import get_conf
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
+import torch
 
+MAX_INPUT_TOKEN_LENGTH = get_conf("MAX_INPUT_TOKEN_LENGTH")
 def download_huggingface_model(model_name, max_retry, local_dir):
     from huggingface_hub import snapshot_download
     for i in range(1, max_retry):
@@ -36,9 +38,46 @@ class GetCoderLMHandle(LocalLLMHandle):
         # tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
         tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
         self._streamer = TextIteratorStreamer(tokenizer)
-        model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
+        device_map = {
+            "transformer.word_embeddings": 0,
+            "transformer.word_embeddings_layernorm": 0,
+            "lm_head": 0,
+            "transformer.h": 0,
+            "transformer.ln_f": 0,
+            "model.embed_tokens": 0,
+            "model.layers": 0,
+            "model.norm": 0,
+        }
+
+        # Check the quantization configuration
+        quantization_type = get_conf('LOCAL_MODEL_QUANT')
+
         if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
-            model = model.cuda()
+            if quantization_type == "INT8":
+                from transformers import BitsAndBytesConfig
+                # Load with INT8 quantization
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
+                                                             device_map=device_map)
+            elif quantization_type == "INT4":
+                from transformers import BitsAndBytesConfig
+                # Load with INT4 quantization
+                bnb_config = BitsAndBytesConfig(
+                    load_in_4bit=True,
+                    bnb_4bit_use_double_quant=True,
+                    bnb_4bit_quant_type="nf4",
+                    bnb_4bit_compute_dtype=torch.bfloat16
+                )
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                             quantization_config=bnb_config, device_map=device_map)
+            else:
+                # Default FP16 loading
+                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                             torch_dtype=torch.bfloat16, device_map=device_map)
+        else:
+            # CPU mode
+            model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
+                                                         torch_dtype=torch.bfloat16)
+
         return model, tokenizer
 
     def llm_stream_generator(self, **kwargs):
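Condensed, the branching above maps LOCAL_MODEL_QUANT to a set of from_pretrained keyword arguments; a compact restatement for reference (the helper name is illustrative only, and note that the commit's non-quantized path loads weights as bfloat16 via torch_dtype):

```
import torch
from transformers import BitsAndBytesConfig

def quant_kwargs(quantization_type: str) -> dict:
    """Map LOCAL_MODEL_QUANT ("FP16" / "INT8" / "INT4") to from_pretrained keyword arguments."""
    if quantization_type == "INT8":
        return dict(load_in_8bit=True)
    if quantization_type == "INT4":
        return dict(quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        ))
    return dict(torch_dtype=torch.bfloat16)  # default path: bfloat16 weights
```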
@@ -54,7 +93,10 @@ class GetCoderLMHandle(LocalLLMHandle):
         query, max_length, top_p, temperature, history = adaptor(kwargs)
         history.append({ 'role': 'user', 'content': query})
         messages = history
-        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt").to(self._model.device)
+        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
+        if inputs.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+            inputs = inputs[:, -MAX_INPUT_TOKEN_LENGTH:]
+        inputs = inputs.to(self._model.device)
         generation_kwargs = dict(
             inputs=inputs,
             max_new_tokens=max_length,
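The hunk ends inside generation_kwargs, so the rest of the streaming path is not shown here. For context, the usual TextIteratorStreamer pattern runs model.generate in a background thread and iterates the streamer for decoded text; a generic sketch under that assumption (gpt2 is only a stand-in model, this is not the project's exact code):

```
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

inputs = tokenizer("def quicksort(arr):", return_tensors="pt")
generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=64)

# generate() runs in a worker thread; the streamer yields decoded chunks as they arrive
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()
```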