model_name = "deepseek-coder-6.7b-instruct"
cmd_to_install = "未知" # "`pip install -r request_llms/requirements_qwen.txt`"

import os
from toolbox import ProxyNetworkActivate
from toolbox import get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
from threading import Thread
import torch
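
# This bridge plugs the local DeepSeek-Coder 6.7B-instruct model into GPT-Academic's local-LLM
# framework: GetCoderLMHandle loads and runs the model in a background subprocess, and
# get_local_llm_predict_fns() at the bottom of the file exposes the two predict entry points
# used by the rest of the application.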


def download_huggingface_model(model_name, max_retry, local_dir):
    from huggingface_hub import snapshot_download
    for i in range(1, max_retry + 1):  # try up to max_retry times
        try:
            snapshot_download(repo_id=model_name, local_dir=local_dir, resume_download=True)
            break
        except Exception as e:
            print(f'\n\n下载失败,重试第{i}次中...\n\n')
    return local_dir
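
# Note: this helper is currently unused; the commented-out snippet inside
# load_model_and_tokenizer() below shows how it was intended to be called.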


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetCoderLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the subprocess
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the subprocess
        with ProxyNetworkActivate('Download_LLM'):
            from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
            model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"
            # local_dir = f"~/.cache/{model_name}"
            # if not os.path.exists(local_dir):
            #     tokenizer = download_huggingface_model(model_name, max_retry=128, local_dir=local_dir)
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
            self._streamer = TextIteratorStreamer(tokenizer)
            device_map = {
                "transformer.word_embeddings": 0,
                "transformer.word_embeddings_layernorm": 0,
                "lm_head": 0,
                "transformer.h": 0,
                "transformer.ln_f": 0,
                "model.embed_tokens": 0,
                "model.layers": 0,
                "model.norm": 0,
            }
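            # Every entry (both the ChatGLM/Bloom-style "transformer.*" module names and the
            # LLaMA-style "model.*" names) is pinned to GPU 0, so the whole model is placed on
            # a single device instead of being sharded across GPUs.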

            # Check the quantization setting
            quantization_type = get_conf('LOCAL_MODEL_QUANT')

            if get_conf('LOCAL_MODEL_DEVICE') != 'cpu':
                if quantization_type == "INT8":
                    # INT8 quantization
                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, load_in_8bit=True,
                                                                 device_map=device_map)
                elif quantization_type == "INT4":
                    from transformers import BitsAndBytesConfig
                    # INT4 quantization
                    bnb_config = BitsAndBytesConfig(
                        load_in_4bit=True,
                        bnb_4bit_use_double_quant=True,
                        bnb_4bit_quant_type="nf4",
                        bnb_4bit_compute_dtype=torch.bfloat16
                    )
                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                                 quantization_config=bnb_config, device_map=device_map)
                else:
                    # Default: no quantization, load in bfloat16
                    model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                                 torch_dtype=torch.bfloat16, device_map=device_map)
            else:
                # CPU mode
                model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True,
                                                             torch_dtype=torch.bfloat16)

        return model, tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the subprocess
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)
        history.append({'role': 'user', 'content': query})
        messages = history
        inputs = self._tokenizer.apply_chat_template(messages, return_tensors="pt")
        if inputs.shape[1] > max_length:
            # Keep only the most recent max_length tokens if the prompt is too long
            inputs = inputs[:, -max_length:]
        inputs = inputs.to(self._model.device)
        generation_kwargs = dict(
            inputs=inputs,
            max_new_tokens=max_length,
            do_sample=False,
            top_p=top_p,
            streamer=self._streamer,
            top_k=50,
            temperature=temperature,
            num_return_sequences=1,
            eos_token_id=32021,
        )
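        # Note: with do_sample=False generation is greedy, so top_p / top_k / temperature are
        # effectively ignored by transformers; eos_token_id=32021 is a hard-coded, model-specific
        # stop token (presumably the <|EOT|> end-of-turn token of deepseek-coder-instruct).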
        thread = Thread(target=self._model.generate, kwargs=generation_kwargs, daemon=True)
        thread.start()
        generated_text = ""
        for new_text in self._streamer:
            generated_text += new_text
            # print(generated_text)
            yield generated_text
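        # Each iteration yields the cumulative reply so far rather than just the newly streamed
        # chunk, so the consumer always receives the full text to date.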

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
        # import importlib
        # importlib.import_module('modelscope')
        pass


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetCoderLMHandle, model_name, history_format='chatglm3')
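
# A rough usage sketch (illustration only — the argument order and llm_kwargs fields shown here
# are assumptions, not defined in this file; the real call sites live in GPT-Academic's core):
#
#     llm_kwargs = {'max_length': 4096, 'top_p': 0.8, 'temperature': 0.3}
#     reply = predict_no_ui_long_connection("write a quicksort in python", llm_kwargs,
#                                           history=[], sys_prompt="You are a coding assistant.")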