model_name = "Qwen-7B" cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`" from transformers import AutoModel, AutoTokenizer import time import threading import importlib from toolbox import update_ui, get_conf, ProxyNetworkActivate from multiprocessing import Process, Pipe from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns # ------------------------------------------------------------------------------------------------------------------------ # ๐Ÿ”Œ๐Ÿ’ป Local Model # ------------------------------------------------------------------------------------------------------------------------ class GetQwenLMHandle(LocalLLMHandle): def load_model_info(self): # ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ ๅญ่ฟ›็จ‹ๆ‰ง่กŒ self.model_name = model_name self.cmd_to_install = cmd_to_install def load_model_and_tokenizer(self): # ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ ๅญ่ฟ›็จ‹ๆ‰ง่กŒ import os, glob import os import platform from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig with ProxyNetworkActivate('Download_LLM'): model_id = 'qwen/Qwen-7B-Chat' #ๅœจ่ฟ™้‡Œๆ›ดๆ”น่ทฏๅพ„๏ผŒๅฆ‚ๆžœไฝ ๅทฒ็ปไธ‹่ฝฝๅฅฝไบ†็š„่ฏ๏ผŒๅŒๆ—ถ๏ผŒๅˆซๅฟ˜่ฎฐtokenizer self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True) # use fp16 model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval() model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # ๅฏๆŒ‡ๅฎšไธๅŒ็š„็”Ÿๆˆ้•ฟๅบฆใ€top_p็ญ‰็›ธๅ…ณ่ถ…ๅ‚ self._model = model return self._model, self._tokenizer def llm_stream_generator(self, **kwargs): # ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ ๅญ่ฟ›็จ‹ๆ‰ง่กŒ def adaptor(kwargs): query = kwargs['query'] max_length = kwargs['max_length'] top_p = kwargs['top_p'] temperature = kwargs['temperature'] history = kwargs['history'] return query, max_length, top_p, temperature, history query, max_length, top_p, temperature, history = adaptor(kwargs) for response in self._model.chat_stream(self._tokenizer, query, history=history): yield response def try_to_import_special_deps(self, **kwargs): # import something that will raise error if the user does not install requirement_*.txt # ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ๐Ÿƒโ€โ™‚๏ธ ไธป่ฟ›็จ‹ๆ‰ง่กŒ import importlib importlib.import_module('modelscope') # ------------------------------------------------------------------------------------------------------------------------ # ๐Ÿ”Œ๐Ÿ’ป GPT-Academic Interface # ------------------------------------------------------------------------------------------------------------------------ predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)