add free NewBing without cookies, using EdgeGPT

qingxu fu 2023-05-24 10:42:11 +08:00
parent 728eba04ec
commit 42eef1bea7
6 changed files with 1438 additions and 63 deletions


@@ -301,16 +301,19 @@ gpt_academic developer QQ group: 2610599535
```
The code borrows designs from many other excellent projects, mainly including:

# Project 1: Tsinghua ChatGLM-6B
https://github.com/THUDM/ChatGLM-6B

# Project 2: Tsinghua JittorLLMs
https://github.com/Jittor/JittorLLMs

# Project 3: Edge-GPT
https://github.com/acheong08/EdgeGPT

# Project 4: ChuanhuChatGPT
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 5: ChatPaper
https://github.com/kaixindelele/ChatPaper

# More:


@@ -47,7 +47,7 @@ MAX_RETRY = 2

# Model selection
LLM_MODEL = "gpt-3.5-turbo"  # options ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. Other available models also include ["newbing-free", "jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

# How to run local LLMs such as ChatGLM: CPU/GPU
LOCAL_MODEL_DEVICE = "cpu"  # or "cuda"

@@ -73,6 +73,7 @@ CUSTOM_PATH = "/"

# If you need to use newbing, paste newbing's long cookie string here
NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
# From now on, if you use the "newbing-free" model, you no longer need to fill in NEWBING_COOKIES
NEWBING_COOKIES = """
your bing cookies here
"""


@@ -201,7 +201,20 @@ if "stack-claude" in AVAIL_LLM_MODELS:
            "token_cnt": get_token_num_gpt35,
        }
    })
if "newbing-free" in AVAIL_LLM_MODELS:
    from .bridge_newbingfree import predict_no_ui_long_connection as newbingfree_noui
    from .bridge_newbingfree import predict as newbingfree_ui
    # newbing-free
    model_info.update({
        "newbing-free": {
            "fn_with_ui": newbingfree_ui,
            "fn_without_ui": newbingfree_noui,
            "endpoint": newbing_endpoint,
            "max_token": 4096,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        }
    })
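
# ----------------------------------------------------------------------
# Editor's note (illustration only, not part of this commit): entries
# registered in model_info are later looked up by the dispatchers in this
# module. A minimal, hypothetical lookup for the new backend would be:
#
#     entry = model_info["newbing-free"]
#     reply = entry["fn_without_ui"](inputs="hello", llm_kwargs=llm_kwargs,
#                                    history=[], sys_prompt="")
#
# where llm_kwargs is assumed to carry 'max_length', 'top_p' and
# 'temperature', as in the test snippet at the end of this diff.
# ----------------------------------------------------------------------
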

def LLM_CATCH_EXCEPTION(f):
    """


@@ -0,0 +1,243 @@
"""
========================================================================
Part 1: from EdgeGPT.py
https://github.com/acheong08/EdgeGPT
========================================================================
"""
from .edge_gpt_free import Chatbot as NewbingChatbot

load_message = "Waiting for NewBing response."

"""
========================================================================
Part 2: the subprocess worker (the part that actually makes the calls)
========================================================================
"""
import time
import json
import re
import logging
import asyncio
import importlib
import threading
from toolbox import update_ui, get_conf, trimmed_format_exc
from multiprocessing import Process, Pipe


def preprocess_newbing_out(s):
    pattern = r'\^(\d+)\^'                  # match ^number^ citation markers
    sub = lambda m: '('+m.group(1)+')'      # replace each match with (number)
    result = re.sub(pattern, sub, s)        # perform the substitution
    if '[1]' in result:
        result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
    return result


def preprocess_newbing_out_simple(result):
    if '[1]' in result:
        result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
    return result
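
# ----------------------------------------------------------------------
# Editor's illustration (not part of the original file): given a reply such
# as "Cats purr^1^\n[1]: https://example.com" (a made-up example),
# preprocess_newbing_out returns "Cats purr(1)\n[1]: https://example.com"
# followed by a ```reference block repeating the "[1]: ..." line, so the
# UI can render citations separately. preprocess_newbing_out_simple only
# appends the reference block and leaves the ^1^ markers untouched.
# ----------------------------------------------------------------------
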
class NewBingHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.newbing_model = None
        self.info = ""
        self.success = True
        self.local_history = []
        self.check_dependency()
        self.start()
        self.threadLock = threading.Lock()

    def check_dependency(self):
        try:
            self.success = False
            import certifi, httpx, rich
            self.info = "Dependency check passed. Waiting for NewBing response. Note: multiple users cannot currently query NewBing at the same time (there is a thread lock); otherwise everyone's NewBing conversation history would leak into each other's. The configured proxy is used automatically when calling NewBing."
            self.success = True
        except:
            self.info = "Missing dependencies. To use Newbing, in addition to the basic pip dependencies you also need to run `pip install -r request_llm/requirements_newbing.txt` to install Newbing's dependencies."
            self.success = False

    def ready(self):
        return self.newbing_model is not None

    async def async_run(self):
        # Read configuration
        NEWBING_STYLE, = get_conf('NEWBING_STYLE')
        from request_llm.bridge_all import model_info
        endpoint = model_info['newbing']['endpoint']
        while True:
            # Wait for the next request from the main process
            kwargs = self.child.recv()
            question = kwargs['query']
            history = kwargs['history']
            system_prompt = kwargs['system_prompt']

            # Reset the conversation if the UI history has been cleared
            if len(self.local_history) > 0 and len(history) == 0:
                await self.newbing_model.reset()
                self.local_history = []

            # Start building the question
            prompt = ""
            if system_prompt not in self.local_history:
                self.local_history.append(system_prompt)
                prompt += system_prompt + '\n'

            # Append history
            for ab in history:
                a, b = ab
                if a not in self.local_history:
                    self.local_history.append(a)
                    prompt += a + '\n'
                # if b not in self.local_history:
                #     self.local_history.append(b)
                #     prompt += b + '\n'

            # The question itself
            prompt += question
            self.local_history.append(question)
            print('question:', prompt)

            # Submit to NewBing and stream the reply back through the pipe
            async for final, response in self.newbing_model.ask_stream(
                prompt=question,
                conversation_style=NEWBING_STYLE,    # ["creative", "balanced", "precise"]
                wss_link=endpoint,                   # "wss://sydney.bing.com/sydney/ChatHub"
            ):
                if not final:
                    print(response)
                    self.child.send(str(response))
                else:
                    print('-------- receive final ---------')
                    self.child.send('[Finish]')
                    # self.local_history.append(response)

    def run(self):
        """
        This function runs in the child process.
        """
        # First run: load parameters
        self.success = False
        self.local_history = []
        if (self.newbing_model is None) or (not self.success):
            # Proxy settings
            proxies, = get_conf('proxies')
            if proxies is None:
                self.proxies_https = None
            else:
                self.proxies_https = proxies['https']
            try:
                self.newbing_model = NewbingChatbot(proxy=self.proxies_https)
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] Cannot load the Newbing component. {tb_str}')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                raise RuntimeError("Cannot load the Newbing component.")

        self.success = True
        try:
            # Enter the task-waiting loop
            asyncio.run(self.async_run())
        except Exception:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            self.child.send(f'[Local Message] Newbing failed {tb_str}.')
            self.child.send('[Fail]')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        """
        This function runs in the main process.
        """
        self.threadLock.acquire()
        self.parent.send(kwargs)        # send the request to the child process
        while True:
            res = self.parent.recv()    # wait for the next NewBing response fragment
            if res == '[Finish]':
                break                   # done
            elif res == '[Fail]':
                self.success = False
                break
            else:
                yield res               # a NewBing response fragment
        self.threadLock.release()
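
# ----------------------------------------------------------------------
# Editor's illustration (not part of the original file): the handle is
# normally driven by the predict* functions below, roughly like this:
#
#     handle = NewBingHandle()
#     for fragment in handle.stream_chat(query="hello", history=[],
#                                        system_prompt="",
#                                        max_length=512, top_p=1, temperature=1):
#         print(fragment)   # partial replies; iteration stops on '[Finish]'
#                           # (or '[Fail]' on error)
#
# Only query/history/system_prompt are read by async_run; the remaining
# keyword values shown here are assumptions and are forwarded unread.
# ----------------------------------------------------------------------
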
"""
========================================================================
第三部分主进程统一调用函数接口
========================================================================
"""
global newbingfree_handle
newbingfree_handle = None

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
    """
    Multi-threaded method.
    See request_llm/bridge_all.py for the function's documentation.
    """
    global newbingfree_handle
    if (newbingfree_handle is None) or (not newbingfree_handle.success):
        newbingfree_handle = NewBingHandle()
        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
        if not newbingfree_handle.success:
            error = newbingfree_handle.info
            newbingfree_handle = None
            raise RuntimeError(error)

    # There is no sys_prompt interface, so the prompt is folded into the history
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
    response = ""
    if len(observe_window) >= 1: observe_window[0] = "[Local Message]: Waiting for NewBing response ..."
    for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("Program terminated.")
    return preprocess_newbing_out_simple(response)

def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    Single-threaded method.
    See request_llm/bridge_all.py for the function's documentation.
    """
    chatbot.append((inputs, "[Local Message]: Waiting for NewBing response ..."))

    global newbingfree_handle
    if (newbingfree_handle is None) or (not newbingfree_handle.success):
        newbingfree_handle = NewBingHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not newbingfree_handle.success:
            newbingfree_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompt definitions
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    chatbot[-1] = (inputs, "[Local Message]: Waiting for NewBing response ...")
    response = "[Local Message]: Waiting for NewBing response ..."
    yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly and has not finished yet. Please wait until it completes before submitting a new question.")
    for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, preprocess_newbing_out(response))
        yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly and has not finished yet. Please wait until it completes before submitting a new question.")
    if response == "[Local Message]: Waiting for NewBing response ...": response = "[Local Message]: NewBing response error, please refresh the page and retry ..."
    history.extend([inputs, response])
    logging.info(f'[raw_input] {inputs}')
    logging.info(f'[response] {response}')
    yield from update_ui(chatbot=chatbot, history=history, msg="All responses complete. Please submit a new question.")

request_llm/edge_gpt_free.py (new file, 1114 lines added): diff suppressed because it is too large.
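
The suppressed file vendors the Chatbot class from acheong08/EdgeGPT. The sketch below is inferred solely from the call sites in the new bridge above; it is a hypothetical outline of the surface this commit relies on, not a copy of the real 1114-line file.

```python
# Hypothetical outline inferred from usage in bridge_newbingfree above
class Chatbot:
    def __init__(self, proxy=None):
        """proxy: an https proxy URL, or None to connect directly."""

    async def reset(self):
        """Discard the current conversation and start a new one."""

    async def ask_stream(self, prompt, conversation_style, wss_link):
        """Async generator yielding (final, response) pairs; `response` holds
        partial reply text until `final` is True."""
        yield False, "partial reply ..."
        yield True, None
```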


@@ -9,69 +9,70 @@ def validate_path():
    sys.path.append(root_dir_assume)
validate_path()  # validate path so you can run from base directory

if __name__ == "__main__":
    from request_llm.bridge_newbingfree import predict_no_ui_long_connection
    # from request_llm.bridge_moss import predict_no_ui_long_connection
    # from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
    # from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection

    llm_kwargs = {
        'max_length': 512,
        'top_p': 1,
        'temperature': 1,
    }

    result = predict_no_ui_long_connection(inputs="你好",  # "Hello"
                                           llm_kwargs=llm_kwargs,
                                           history=[],
                                           sys_prompt="")
    print('final result:', result)

    result = predict_no_ui_long_connection(inputs="what is a hero?",
                                           llm_kwargs=llm_kwargs,
                                           history=["hello world"],
                                           sys_prompt="")
    print('final result:', result)

    result = predict_no_ui_long_connection(inputs="如何理解传奇?",  # "How should one understand legends?"
                                           llm_kwargs=llm_kwargs,
                                           history=[],
                                           sys_prompt="")
    print('final result:', result)
# # print(result)
# from multiprocessing import Process, Pipe
# class GetGLMHandle(Process):
#     def __init__(self):
#         super().__init__(daemon=True)
#         pass
#     def run(self):
#         # executed in the child process
#         # first run: load parameters
#         def validate_path():
#             import os, sys
#             dir_name = os.path.dirname(__file__)
#             root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
#             os.chdir(root_dir_assume + '/request_llm/jittorllms')
#             sys.path.append(root_dir_assume + '/request_llm/jittorllms')
#         validate_path()  # validate path so you can run from base directory
#         jittorllms_model = None
#         import types
#         try:
#             if jittorllms_model is None:
#                 from models import get_model
#                 # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
#                 args_dict = {'model': 'chatrwkv'}
#                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
#                 jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
#                 print('done get model')
#         except:
#             # self.child.send('[Local Message] Call jittorllms fail: cannot load jittorllms parameters properly.')
#             raise RuntimeError("Cannot load jittorllms parameters properly.")
# x = GetGLMHandle()
# x.start()
# input()