diff --git a/request_llm/bridge_newbing.py b/request_llm/bridge_newbing.py
index 3bc59eb..986dc56 100644
--- a/request_llm/bridge_newbing.py
+++ b/request_llm/bridge_newbing.py
@@ -16,7 +16,6 @@ import ssl
 import sys
 import uuid
 from enum import Enum
-from pathlib import Path
 from typing import Generator
 from typing import Literal
 from typing import Optional
@@ -354,7 +353,7 @@ class Chatbot:
     async def ask(
         self,
         prompt: str,
-        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
+        wss_link: str,
         conversation_style: CONVERSATION_STYLE_TYPE = None,
         options: dict = None,
     ) -> dict:
@@ -375,7 +374,7 @@ class Chatbot:
     async def ask_stream(
         self,
         prompt: str,
-        wss_link: str = "wss://sydney.bing.com/sydney/ChatHub",
+        wss_link: str,
         conversation_style: CONVERSATION_STYLE_TYPE = None,
         raw: bool = False,
         options: dict = None,
@@ -403,7 +402,7 @@ class Chatbot:
         Reset the conversation
         """
         await self.close()
-        self.chat_hub = _ChatHub(_Conversation(self.cookies))
+        self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy))
 
 
@@ -411,13 +410,14 @@
 load_message = ""
 
 """
 ========================================================================
-Part 2: the subprocess worker
+Part 2: the subprocess worker (the calling entity)
 ========================================================================
 """
 import time
 import importlib
 from toolbox import update_ui, get_conf, trimmed_format_exc
 from multiprocessing import Process, Pipe
+
 class GetNewBingHandle(Process):
     def __init__(self):
         super().__init__(daemon=True)
@@ -431,7 +431,8 @@ class GetNewBingHandle(Process):
     def check_dependency(self):
         try:
-            self.info = "Dependency check passed"
+            import rich
+            self.info = "Dependency check passed. Waiting for the NewBing response. Note that, for now, multiple users must not call the NewBing interface at the same time, otherwise each user's NewBing conversation history will bleed into the others'. Calls to NewBing automatically use the configured proxy."
             self.success = True
         except:
             self.info = "Missing dependencies. To use NewBing, in addition to the basic pip dependencies, you also need to run `pip install -r request_llm/requirements_newbing.txt` to install NewBing's dependencies."
@@ -440,34 +441,77 @@
     def ready(self):
         return self.newbing_model is not None
 
-    async def async_run(self, question, history):
+    async def async_run(self):
         # Read the configuration
         NEWBING_STYLE, = get_conf('NEWBING_STYLE')
         from request_llm.bridge_all import model_info
         endpoint = model_info['newbing']['endpoint']
-
-        # Start asking the question
-        self.local_history.append(question)
-        async for final, response in self.newbing_model.ask_stream(
-            prompt=question,
-            conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
-            wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
-        ):
-            if not final:
-                self.child.send(str(response))
-                print(response)
+        while True:
+            # Wait for the next request
+            kwargs = self.child.recv()
+            question = kwargs['query']
+            history = kwargs['history']
+            system_prompt = kwargs['system_prompt']
+            # Reset the conversation when the caller passes an empty history
+            if len(self.local_history) > 0 and len(history) == 0:
+                await self.newbing_model.reset()
+                self.local_history = []
+
+            # Begin assembling the question
+            prompt = ""
+            if system_prompt not in self.local_history:
+                self.local_history.append(system_prompt)
+                prompt += system_prompt + '\n'
+
+            # Append the history
+            for ab in history:
+                a, b = ab
+                if a not in self.local_history:
+                    self.local_history.append(a)
+                    prompt += a + '\n'
+                if b not in self.local_history:
+                    self.local_history.append(b)
+                    prompt += b + '\n'
+
+            # The question itself
+            prompt += question
+            self.local_history.append(question)
+
+            # Submit the assembled prompt (submitting the bare question would silently drop the system prompt and history built above)
+            async for final, response in self.newbing_model.ask_stream(
+                prompt=prompt,
+                conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
+                wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
+            ):
+                if not final:
+                    print(response)
+                    self.child.send(str(response))
+                else:
+                    print('-------- receive final ---------')
+                    self.child.send('[Finish]')
+
+    def run(self):
+        """
+        This function runs in the child process.
+        """
         # First run: load the parameters
         retry = 0
         self.local_history = []
         while True:
             try:
                 if self.newbing_model is None:
+                    # Proxy settings
                     proxies, = get_conf('proxies')
+                    if proxies is None:
+                        self.proxies_https = None
+                    else:
+                        self.proxies_https = proxies['https']
+
                     NEWBING_COOKIES, = get_conf('NEWBING_COOKIES')
                     cookies = json.loads(NEWBING_COOKIES)
-                    self.newbing_model = Chatbot(proxy=proxies['https'], cookies=cookies)
+                    self.newbing_model = Chatbot(proxy=self.proxies_https, cookies=cookies)
                     break
                 else:
                     break
@@ -479,23 +523,24 @@
                 raise RuntimeError("Cannot load the NewBing component.")
 
         # Enter the task-waiting state
-        while True:
-            kwargs = self.child.recv()
-            try:
-                asyncio.run(self.async_run(question=kwargs['query'], history=kwargs['history']))
-            except:
-                tb_str = '```\n' + trimmed_format_exc() + '```'
-                self.child.send('[Local Message] NewBing failed.')
+        try:
+            asyncio.run(self.async_run())
+        except Exception:
+            tb_str = '```\n' + trimmed_format_exc() + '```'
+            self.child.send(f'[Local Message] NewBing failed {tb_str}.')
             self.child.send('[Finish]')
 
     def stream_chat(self, **kwargs):
-        self.parent.send(kwargs)
+        """
+        This function runs in the main process.
+        """
+        self.parent.send(kwargs) # send the request to the child process
         while True:
-            res = self.parent.recv()
+            res = self.parent.recv() # wait for a fragment of the NewBing reply
             if res != '[Finish]':
-                yield res
+                yield res # a fragment of the NewBing reply
             else:
-                break
+                break # done
         return
@@ -523,13 +568,12 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     # There is no sys_prompt interface, so the prompt is merged into the history
     history_feedin = []
-    history_feedin.append(["What can I do?", sys_prompt])
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     watch_dog_patience = 5 # watchdog patience: 5 seconds is enough
     response = ""
-    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         observe_window[0] = response
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
@@ -543,7 +587,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     Single-threaded method
     For documentation of this function, see request_llm/bridge_all.py
     """
-    chatbot.append((inputs, ""))
+    chatbot.append((inputs, "[Local Message]: Waiting for the NewBing response ..."))
 
     global newbing_handle
     if newbing_handle is None or (not newbing_handle.success):
@@ -562,13 +606,23 @@
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
 
     history_feedin = []
-    history_feedin.append(["What can I do?", system_prompt] )
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]] )
 
-    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        chatbot[-1] = (inputs, response)
-        yield from update_ui(chatbot=chatbot, history=history)
+    yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly; the reply is not complete yet. Please wait for it to finish before submitting a new question.")
+    for response in newbing_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, preprocess_newbing_out(response))
+        yield from update_ui(chatbot=chatbot, history=history, msg="NewBing responds slowly; the reply is not complete yet. Please wait for it to finish before submitting a new question.")
 
-    history.extend([inputs, response])
-    yield from update_ui(chatbot=chatbot, history=history)
\ No newline at end of file
+    history.extend([inputs, preprocess_newbing_out(response)])
+    yield from update_ui(chatbot=chatbot, history=history, msg="The response is complete; you may submit a new question.")
+
+def preprocess_newbing_out(s):
+    pattern = r'\^(\d+)\^'  # match NewBing's ^number^ citation markers
+    sub = lambda m: r'\[' + m.group(1) + r'\]'  # escape the brackets so markdown keeps them literal
+    result = re.sub(pattern, sub, s)  # perform the substitution
+
+    if '[1]' in result:
+        # repeat the reference lines ("[n]: url") in a fenced block at the end
+        result += '\n\n```\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
+
+    return result
\ No newline at end of file
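
To make the citation rewriting concrete, here is a self-contained demonstration of the `preprocess_newbing_out` transformation added at the end of the patch. The sample string is invented for illustration; real NewBing replies interleave `^n^` citation markers with `[n]: url` reference lines.

```python
import re

def preprocess_newbing_out(s):
    # Same transformation as in the patch: turn ^n^ citation markers into
    # escaped \[n\] so markdown does not swallow the brackets.
    result = re.sub(r'\^(\d+)\^', lambda m: r'\[' + m.group(1) + r'\]', s)
    # If reference lines are present, repeat them in a fenced block so they
    # stay visible after markdown rendering.
    if '[1]' in result:
        refs = "\n".join(r for r in result.split('\n') if r.startswith('['))
        result += '\n\n```\n' + refs + '\n```\n'
    return result

# Invented sample input, for illustration only
sample = "Python is widely used^1^ in research^2^.\n[1]: https://www.python.org/\n[2]: https://example.org/"
print(preprocess_newbing_out(sample))
```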
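The less obvious part of the patch is the process layout: `run`/`async_run` execute in a child process, `stream_chat` executes in the main process, and the two sides talk over a `multiprocessing.Pipe`, with the string `'[Finish]'` as the end-of-answer sentinel. The sketch below reproduces just that protocol under the same structure; `StreamWorker` and its word-echoing stand-in for `ask_stream` are hypothetical, not part of the patch.

```python
# Minimal sketch of the parent/child streaming protocol used by
# GetNewBingHandle: one Pipe, one request dict in, many response
# fragments out, terminated by the '[Finish]' sentinel.
from multiprocessing import Process, Pipe

class StreamWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()

    def run(self):
        # Runs in the child process: serve requests forever.
        while True:
            kwargs = self.child.recv()            # wait for a request
            for word in kwargs['query'].split():  # stand-in for ask_stream
                self.child.send(word)             # stream one fragment back
            self.child.send('[Finish]')           # end-of-answer sentinel

    def stream_chat(self, **kwargs):
        # Runs in the main process: a generator yielding fragments.
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == '__main__':
    worker = StreamWorker()
    worker.start()
    for fragment in worker.stream_chat(query="streamed word by word"):
        print(fragment)
```

Because both Pipe ends live as attributes on the Process object, the same handle works whether the platform forks or spawns, and the daemon flag ensures the busy-looping child exits with the main process.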