From 127385b8469dc2faa6debab949bd3506ad1c08e5 Mon Sep 17 00:00:00 2001 From: binary-husky Date: Sat, 28 Oct 2023 19:23:43 +0800 Subject: [PATCH] =?UTF-8?q?=E6=8E=A5=E5=85=A5=E6=96=B0=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 7 +- request_llms/bridge_all.py | 16 +++++ request_llms/bridge_chatglm.py | 6 +- request_llms/bridge_chatglmft.py | 6 +- request_llms/bridge_jittorllms_llama.py | 6 +- request_llms/bridge_jittorllms_pangualpha.py | 6 +- request_llms/bridge_jittorllms_rwkv.py | 6 +- request_llms/bridge_moss.py | 6 +- request_llms/bridge_newbingfree.py | 10 +-- request_llms/bridge_qianfan.py | 6 +- request_llms/bridge_spark.py | 6 +- request_llms/bridge_stackclaude.py | 12 ++-- request_llms/bridge_zhipu.py | 59 +++++++++++++++++ request_llms/com_zhipuapi.py | 67 ++++++++++++++++++++ request_llms/local_llm_class.py | 6 +- tests/test_llms.py | 3 +- tests/test_markdown.py | 44 +++++++++++++ toolbox.py | 21 ++++++ 18 files changed, 253 insertions(+), 40 deletions(-) create mode 100644 request_llms/bridge_zhipu.py create mode 100644 request_llms/com_zhipuapi.py create mode 100644 tests/test_markdown.py diff --git a/config.py b/config.py index a18bc4a..6d62a67 100644 --- a/config.py +++ b/config.py @@ -87,7 +87,7 @@ AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k', "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude-2"] -# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" +# P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random" # "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"] @@ -172,6 +172,11 @@ XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +# 接入智谱大模型 +ZHIPUAI_API_KEY = "" +ZHIPUAI_MODEL = "chatglm_turbo" + + # Claude API KEY ANTHROPIC_API_KEY = "" diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py index 70e2c43..37357ed 100644 --- a/request_llms/bridge_all.py +++ b/request_llms/bridge_all.py @@ -483,6 +483,22 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2 }) except: print(trimmed_format_exc()) +if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai + try: + from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui + from .bridge_zhipu import predict as zhipu_ui + model_info.update({ + "zhipuai": { + "fn_with_ui": zhipu_ui, + "fn_without_ui": zhipu_noui, + "endpoint": None, + "max_token": 4096, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + } + }) + except: + print(trimmed_format_exc()) # <-- 用于定义和切换多个azure模型 --> AZURE_CFG_ARRAY, = get_conf("AZURE_CFG_ARRAY") diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py index 194cd1a..3a7cc72 100644 --- a/request_llms/bridge_chatglm.py +++ b/request_llms/bridge_chatglm.py @@ -155,13 +155,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收chatglm的回复 - response = "[Local Message]: 等待ChatGLM响应中 ..." + response = "[Local Message] 等待ChatGLM响应中 ..." 
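For context, the registration block added to request_llms/bridge_all.py above only runs when "zhipuai" appears in AVAIL_LLM_MODELS, so enabling the new model is a one-line config change on top of the keys introduced in config.py. A minimal sketch based on the default list shown in the config.py hunk (the API key value is a placeholder, not part of the patch):

```python
# config.py (sketch): append "zhipuai" so the registration branch in
# bridge_all.py ("if 'zhipuai' in AVAIL_LLM_MODELS:") is taken at startup.
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
                    "api2d-gpt-3.5-turbo", "api2d-gpt-3.5-turbo-16k",
                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
                    "chatglm", "moss", "newbing", "claude-2", "zhipuai"]

ZHIPUAI_API_KEY = "xxxxxxxxxxxxxxxx"   # placeholder; validate_key() rejects an empty string
ZHIPUAI_MODEL = "chatglm_turbo"
```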
for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待ChatGLM响应中 ...": - response = "[Local Message]: ChatGLM响应异常 ..." + if response == "[Local Message] 等待ChatGLM响应中 ...": + response = "[Local Message] ChatGLM响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_chatglmft.py b/request_llms/bridge_chatglmft.py index 8755bc1..63f3604 100644 --- a/request_llms/bridge_chatglmft.py +++ b/request_llms/bridge_chatglmft.py @@ -195,13 +195,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收chatglmft的回复 - response = "[Local Message]: 等待ChatGLMFT响应中 ..." + response = "[Local Message] 等待ChatGLMFT响应中 ..." for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待ChatGLMFT响应中 ...": - response = "[Local Message]: ChatGLMFT响应异常 ..." + if response == "[Local Message] 等待ChatGLMFT响应中 ...": + response = "[Local Message] ChatGLMFT响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_llama.py b/request_llms/bridge_jittorllms_llama.py index 6099cd6..af2d9fb 100644 --- a/request_llms/bridge_jittorllms_llama.py +++ b/request_llms/bridge_jittorllms_llama.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_pangualpha.py b/request_llms/bridge_jittorllms_pangualpha.py index eebefcc..df0523b 100644 --- a/request_llms/bridge_jittorllms_pangualpha.py +++ b/request_llms/bridge_jittorllms_pangualpha.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." 
for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_jittorllms_rwkv.py b/request_llms/bridge_jittorllms_rwkv.py index 32ba3b8..875673a 100644 --- a/request_llms/bridge_jittorllms_rwkv.py +++ b/request_llms/bridge_jittorllms_rwkv.py @@ -163,13 +163,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收jittorllms的回复 - response = "[Local Message]: 等待jittorllms响应中 ..." + response = "[Local Message] 等待jittorllms响应中 ..." for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." + if response == "[Local Message] 等待jittorllms响应中 ...": + response = "[Local Message] jittorllms响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_moss.py b/request_llms/bridge_moss.py index 5061fcf..d7399f5 100644 --- a/request_llms/bridge_moss.py +++ b/request_llms/bridge_moss.py @@ -219,7 +219,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp moss_handle = None return else: - response = "[Local Message]: 等待MOSS响应中 ..." + response = "[Local Message] 等待MOSS响应中 ..." chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) @@ -238,7 +238,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == "[Local Message]: 等待MOSS响应中 ...": - response = "[Local Message]: MOSS响应异常 ..." + if response == "[Local Message] 等待MOSS响应中 ...": + response = "[Local Message] MOSS响应异常 ..." history.extend([inputs, response.strip('<|MOSS|>: ')]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/request_llms/bridge_newbingfree.py b/request_llms/bridge_newbingfree.py index b5bfb30..5dddb61 100644 --- a/request_llms/bridge_newbingfree.py +++ b/request_llms/bridge_newbingfree.py @@ -199,7 +199,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" - if len(observe_window) >= 1: observe_window[0] = "[Local Message]: 等待NewBing响应中 ..." + if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..." 
for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response) if len(observe_window) >= 2: @@ -212,7 +212,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp 单线程方法 函数的说明请见 request_llms/bridge_all.py """ - chatbot.append((inputs, "[Local Message]: 等待NewBing响应中 ...")) + chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ...")) global newbingfree_handle if (newbingfree_handle is None) or (not newbingfree_handle.success): @@ -231,13 +231,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp for i in range(len(history)//2): history_feedin.append([history[2*i], history[2*i+1]] ) - chatbot[-1] = (inputs, "[Local Message]: 等待NewBing响应中 ...") - response = "[Local Message]: 等待NewBing响应中 ..." + chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...") + response = "[Local Message] 等待NewBing响应中 ..." yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, preprocess_newbing_out(response)) yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") - if response == "[Local Message]: 等待NewBing响应中 ...": response = "[Local Message]: NewBing响应异常,请刷新界面重试 ..." + if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常,请刷新界面重试 ..." history.extend([inputs, response]) logging.info(f'[raw_input] {inputs}') logging.info(f'[response] {response}') diff --git a/request_llms/bridge_qianfan.py b/request_llms/bridge_qianfan.py index bf78a34..99f0623 100644 --- a/request_llms/bridge_qianfan.py +++ b/request_llms/bridge_qianfan.py @@ -158,8 +158,8 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp return # 总结输出 - response = f"[Local Message]: {model_name}响应异常 ..." - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + response = f"[Local Message] {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." 
history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/bridge_spark.py b/request_llms/bridge_spark.py index 8c7bf59..d6ff42f 100644 --- a/request_llms/bridge_spark.py +++ b/request_llms/bridge_spark.py @@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) if validate_key() is False: - yield from update_ui_lastest_msg(lastmsg="[Local Message]: 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0) + yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置讯飞星火大模型的XFYUN_APPID, XFYUN_API_KEY, XFYUN_API_SECRET", chatbot=chatbot, history=history, delay=0) return if additional_fn is not None: @@ -57,7 +57,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/bridge_stackclaude.py b/request_llms/bridge_stackclaude.py index 48612b3..3b57615 100644 --- a/request_llms/bridge_stackclaude.py +++ b/request_llms/bridge_stackclaude.py @@ -222,7 +222,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 response = "" - observe_window[0] = "[Local Message]: 等待Claude响应中 ..." + observe_window[0] = "[Local Message] 等待Claude响应中 ..." for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): observe_window[0] = preprocess_newbing_out_simple(response) if len(observe_window) >= 2: @@ -236,7 +236,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp 单线程方法 函数的说明请见 request_llms/bridge_all.py """ - chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ...")) + chatbot.append((inputs, "[Local Message] 等待Claude响应中 ...")) global claude_handle if (claude_handle is None) or (not claude_handle.success): @@ -255,14 +255,14 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp for i in range(len(history)//2): history_feedin.append([history[2*i], history[2*i+1]]) - chatbot[-1] = (inputs, "[Local Message]: 等待Claude响应中 ...") - response = "[Local Message]: 等待Claude响应中 ..." + chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...") + response = "[Local Message] 等待Claude响应中 ..." yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt): chatbot[-1] = (inputs, preprocess_newbing_out(response)) yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") - if response == "[Local Message]: 等待Claude响应中 ...": - response = "[Local Message]: Claude响应异常,请刷新界面重试 ..." + if response == "[Local Message] 等待Claude响应中 ...": + response = "[Local Message] Claude响应异常,请刷新界面重试 ..." 
history.extend([inputs, response]) logging.info(f'[raw_input] {inputs}') logging.info(f'[response] {response}') diff --git a/request_llms/bridge_zhipu.py b/request_llms/bridge_zhipu.py new file mode 100644 index 0000000..2fd19ad --- /dev/null +++ b/request_llms/bridge_zhipu.py @@ -0,0 +1,59 @@ + +import time +from toolbox import update_ui, get_conf, update_ui_lastest_msg + +model_name = '智谱AI大模型' + +def validate_key(): + ZHIPUAI_API_KEY, = get_conf("ZHIPUAI_API_KEY") + if ZHIPUAI_API_KEY == '': return False + return True + +def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): + """ + ⭐多线程方法 + 函数的说明请见 request_llms/bridge_all.py + """ + watch_dog_patience = 5 + response = "" + + if validate_key() is False: + raise RuntimeError('请配置ZHIPUAI_API_KEY') + + from .com_zhipuapi import ZhipuRequestInstance + sri = ZhipuRequestInstance() + for response in sri.generate(inputs, llm_kwargs, history, sys_prompt): + if len(observe_window) >= 1: + observe_window[0] = response + if len(observe_window) >= 2: + if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。") + return response + +def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): + """ + ⭐单线程方法 + 函数的说明请见 request_llms/bridge_all.py + """ + chatbot.append((inputs, "")) + yield from update_ui(chatbot=chatbot, history=history) + + if validate_key() is False: + yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置ZHIPUAI_API_KEY", chatbot=chatbot, history=history, delay=0) + return + + if additional_fn is not None: + from core_functional import handle_core_functionality + inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot) + + # 开始接收回复 + from .com_zhipuapi import ZhipuRequestInstance + sri = ZhipuRequestInstance() + for response in sri.generate(inputs, llm_kwargs, history, system_prompt): + chatbot[-1] = (inputs, response) + yield from update_ui(chatbot=chatbot, history=history) + + # 总结输出 + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." + history.extend([inputs, response]) + yield from update_ui(chatbot=chatbot, history=history) \ No newline at end of file diff --git a/request_llms/com_zhipuapi.py b/request_llms/com_zhipuapi.py new file mode 100644 index 0000000..445720d --- /dev/null +++ b/request_llms/com_zhipuapi.py @@ -0,0 +1,67 @@ +from toolbox import get_conf +import threading +import logging + +timeout_bot_msg = '[Local Message] Request timeout. Network error.' 
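Before the com_zhipuapi.py listing continues below, a quick usage sketch of the entry points defined in bridge_zhipu.py above. It is modeled on how tests/test_llms.py (updated later in this patch) drives the other bridges; it assumes a valid ZHIPUAI_API_KEY is configured, and the prompt and history strings are only illustrative:

```python
# Sketch: exercise the new zhipuai bridge directly, outside the Gradio UI.
from request_llms.bridge_zhipu import predict_no_ui_long_connection

llm_kwargs = {
    'max_length': 4096,   # kept for signature compatibility with the other bridges
    'top_p': 1,
    'temperature': 1,
}

result = predict_no_ui_long_connection(
    inputs="请问什么是质子",
    llm_kwargs=llm_kwargs,
    history=["你好", "我好!"],
    sys_prompt="",
)
print('final result:', result)
```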
+ +class ZhipuRequestInstance(): + def __init__(self): + + self.time_to_yield_event = threading.Event() + self.time_to_exit_event = threading.Event() + + self.result_buf = "" + + def generate(self, inputs, llm_kwargs, history, system_prompt): + # import _thread as thread + import zhipuai + ZHIPUAI_API_KEY, ZHIPUAI_MODEL = get_conf("ZHIPUAI_API_KEY", "ZHIPUAI_MODEL") + zhipuai.api_key = ZHIPUAI_API_KEY + self.result_buf = "" + response = zhipuai.model_api.sse_invoke( + model=ZHIPUAI_MODEL, + prompt=generate_message_payload(inputs, llm_kwargs, history, system_prompt), + top_p=llm_kwargs['top_p'], + temperature=llm_kwargs['temperature'], + ) + for event in response.events(): + if event.event == "add": + self.result_buf += event.data + yield self.result_buf + elif event.event == "error" or event.event == "interrupted": + raise RuntimeError("Unknown error:" + event.data) + elif event.event == "finish": + yield self.result_buf + break + else: + raise RuntimeError("Unknown error:" + str(event)) + + logging.info(f'[raw_input] {inputs}') + logging.info(f'[response] {self.result_buf}') + return self.result_buf + +def generate_message_payload(inputs, llm_kwargs, history, system_prompt): + conversation_cnt = len(history) // 2 + messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}] + if conversation_cnt: + for index in range(0, 2*conversation_cnt, 2): + what_i_have_asked = {} + what_i_have_asked["role"] = "user" + what_i_have_asked["content"] = history[index] + what_gpt_answer = {} + what_gpt_answer["role"] = "assistant" + what_gpt_answer["content"] = history[index+1] + if what_i_have_asked["content"] != "": + if what_gpt_answer["content"] == "": + continue + if what_gpt_answer["content"] == timeout_bot_msg: + continue + messages.append(what_i_have_asked) + messages.append(what_gpt_answer) + else: + messages[-1]['content'] = what_gpt_answer['content'] + what_i_ask_now = {} + what_i_ask_now["role"] = "user" + what_i_ask_now["content"] = inputs + messages.append(what_i_ask_now) + return messages diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py index e742d51..a421ddf 100644 --- a/request_llms/local_llm_class.py +++ b/request_llms/local_llm_class.py @@ -166,14 +166,14 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name): history_feedin.append([history[2*i], history[2*i+1]] ) # 开始接收回复 - response = f"[Local Message]: 等待{model_name}响应中 ..." + response = f"[Local Message] 等待{model_name}响应中 ..." for response in _llm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): chatbot[-1] = (inputs, response) yield from update_ui(chatbot=chatbot, history=history) # 总结输出 - if response == f"[Local Message]: 等待{model_name}响应中 ...": - response = f"[Local Message]: {model_name}响应异常 ..." + if response == f"[Local Message] 等待{model_name}响应中 ...": + response = f"[Local Message] {model_name}响应异常 ..." 
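To make the conversation format explicit, the sketch below spells out the message list that generate_message_payload() in com_zhipuapi.py above produces for a short exchange (sample strings are made up; the structure follows the code): the system prompt is folded into an opening user turn, answered by a fixed "Certainly!", then the past user/assistant pairs, then the new question.

```python
# Expected shape of generate_message_payload(inputs, llm_kwargs, history, system_prompt)
# for a two-entry history (illustrative values only):
history = ["1+1等于几?", "1+1等于2。"]
system_prompt = "You are a helpful assistant."
inputs = "那3+3呢?"

expected_messages = [
    {"role": "user",      "content": system_prompt},  # system prompt sent as a user turn
    {"role": "assistant", "content": "Certainly!"},   # fixed acknowledgement
    {"role": "user",      "content": history[0]},     # earlier question
    {"role": "assistant", "content": history[1]},     # earlier answer (skipped if empty or a timeout message)
    {"role": "user",      "content": inputs},         # the new question
]
```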
history.extend([inputs, response]) yield from update_ui(chatbot=chatbot, history=history) diff --git a/tests/test_llms.py b/tests/test_llms.py index 6b7019d..f43f368 100644 --- a/tests/test_llms.py +++ b/tests/test_llms.py @@ -17,7 +17,8 @@ if __name__ == "__main__": # from request_llms.bridge_claude import predict_no_ui_long_connection # from request_llms.bridge_internlm import predict_no_ui_long_connection # from request_llms.bridge_qwen import predict_no_ui_long_connection - from request_llms.bridge_spark import predict_no_ui_long_connection + # from request_llms.bridge_spark import predict_no_ui_long_connection + from request_llms.bridge_zhipu import predict_no_ui_long_connection llm_kwargs = { 'max_length': 4096, diff --git a/tests/test_markdown.py b/tests/test_markdown.py new file mode 100644 index 0000000..c92b4c4 --- /dev/null +++ b/tests/test_markdown.py @@ -0,0 +1,44 @@ +md = """ +作为您的写作和编程助手,我可以为您提供以下服务: + +1. 写作: + - 帮助您撰写文章、报告、散文、故事等。 + - 提供写作建议和技巧。 + - 协助您进行文案策划和内容创作。 + +2. 编程: + - 帮助您解决编程问题,提供编程思路和建议。 + - 协助您编写代码,包括但不限于 Python、Java、C++ 等。 + - 为您解释复杂的技术概念,让您更容易理解。 + +3. 项目支持: + - 协助您规划项目进度和任务分配。 + - 提供项目管理和协作建议。 + - 在项目实施过程中提供支持,确保项目顺利进行。 + +4. 学习辅导: + - 帮助您巩固编程基础,提高编程能力。 + - 提供计算机科学、数据科学、人工智能等相关领域的学习资源和建议。 + - 解答您在学习过程中遇到的问题,让您更好地掌握知识。 + +5. 行业动态和趋势分析: + - 为您提供业界最新的新闻和技术趋势。 + - 分析行业动态,帮助您了解市场发展和竞争态势。 + - 为您制定技术战略提供参考和建议。 + +请随时告诉我您的需求,我会尽力提供帮助。如果您有任何问题或需要解答的议题,请随时提问。 +""" + +def validate_path(): + import os, sys + dir_name = os.path.dirname(__file__) + root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') + os.chdir(root_dir_assume) + sys.path.append(root_dir_assume) +validate_path() # validate path so you can run from base directory +from toolbox import markdown_convertion + +html = markdown_convertion(md) +print(html) +with open('test.html', 'w', encoding='utf-8') as f: + f.write(html) \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index 4a783a3..137313e 100644 --- a/toolbox.py +++ b/toolbox.py @@ -7,6 +7,7 @@ import os import gradio import shutil import glob +import math from latex2mathml.converter import convert as tex2mathml from functools import wraps, lru_cache pj = os.path.join @@ -372,6 +373,26 @@ def markdown_convertion(txt): contain_any_eq = True return contain_any_eq + def fix_markdown_indent(txt): + # fix markdown indent + if (' - ' not in txt) or ('. ' not in txt): + return txt # do not need to fix, fast escape + # walk through the lines and fix non-standard indentation + lines = txt.split("\n") + pattern = re.compile(r'^\s+-') + activated = False + for i, line in enumerate(lines): + if line.startswith('- ') or line.startswith('1. '): + activated = True + if activated and pattern.match(line): + stripped_string = line.lstrip() + num_spaces = len(line) - len(stripped_string) + if (num_spaces % 4) == 3: + num_spaces_should_be = math.ceil(num_spaces/4) * 4 + lines[i] = ' ' * num_spaces_should_be + stripped_string + return '\n'.join(lines) + + txt = fix_markdown_indent(txt) if is_equation(txt): # 有$标识的公式符号,且没有代码段```的标识 # convert everything to html format split = markdown.markdown(text='---')
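Finally, the fix_markdown_indent() helper added to toolbox.py above normalizes the non-standard 3-space (or 7-space, and so on) nested-list indentation that some models emit, which plain Markdown would otherwise render as a flat list. A standalone sketch of the same logic, runnable outside the project, with a tiny before/after example:

```python
import math
import re

def fix_markdown_indent(txt):
    # Re-statement of the helper added to toolbox.py, for illustration only.
    if (' - ' not in txt) or ('. ' not in txt):
        return txt  # nothing that looks like a nested list, fast escape
    lines = txt.split("\n")
    pattern = re.compile(r'^\s+-')
    activated = False
    for i, line in enumerate(lines):
        if line.startswith('- ') or line.startswith('1. '):
            activated = True
        if activated and pattern.match(line):
            stripped = line.lstrip()
            num_spaces = len(line) - len(stripped)
            if (num_spaces % 4) == 3:
                # e.g. 3 -> 4 or 7 -> 8: round the indent up to a multiple of 4
                lines[i] = ' ' * (math.ceil(num_spaces / 4) * 4) + stripped
    return '\n'.join(lines)

sample = "1. 编程:\n   - 帮助您解决编程问题。\n   - 协助您编写代码。"
print(fix_markdown_indent(sample))  # nested items are re-indented to 4 spaces
```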