From 03ba072c16ea8ab126f902b3327089861520bd66 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Mon, 17 Apr 2023 20:34:55 +0800
Subject: [PATCH] Improve the Word-document summarization feature
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functions/crazy_functions_test.py | 43 ++++++++++++
 crazy_functions/总结word文档.py          | 92 +++++++++++--------------
 request_llm/bridge_all.py               | 40 ++++++++++-
 3 files changed, 122 insertions(+), 53 deletions(-)
 create mode 100644 crazy_functions/crazy_functions_test.py

diff --git a/crazy_functions/crazy_functions_test.py b/crazy_functions/crazy_functions_test.py
new file mode 100644
index 0000000..4df55a8
--- /dev/null
+++ b/crazy_functions/crazy_functions_test.py
@@ -0,0 +1,43 @@
+"""
+What is this?
+    This file contains unit tests for the function plugins.
+    How to run: python crazy_functions/crazy_functions_test.py
+"""
+
+def validate_path():
+    import os, sys
+    dir_name = os.path.dirname(__file__)
+    root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+    os.chdir(root_dir_assume)
+    sys.path.append(root_dir_assume)
+
+validate_path()  # validate the path so the tests can be run from the repository root
+
+from toolbox import get_conf, ChatBotWithCookies
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+
+llm_kwargs = {
+    'api_key': API_KEY,
+    'llm_model': LLM_MODEL,
+    'top_p': 1.0,
+    'max_length': None,
+    'temperature': 1.0,
+}
+plugin_kwargs = {}
+chatbot = ChatBotWithCookies(llm_kwargs)
+history = []
+system_prompt = "Serve me as a writing and programming assistant."
+web_port = 1024
+
+
+def test_总结word文档():
+    from crazy_functions.总结word文档 import 总结word文档
+    txt = "F:/AMD"
+    for _ in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+        pass
+
+test_总结word文档()
+
+input("程序完成,回车退出。")
+print("退出。")
\ No newline at end of file
diff --git a/crazy_functions/总结word文档.py b/crazy_functions/总结word文档.py
index 742c7ab..f1fe201 100644
--- a/crazy_functions/总结word文档.py
+++ b/crazy_functions/总结word文档.py
@@ -8,8 +8,6 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
     import time, os
     # pip install python-docx   (for the docx format, cross-platform)
     # pip install pywin32       (for the doc format, Windows only)
-
-    print('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):
         if fp.split(".")[-1] == "docx":
             from docx import Document
@@ -29,18 +27,20 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
             word.Quit()
 
         print(file_content)
-
-        prefix = "接下来请你逐文件分析下面的论文文件," if index == 0 else ""
-        # Filenames under private_upload often come out garbled after unzipping (rar and 7z are fine), so analyze only the article content and leave the filename out
-        i_say = prefix + f'请对下面的文章片段用中英文做概述,文件名是{os.path.relpath(fp, project_folder)},' \
-            f'文章内容是 ```{file_content}```'
-        i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
+        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+        from request_llm.bridge_all import model_info
+        max_token = model_info[llm_kwargs['llm_model']]['max_token']
+        TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4  # reserve a quarter of the context window for the reply
+        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
+            txt=file_content,
+            get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
+            limit=TOKEN_LIMIT_PER_FRAGMENT
+        )
+        this_paper_history = []
+        for i, paper_frag in enumerate(paper_fragments):
+            i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'
+            i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。'
             gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                 inputs=i_say,
                 inputs_show_user=i_say_show_user,
@@ -48,46 +48,34 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
                 chatbot=chatbot,
                 history=[],
                 sys_prompt="总结文章。"
-            )  # with a timeout countdown
+            )
+
             chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user)
-            history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
-            if not fast_debug: time.sleep(2)
+            history.extend([i_say_show_user, gpt_say])
+            this_paper_history.extend([i_say_show_user, gpt_say])
 
-        """
-        # enable as needed
-        i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。'
-        chatbot.append((i_say, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+        # All fragments of this document have been summarized; if the document was
+        # split into more than one fragment, ask for an overall summary as well
+        if len(paper_fragments) > 1:
+            i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。"
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs=i_say,
+                inputs_show_user=i_say,
+                llm_kwargs=llm_kwargs,
+                chatbot=chatbot,
+                history=this_paper_history,
+                sys_prompt="总结文章。"
+            )
+            history.extend([i_say, gpt_say])
+            this_paper_history.extend([i_say, gpt_say])
 
-        i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \
-            f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \
-            f'根据你之前的分析,提出建议'
-        chatbot.append((i_say, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-        """
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-                inputs=i_say,
-                inputs_show_user=i_say,
-                llm_kwargs=llm_kwargs,
-                chatbot=chatbot,
-                history=history,
-                sys_prompt="总结文章。"
-            )  # with a timeout countdown
-            chatbot[-1] = (i_say, gpt_say)
-            history.append(i_say)
-            history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
         res = write_results_to_file(history)
         chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg)  # refresh the UI
+        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+
+    res = write_results_to_file(history)
+    chatbot.append(("所有文件都总结完成了吗?", res))
+    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
 
 
 @CatchException
@@ -123,11 +111,11 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
         return
 
     # Build the list of files that need to be processed
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \
-        [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)]
-    # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+    if txt.endswith('.docx') or txt.endswith('.doc'):
+        file_manifest = [txt]
+    else:
+        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \
+            [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)]
 
     # If no files were found
     if len(file_manifest) == 0:
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 6fdd846..cd3f9d8 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -8,6 +8,7 @@ Functions capable of multithreaded invocation
     2. predict_no_ui_long_connection: experiments showed that when predict_no_ui handles a long document, the connection to openai drops easily; this function works around that by streaming the response, and it likewise supports multithreaded calls
 """
+import tiktoken
 
 from concurrent.futures import ThreadPoolExecutor
 
@@ -31,6 +32,43 @@ methods = {
     "tgui-ui": tgui_ui,
 }
 
+model_info = {
+    # openai
+    "gpt-3.5-turbo": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+
+    "gpt-4": {
+        "max_token": 8192,  # gpt-4's context window is 8192 tokens, not 4096
+        "tokenizer": tiktoken.encoding_for_model("gpt-4"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
+    },
+
+    # api_2d
+    "gpt-3.5-turbo-api2d": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+
+    "gpt-4-api2d": {
+        "max_token": 8192,  # same window as gpt-4
+        "tokenizer": tiktoken.encoding_for_model("gpt-4"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
+    },
+
+    # chatglm (no tiktoken encoding of its own, so borrow the gpt-3.5-turbo tokenizer as an approximation)
+    "chatglm": {
+        "max_token": 1024,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+}
+
 
 def LLM_CATCH_EXCEPTION(f):
     """
     Decorator function that catches errors and surfaces them as a traceback string
@@ -47,7 +85,7 @@ def LLM_CATCH_EXCEPTION(f):
         return tb_str
     return decorated
 
-colors = ['#FF00FF', '#00FFFF', '#FF0000''#990099', '#009999', '#990044']
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
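
Note on the token-budget logic above: 解析docx looks up the model's context window in model_info, reserves a quarter of it for the model's reply (TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4), and hands that budget plus a token-counting function to breakdown_txt_to_satisfy_token_limit_for_pdf. The body of that helper lives in crazy_utils and is not part of this patch, so the sketch below is only a minimal illustrative stand-in: naive_breakdown and its greedy paragraph packing are assumptions for illustration, not the repository's implementation. The tiktoken calls mirror the token_cnt lambdas added to model_info.

    # A minimal sketch, NOT the repository's implementation: the real helper is
    # crazy_utils.breakdown_txt_to_satisfy_token_limit_for_pdf, whose body this
    # patch does not show. naive_breakdown is a hypothetical stand-in that
    # greedily packs newline-separated paragraphs under a token budget.
    import tiktoken

    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
    token_cnt = lambda txt: len(enc.encode(txt, disallowed_special=()))  # same counting as model_info["gpt-3.5-turbo"]["token_cnt"]

    max_token = 4096                               # model_info["gpt-3.5-turbo"]["max_token"]
    TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4  # 3072; leave ~1/4 of the window for the reply

    def naive_breakdown(txt, get_token_fn, limit):
        fragments, current = [], ""
        for para in txt.split("\n"):
            candidate = current + para + "\n"
            if get_token_fn(candidate) > limit and current:
                # budget exceeded: close the current fragment, start a new one
                fragments.append(current)
                current = para + "\n"
            else:
                current = candidate
        if current:
            fragments.append(current)
        # note: a single paragraph longer than `limit` still becomes its own oversized fragment
        return fragments

    fragments = naive_breakdown("paragraph one\nparagraph two\n" * 2000, token_cnt, TOKEN_LIMIT_PER_FRAGMENT)
    print(len(fragments), [token_cnt(f) for f in fragments])

Each fragment then gets its own summarization request with history=[], while this_paper_history accumulates the per-fragment answers, so that the final len(paper_fragments) > 1 pass summarizes the summaries rather than re-reading the raw document.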
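
The new model_info table also doubles as a small registry: 解析docx reads only max_token and token_cnt from it, so supporting another backend amounts to adding an entry. A hypothetical sketch follows; the "my-local-model" name and its 2048-token window are invented for illustration, and reusing the gpt-3.5-turbo tokenizer as a rough approximation is the same trick the chatglm entry above already uses.

    import tiktoken
    from request_llm.bridge_all import model_info

    # Hypothetical entry: "my-local-model" and the 2048-token window are made up.
    model_info["my-local-model"] = {
        "max_token": 2048,
        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),  # approximation; the model has no tiktoken encoding
        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
    }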