Improve the Word document summarization feature (改善word总结功能)
commit 03ba072c16
parent 2472185de9

crazy_functions/crazy_functions_test.py (new file, +43 lines)
@@ -0,0 +1,43 @@
+"""
+这是什么?
+    这个文件用于函数插件的单元测试
+    运行方法 python crazy_functions/crazy_functions_test.py
+"""
+
+def validate_path():
+    import os, sys
+    dir_name = os.path.dirname(__file__)
+    root_dir_assume = os.path.abspath(os.path.dirname(__file__) +  '/..')
+    os.chdir(root_dir_assume)
+    sys.path.append(root_dir_assume)
+
+validate_path() # validate path so you can run from base directory
+
+from toolbox import get_conf, ChatBotWithCookies
+proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY = \
+    get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY')
+
+llm_kwargs = {
+    'api_key': API_KEY,
+    'llm_model': LLM_MODEL,
+    'top_p':1.0, 
+    'max_length': None,
+    'temperature':1.0,
+}
+plugin_kwargs = { }
+chatbot = ChatBotWithCookies(llm_kwargs)
+history = []
+system_prompt = "Serve me as a writing and programming assistant."
+web_port = 1024
+
+
+def test_总结word文档():
+    from crazy_functions.总结word文档 import 总结word文档
+    txt = "F:/AMD"
+    for _ in 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+        pass
+
+test_总结word文档()
+
+input("程序完成,回车退出。")
+print("退出。")
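Note on the harness above: function plugins in this repo are generators that yield whenever the UI should refresh, so the test drives 总结word文档 simply by draining it with "for _ in ...: pass". A minimal sketch of that pattern, with mock_plugin as a hypothetical stand-in for a real plugin:

    # mock_plugin is a hypothetical stand-in for a real plugin such as 总结word文档.
    def mock_plugin(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        chatbot.append((txt, "[Local Message] working..."))
        yield None  # each yield hands control back so the caller can refresh the UI
        chatbot.append((txt, "done"))
        yield None

    chatbot = []
    for _ in mock_plugin("F:/AMD", {}, {}, chatbot, [], "", 1024):
        pass  # drain the generator; results accumulate in chatbot/history
    assert chatbot[-1][1] == "done"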
crazy_functions/总结word文档.py
@@ -8,8 +8,6 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
     import time, os
     # pip install python-docx 用于docx格式,跨平台
     # pip install pywin32 用于doc格式,仅支持Win平台
-
-    print('begin analysis on:', file_manifest)
     for index, fp in enumerate(file_manifest):
         if fp.split(".")[-1] == "docx":
             from docx import Document
@@ -29,18 +27,20 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
             word.Quit()
 
         print(file_content)
-
-        prefix = "接下来请你逐文件分析下面的论文文件," if index == 0 else ""
         # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
-        i_say = prefix + f'请对下面的文章片段用中英文做概述,文件名是{os.path.relpath(fp, project_folder)},' \
-                         f'文章内容是 ```{file_content}```'
-        i_say_show_user = prefix + f'[{index+1}/{len(file_manifest)}] 假设你是论文审稿专家,请对下面的文章片段做概述: {os.path.abspath(fp)}'
-        chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
+        from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+        from request_llm.bridge_all import model_info
+        max_token = model_info[llm_kwargs['llm_model']]['max_token']
+        TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
+        paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
+            txt=file_content,
+            get_token_fn=model_info[llm_kwargs['llm_model']]['token_cnt'],
+            limit=TOKEN_LIMIT_PER_FRAGMENT
+        )
+        this_paper_history = []
+        for i, paper_frag in enumerate(paper_fragments):
+            i_say = f'请对下面的文章片段用中文做概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{paper_frag}```'
+            i_say_show_user = f'请对下面的文章片段做概述: {os.path.abspath(fp)}的第{i+1}/{len(paper_fragments)}个片段。'
             gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                 inputs=i_say, 
                 inputs_show_user=i_say_show_user, 
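The hunk above reserves a quarter of the model's context window for the reply (max_token * 3 // 4) and delegates the splitting itself to breakdown_txt_to_satisfy_token_limit_for_pdf from crazy_utils, whose body is not part of this diff. As an illustration only, not the repo's implementation, a splitter honoring the same interface could recursively bisect until every fragment fits the budget:

    # Illustrative sketch; the real helper lives in crazy_functions/crazy_utils.py.
    # get_token_fn matches the model_info['token_cnt'] interface introduced below.
    def split_to_token_limit(txt, get_token_fn, limit):
        if len(txt) <= 1 or get_token_fn(txt) <= limit:
            return [txt]
        mid = len(txt) // 2  # naive bisection; a real splitter would prefer line breaks
        return (split_to_token_limit(txt[:mid], get_token_fn, limit) +
                split_to_token_limit(txt[mid:], get_token_fn, limit))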
@@ -48,46 +48,34 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
                 chatbot=chatbot, 
                 history=[],
                 sys_prompt="总结文章。"
-            )  # 带超时倒计时
+            )
 
             chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user)
-            history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-            if not fast_debug: time.sleep(2)
+            history.extend([i_say_show_user,gpt_say])
+            this_paper_history.extend([i_say_show_user,gpt_say])
 
-    """
-    # 可按需启用
-    i_say = f'根据你上述的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一篇英文的。'
-    chatbot.append((i_say, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    i_say = f'我想让你做一个论文写作导师。您的任务是使用人工智能工具(例如自然语言处理)提供有关如何改进其上述文章的反馈。' \
-            f'您还应该利用您在有效写作技巧方面的修辞知识和经验来建议作者可以更好地以书面形式表达他们的想法和想法的方法。' \
-            f'根据你之前的分析,提出建议'
-    chatbot.append((i_say, "[Local Message] waiting gpt response."))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    """
-
-    if not fast_debug:
-        msg = '正常'
-        # ** gpt request **
-        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
-            inputs=i_say, 
-            inputs_show_user=i_say, 
-            llm_kwargs=llm_kwargs,
-            chatbot=chatbot, 
-            history=history,
-            sys_prompt="总结文章。"
-        )  # 带超时倒计时
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say)
-        history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+        # 已经对该文章的所有片段总结完毕,如果文章被切分了,
+        if len(paper_fragments) > 1:
+            i_say = f"根据以上的对话,总结文章{os.path.abspath(fp)}的主要内容。"
+            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+                inputs=i_say, 
+                inputs_show_user=i_say, 
+                llm_kwargs=llm_kwargs,
+                chatbot=chatbot, 
+                history=this_paper_history,
+                sys_prompt="总结文章。"
+            )
+
+            history.extend([i_say,gpt_say])
+            this_paper_history.extend([i_say,gpt_say])
+
         res = write_results_to_file(history)
         chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+    res = write_results_to_file(history)
+    chatbot.append(("所有文件都总结完成了吗?", res))
+    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
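The rewritten loop is effectively map-reduce over fragments: each fragment is summarized against an empty history (map), per-fragment results accumulate in this_paper_history, and a final request condenses them into one summary (reduce), but only when the document was actually split. Schematically, with request_gpt as a hypothetical stand-in for request_gpt_model_in_new_thread_with_ui_alive:

    # Schematic of the per-document flow; request_gpt is a hypothetical stand-in.
    def summarize_document(fragments, request_gpt):
        this_paper_history = []
        for frag in fragments:
            summary = request_gpt(inputs="请对下面的文章片段做概述:" + frag,
                                  history=[])        # map: fragments are independent
            this_paper_history.extend([frag, summary])
        if len(fragments) > 1:                       # reduce: only if it was split
            final = request_gpt(inputs="根据以上的对话,总结文章的主要内容。",
                                history=this_paper_history)
            this_paper_history.append(final)
        return this_paper_history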
@@ -123,11 +111,11 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
         return
 
     # 搜索需要处理的文件清单
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \
-                    [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)]
-    # [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
-    # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
-    # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
+    if txt.endswith('.docx') or txt.endswith('.doc'):
+        file_manifest = [txt]
+    else:
+        file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.docx', recursive=True)] + \
+                        [f for f in glob.glob(f'{project_folder}/**/*.doc', recursive=True)]
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
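With this hunk, txt may be either a directory to scan or a direct path to a single document. A hedged usage sketch (paths are placeholders):

    # Paths below are placeholders.
    for _ in 总结word文档("F:/papers", llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        pass  # directory: scans recursively for *.docx / *.doc
    for _ in 总结word文档("F:/papers/review.docx", llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
        pass  # direct file path: summarizes exactly one document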
request_llm/bridge_all.py
@@ -8,6 +8,7 @@
     具备多线程调用能力的函数
     2. predict_no_ui_long_connection:在实验过程中发现调用predict_no_ui处理长文档时,和openai的连接容易断掉,这个函数用stream的方式解决这个问题,同样支持多线程
 """
+import tiktoken
 
 from concurrent.futures import ThreadPoolExecutor
 
@@ -31,6 +32,43 @@ methods = {
     "tgui-ui": tgui_ui,
 }
 
+model_info = {
+    # openai
+    "gpt-3.5-turbo": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+
+    "gpt-4": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-4"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
+    },
+    # api_2d
+    "gpt-3.5-turbo-api2d": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+
+    "gpt-4-api2d": {
+        "max_token": 4096,
+        "tokenizer": tiktoken.encoding_for_model("gpt-4"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-4").encode(txt, disallowed_special=())),
+    },
+
+    # chatglm
+    "chatglm": {
+        "max_token": 1024,
+        "tokenizer": tiktoken.encoding_for_model("gpt-3.5-turbo"),
+        "token_cnt": lambda txt: len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(txt, disallowed_special=())),
+    },
+
+
+}
+
+
 def LLM_CATCH_EXCEPTION(f):
     """
         装饰器函数,将错误显示出来
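The new model_info table gives callers one place to look up a model's context window and a token counter instead of hard-coding per-model logic; the 解析docx hunk earlier in this commit consumes it exactly this way. A short sketch, assuming the table above is importable:

    # Assumes: from request_llm.bridge_all import model_info
    llm_model = 'gpt-3.5-turbo'
    max_token = model_info[llm_model]['max_token']   # context window, here 4096
    token_cnt = model_info[llm_model]['token_cnt']   # callable: str -> token count
    budget = max_token * 3 // 4                      # reserve ~1/4 for the reply
    fits = token_cnt('some fragment') <= budget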
@@ -47,7 +85,7 @@ def LLM_CATCH_EXCEPTION(f):
             return tb_str
     return decorated
 
-colors = ['#FF00FF', '#00FFFF', '#FF0000''#990099', '#009999', '#990044']
+colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False):
     """
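The colors change fixes a subtle bug: Python concatenates adjacent string literals, so the missing comma silently produced a five-element list containing '#FF0000#990099' rather than raising an error. For example:

    broken = ['#FF0000''#990099']    # adjacent literals concatenate: ['#FF0000#990099']
    fixed = ['#FF0000', '#990099']   # two elements, as intended
    assert len(broken) == 1 and len(fixed) == 2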