From d587189cebaeb16c88fa4f6e724ad5115fe2269c Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Tue, 11 Apr 2023 17:40:50 +0800
Subject: [PATCH] =?UTF-8?q?=E6=9B=B4=E6=AD=A3bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 crazy_functions/crazy_utils.py     |  2 +-
 crazy_functions/解析项目源代码.py  | 20 ++++++++++++++++++--
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py
index 1a877dd..9e96e58 100644
--- a/crazy_functions/crazy_utils.py
+++ b/crazy_functions/crazy_utils.py
@@ -76,7 +76,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
         try:
             # 【第一种情况】:顺利完成
             result = predict_no_ui_long_connection(
-                inputs=inputs, llm_kwargs=llm_kwargs,
+                inputs=inputs, llm_kwargs=llm_kwargs,
                 history=history, sys_prompt=sys_prompt, observe_window=mutable)
             return result
         except ConnectionAbortedError as token_exceeded_error:
diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py
index 72a6967..6e4b128 100644
--- a/crazy_functions/解析项目源代码.py
+++ b/crazy_functions/解析项目源代码.py
@@ -41,6 +41,22 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
     chatbot.append(("完成?", "逐个文件分析已完成。" + res + "\n\n正在开始汇总。"))
     yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
 
+    ############################## <存储中间数据进行调试> ##################################
+
+    # def objdump(obj):
+    #     import pickle
+    #     with open('objdump.tmp', 'wb+') as f:
+    #         pickle.dump(obj, f)
+    #     return
+
+    # def objload():
+    #     import pickle, os
+    #     if not os.path.exists('objdump.tmp'):
+    #         return
+    #     with open('objdump.tmp', 'rb') as f:
+    #         return pickle.load(f)
+    # objdump([report_part_1, gpt_response_collection, history_to_return, file_manifest, project_folder, fp, llm_kwargs, chatbot])
+
     ############################## <第二步,综合,单线程,分组+迭代处理> ##################################
     batchsize = 16    # 10个文件为一组
     report_part_2 = []
@@ -53,14 +69,14 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
         file_rel_path = [os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)]
         # 把“请对下面的程序文件做一个概述” 替换成 精简的 "文件名:{all_file[index]}"
         for index, content in enumerate(this_iteration_gpt_response_collection):
-            if index%2==0: this_iteration_gpt_response_collection[index] = f"文件名:{file_rel_path[index//2]}"
+            if index%2==0: this_iteration_gpt_response_collection[index] = f"{file_rel_path[index//2]}" # 只保留文件名节省token
         previous_iteration_files.extend([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
         previous_iteration_files_string = ', '.join(previous_iteration_files)
         current_iteration_focus = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(this_iteration_file_manifest)])
         i_say = f'根据以上分析,对程序的整体功能和构架重新做出概括。然后用一张markdown表格整理每个文件的功能(包括{previous_iteration_files_string})。'
         inputs_show_user = f'根据以上分析,对程序的整体功能和构架重新做出概括,由于输入长度限制,可能需要分组处理,本组文件为 {current_iteration_focus} + 已经汇总的文件组。'
         this_iteration_history = copy.deepcopy(this_iteration_gpt_response_collection)
-        this_iteration_history.extend(last_iteration_result)
+        this_iteration_history.append(last_iteration_result)
         result = yield from request_gpt_model_in_new_thread_with_ui_alive(
             inputs=i_say, inputs_show_user=inputs_show_user, llm_kwargs=llm_kwargs, chatbot=chatbot,
             history=this_iteration_history,   # 迭代之前的分析
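
The commented-out <存储中间数据进行调试> block added in the first hunk stashes the first-stage results to disk with pickle so the second (summary) stage can be debugged without re-running the per-file analysis. Below is a minimal, self-contained sketch of that checkpoint pattern; the objdump/objload names and the objdump.tmp file name follow the patch, while the placeholder payload and the default-path parameter are illustrative assumptions, not part of the plugin:

    import os
    import pickle

    def objdump(obj, path='objdump.tmp'):
        # Persist an arbitrary Python object so a later debugging run can resume here.
        with open(path, 'wb+') as f:
            pickle.dump(obj, f)

    def objload(path='objdump.tmp'):
        # Return the previously dumped object, or None if no checkpoint exists.
        if not os.path.exists(path):
            return None
        with open(path, 'rb') as f:
            return pickle.load(f)

    # Placeholder round trip; the patch dumps the real intermediate state
    # (report_part_1, gpt_response_collection, chatbot, ...) instead.
    objdump({'stage': 'after per-file analysis', 'files': ['a.py', 'b.py']})
    assert objload()['stage'] == 'after per-file analysis'

In the patch these helpers stay commented out; they are development scaffolding rather than part of the plugin's runtime path.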
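The substantive fix in the second hunk replaces this_iteration_history.extend(last_iteration_result) with .append(...). A short sketch of why that matters, assuming (as the fix implies) that last_iteration_result is a single summary string rather than a list; the sample values are illustrative only:

    # Assumed shape: alternating "file name" / "analysis" entries, as built by the plugin.
    this_iteration_history = ["crazy_functions/crazy_utils.py", "analysis of crazy_utils.py"]
    last_iteration_result = "combined summary of the previous file group"

    buggy = list(this_iteration_history)
    buggy.extend(last_iteration_result)   # a str is iterable: extend() adds one entry per character
    print(len(buggy))                     # 2 originals plus one entry per character

    fixed = list(this_iteration_history)
    fixed.append(last_iteration_result)   # append() keeps the summary as a single history entry
    print(len(fixed))                     # 3
    print(fixed[-1])                      # 'combined summary of the previous file group'

The same distinction explains why the earlier previous_iteration_files.extend([...]) call, which extends with a real list of paths, is left untouched by the patch.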