diff --git a/crazy_functional.py b/crazy_functional.py
index 8e3ab6a..8f3b682 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -19,12 +19,18 @@ def get_crazy_functions():
     from crazy_functions.解析项目源代码 import 解析一个Lua项目
     from crazy_functions.解析项目源代码 import 解析一个CSharp项目
     from crazy_functions.总结word文档 import 总结word文档
+    from crazy_functions.解析JupyterNotebook import 解析ipynb文件
     function_plugins = {
         "解析整个Python项目": {
             "Color": "stop",  # button color
             "Function": HotReload(解析一个Python项目)
         },
+        "解析Jupyter Notebook文件": {
+            "Color": "stop",
+            "AsButton": False,
+            "Function": HotReload(解析ipynb文件),
+        },
         "批量总结Word文档": {
             "Color": "stop",
             "Function": HotReload(总结word文档)
         },
diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py
new file mode 100644
index 0000000..86f246c
--- /dev/null
+++ b/crazy_functions/解析JupyterNotebook.py
@@ -0,0 +1,132 @@
+from toolbox import update_ui
+from toolbox import CatchException, report_execption
+fast_debug = True
+
+
+class PaperFileGroup():
+    def __init__(self):
+        self.file_paths = []
+        self.file_contents = []
+        self.sp_file_contents = []
+        self.sp_file_index = []
+        self.sp_file_tag = []
+
+        # token counter based on the model's tokenizer
+        from request_llm.bridge_all import model_info
+        enc = model_info["gpt-3.5-turbo"]['tokenizer']
+        def get_token_num(txt):
+            return len(enc.encode(txt, disallowed_special=()))
+        self.get_token_num = get_token_num
+
+    def run_file_split(self, max_token_limit=1900):
+        """
+        Split long file contents into segments below the token limit
+        """
+        for index, file_content in enumerate(self.file_contents):
+            if self.get_token_num(file_content) < max_token_limit:
+                self.sp_file_contents.append(file_content)
+                self.sp_file_index.append(index)
+                self.sp_file_tag.append(self.file_paths[index])
+            else:
+                from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
+                segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
+                    file_content, self.get_token_num, max_token_limit)
+                for j, segment in enumerate(segments):
+                    self.sp_file_contents.append(segment)
+                    self.sp_file_index.append(index)
+                    self.sp_file_tag.append(
+                        self.file_paths[index] + f".part-{j}.txt")
+
+
+def parseNotebook(filename, enable_markdown=1):
+    """Extract code cells (and optionally markdown cells) from an .ipynb file into one text blob."""
+    import json
+
+    CodeBlocks = []
+    with open(filename, 'r', encoding='utf-8', errors='replace') as f:
+        notebook = json.load(f)
+        for cell in notebook['cells']:
+            if cell['cell_type'] == 'code' and cell['source']:
+                # remove blank lines
+                cell['source'] = [line for line in cell['source'] if line.strip() != '']
+                CodeBlocks.append("".join(cell['source']))
+            elif enable_markdown and cell['cell_type'] == 'markdown' and cell['source']:
+                cell['source'] = [line for line in cell['source'] if line.strip() != '']
+                CodeBlocks.append("Markdown:" + "".join(cell['source']))
+
+    Code = ""
+    for idx, code in enumerate(CodeBlocks):
+        Code += f"This is code block No.{idx+1}:\n"
+        Code += code + "\n"
+
+    return Code
+
+
+def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
+    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
+
+    pfg = PaperFileGroup()
+
+    for fp in file_manifest:
+        file_content = parseNotebook(fp, enable_markdown=1)
+        pfg.file_paths.append(fp)
+        pfg.file_contents.append(file_content)
+
+    # <-------- split overly long ipynb files ---------->
+    pfg.run_file_split(max_token_limit=1024)
+    n_split = len(pfg.sp_file_contents)
+
+    inputs_array = [f"This is a Jupyter Notebook file, tell me about each block in Chinese. Focus just on the code. "
+                    + f"If a block starts with `Markdown`, it is a markdown block of the ipynb file. "
+                    + f"Start a new line for each block and number the blocks in Chinese."
+                    + f"\n\n{frag}" for frag in pfg.sp_file_contents]
+    inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag]
+    sys_prompt_array = ["You are a professional programmer."] * n_split
+
+    gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
+        inputs_array=inputs_array,
+        inputs_show_user_array=inputs_show_user_array,
+        llm_kwargs=llm_kwargs,
+        chatbot=chatbot,
+        history_array=[[""] for _ in range(n_split)],
+        sys_prompt_array=sys_prompt_array,
+        # max_workers=5,  # the maximum parallelism OpenAI allows
+        scroller_max_len=80
+    )
+
+    # <-------- collect the results and finish ---------->
+    block_result = " \n".join(gpt_response_collection)
+    chatbot.append(("解析的结果如下", block_result))
+    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+
+
+@CatchException
+def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    chatbot.append([
+        "函数插件功能?",
+        "对IPynb文件进行解析"])
+    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+
+    history = []  # clear the history
+    import glob
+    import os
+    if os.path.exists(txt):
+        project_folder = txt
+    else:
+        if txt == "":
+            txt = '空空如也的输入栏'
+        report_execption(chatbot, history,
+                         a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+        return
+    if txt.endswith('.ipynb'):
+        file_manifest = [txt]
+    else:
+        file_manifest = [f for f in glob.glob(
+            f'{project_folder}/**/*.ipynb', recursive=True)]
+    if len(file_manifest) == 0:
+        report_execption(chatbot, history,
+                         a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
+        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
+        return
+    yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)