From da55ae68f61c510ae7dc08c002323c6f95593c6f Mon Sep 17 00:00:00 2001 From: Your Name Date: Fri, 31 Mar 2023 21:03:12 +0800 Subject: [PATCH] =?UTF-8?q?pdfminer=E6=95=B4=E5=90=88=E5=88=B0=E4=B8=80?= =?UTF-8?q?=E4=B8=AA=E6=96=87=E4=BB=B6=E4=B8=AD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/批量总结PDF文档.py | 1 - crazy_functions/批量总结PDF文档pdfminer.py | 151 +++++++++++++++++++++ crazy_functions/读文章写摘要.py | 20 +-- functional_crazy.py | 10 +- requirements.txt | 13 +- toolbox.py | 58 +------- 6 files changed, 171 insertions(+), 82 deletions(-) create mode 100644 crazy_functions/批量总结PDF文档pdfminer.py diff --git a/crazy_functions/批量总结PDF文档.py b/crazy_functions/批量总结PDF文档.py index bf7fe6f..102bc9e 100644 --- a/crazy_functions/批量总结PDF文档.py +++ b/crazy_functions/批量总结PDF文档.py @@ -11,7 +11,6 @@ def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, histor file_content = "" for page in doc: file_content += page.get_text() - file_content = file_content.encode('gbk', 'ignore').decode('gbk') print(file_content) prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" diff --git a/crazy_functions/批量总结PDF文档pdfminer.py b/crazy_functions/批量总结PDF文档pdfminer.py new file mode 100644 index 0000000..060187c --- /dev/null +++ b/crazy_functions/批量总结PDF文档pdfminer.py @@ -0,0 +1,151 @@ +from predict import predict_no_ui +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down + +fast_debug = False + +def readPdf(pdfPath): + """ + 读取pdf文件,返回文本内容 + """ + import pdfminer + from pdfminer.pdfparser import PDFParser + from pdfminer.pdfdocument import PDFDocument + from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed + from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter + from pdfminer.pdfdevice import PDFDevice + from pdfminer.layout import LAParams + from pdfminer.converter import PDFPageAggregator + + fp = open(pdfPath, 'rb') + + # Create a PDF parser object associated with the file object + parser = PDFParser(fp) + + # Create a PDF document object that stores the document structure. + # Password for initialization as 2nd parameter + document = PDFDocument(parser) + # Check if the document allows text extraction. If not, abort. + if not document.is_extractable: + raise PDFTextExtractionNotAllowed + + # Create a PDF resource manager object that stores shared resources. + rsrcmgr = PDFResourceManager() + + # Create a PDF device object. + # device = PDFDevice(rsrcmgr) + + # BEGIN LAYOUT ANALYSIS. + # Set parameters for analysis. + laparams = LAParams( + char_margin=10.0, + line_margin=0.2, + boxes_flow=0.2, + all_texts=False, + ) + # Create a PDF page aggregator object. + device = PDFPageAggregator(rsrcmgr, laparams=laparams) + # Create a PDF interpreter object. 
+ interpreter = PDFPageInterpreter(rsrcmgr, device) + + # loop over all pages in the document + outTextList = [] + for page in PDFPage.create_pages(document): + # read the page into a layout object + interpreter.process_page(page) + layout = device.get_result() + for obj in layout._objs: + if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal): + # print(obj.get_text()) + outTextList.append(obj.get_text()) + + return outTextList + + +def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): + import time, glob, os + from bs4 import BeautifulSoup + print('begin analysis on:', file_manifest) + for index, fp in enumerate(file_manifest): + if ".tex" in fp: + with open(fp, 'r', encoding='utf-8') as f: + file_content = f.read() + if ".pdf" in fp.lower(): + file_content = readPdf(fp) + file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk') + + prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" + i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' + i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' + chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) + print('[1] yield chatbot, history') + yield chatbot, history, '正常' + + if not fast_debug: + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + + print('[2] end gpt req') + chatbot[-1] = (i_say_show_user, gpt_say) + history.append(i_say_show_user); history.append(gpt_say) + print('[3] yield chatbot, history') + yield chatbot, history, msg + print('[4] next') + if not fast_debug: time.sleep(2) + + all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) + i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' + chatbot.append((i_say, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + + if not fast_debug: + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 + + chatbot[-1] = (i_say, gpt_say) + history.append(i_say); history.append(gpt_say) + yield chatbot, history, msg + res = write_results_to_file(history) + chatbot.append(("完成了吗?", res)) + yield chatbot, history, msg + + + +@CatchException +def 批量总结PDF文档pdfminer(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + history = [] # 清空历史,以免输入溢出 + import glob, os + + # 基本信息:功能、贡献者 + chatbot.append([ + "函数插件功能?", + "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"]) + yield chatbot, history, '正常' + + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import pdfminer, bs4 + except: + report_execption(chatbot, history, + a = f"解析项目: {txt}", + b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") + yield chatbot, history, '正常' + return + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ + # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ + # [f for f in 
glob.glob(f'{project_folder}/**/*.c', recursive=True)] + if len(file_manifest) == 0: + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + yield chatbot, history, '正常' + return + yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + diff --git a/crazy_functions/读文章写摘要.py b/crazy_functions/读文章写摘要.py index 4144d11..dc92256 100644 --- a/crazy_functions/读文章写摘要.py +++ b/crazy_functions/读文章写摘要.py @@ -1,19 +1,14 @@ from predict import predict_no_ui -from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down, readPdf +from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down fast_debug = False -from bs4 import BeautifulSoup def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): import time, glob, os print('begin analysis on:', file_manifest) for index, fp in enumerate(file_manifest): - if ".tex" in fp: - with open(fp, 'r', encoding='utf-8') as f: - file_content = f.read() - if ".pdf" in fp.lower(): - file_content = readPdf(fp) - file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk') + with open(fp, 'r', encoding='utf-8') as f: + file_content = f.read() prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' @@ -22,7 +17,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist print('[1] yield chatbot, history') yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 @@ -40,7 +35,7 @@ def 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, hist chatbot.append((i_say, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 @@ -65,12 +60,11 @@ def 读文章写摘要(txt, top_p, temperature, chatbot, history, systemPromptTx report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \ # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") yield chatbot, history, '正常' return yield from 解析Paper(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git a/functional_crazy.py b/functional_crazy.py index 4b90af4..ef5b6c2 100644 --- a/functional_crazy.py +++ b/functional_crazy.py @@ -30,7 +30,7 @@ def get_crazy_functionals(): "Color": "stop", # 按钮颜色 "Function": 解析一个C项目 }, - "读tex or pdf论文写摘要": { + "读tex论文写摘要": { "Color": "stop", # 按钮颜色 "Function": 读文章写摘要 }, @@ -55,7 +55,13 @@ 
def get_crazy_functionals(): "Function": 批量总结PDF文档 }, }) - + from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer + function_plugins.update({ + "[仅供开发调试] 批量总结PDF文档pdfminer": { + "Color": "stop", + "Function": 批量总结PDF文档pdfminer + }, + }) # VisibleLevel=2 尚未充分测试的函数插件,放在这里 if UserVisibleLevel >= 2: function_plugins.update({ diff --git a/requirements.txt b/requirements.txt index 56c5b23..0e1d7db 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,5 @@ gradio>=3.23 -requests[socks]~=2.28.2 -mdtex2html~=1.2.0 -Markdown~=3.4.3 -latex2mathml~=3.75.1 -bs4~=0.0.1 -lxml~=4.6.4 -beautifulsoup4~=4.12.0 -numpy~=1.24.2 -pdfminer.six \ No newline at end of file +requests[socks] +mdtex2html +Markdown +latex2mathml \ No newline at end of file diff --git a/toolbox.py b/toolbox.py index b30c255..d96b3f6 100644 --- a/toolbox.py +++ b/toolbox.py @@ -1,14 +1,6 @@ import markdown, mdtex2html, threading, importlib, traceback from show_math import convert as convert_math from functools import wraps -import pdfminer -from pdfminer.pdfparser import PDFParser -from pdfminer.pdfdocument import PDFDocument -from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed -from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter -from pdfminer.pdfdevice import PDFDevice -from pdfminer.layout import LAParams -from pdfminer.converter import PDFPageAggregator def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], sys_prompt=''): """ @@ -243,52 +235,4 @@ def clear_line_break(txt): txt = txt.replace('\n', ' ') txt = txt.replace(' ', ' ') txt = txt.replace(' ', ' ') - return txt - -def readPdf(pdfPath): - """ - 读取pdf文件,返回文本内容 - """ - fp = open(pdfPath, 'rb') - - # Create a PDF parser object associated with the file object - parser = PDFParser(fp) - - # Create a PDF document object that stores the document structure. - # Password for initialization as 2nd parameter - document = PDFDocument(parser) - # Check if the document allows text extraction. If not, abort. - if not document.is_extractable: - raise PDFTextExtractionNotAllowed - - # Create a PDF resource manager object that stores shared resources. - rsrcmgr = PDFResourceManager() - - # Create a PDF device object. - # device = PDFDevice(rsrcmgr) - - # BEGIN LAYOUT ANALYSIS. - # Set parameters for analysis. - laparams = LAParams( - char_margin=10.0, - line_margin=0.2, - boxes_flow=0.2, - all_texts=False, - ) - # Create a PDF page aggregator object. - device = PDFPageAggregator(rsrcmgr, laparams=laparams) - # Create a PDF interpreter object. - interpreter = PDFPageInterpreter(rsrcmgr, device) - - # loop over all pages in the document - outTextList = [] - for page in PDFPage.create_pages(document): - # read the page into a layout object - interpreter.process_page(page) - layout = device.get_result() - for obj in layout._objs: - if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal): - # print(obj.get_text()) - outTextList.append(obj.get_text()) - - return outTextList \ No newline at end of file + return txt \ No newline at end of file
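For reference, the low-level `PDFParser`/`PDFPageAggregator` loop that the new `readPdf` helper sets up is roughly what pdfminer.six's high-level API does when handed the same `LAParams`. A minimal sketch of that equivalence (an illustration only, assuming pdfminer.six is installed; `paper.pdf` is a stand-in path, not a file from this repo):

```python
# Rough high-level equivalent of readPdf(): extract text with the same layout
# parameters. extract_text() returns one concatenated string, while readPdf()
# returns a list with one string per LTTextBoxHorizontal.
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams

laparams = LAParams(
    char_margin=10.0,   # characters this close (relative to char width) share a line
    line_margin=0.2,    # only tightly spaced lines are grouped into one text box
    boxes_flow=0.2,     # balance of horizontal vs. vertical position when ordering boxes
    all_texts=False,    # skip layout analysis inside figures
)
text = extract_text("paper.pdf", laparams=laparams)  # stand-in input path
print(text[:500])
```

The plugin's explicit loop additionally lets it keep only horizontal text boxes (the `isinstance` check on `LTTextBoxHorizontal`), which discards vertical text and stray figure elements.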
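Downstream, the new file's 解析Paper flattens that list and scrubs it before building the prompt. A sketch of the same cleanup run outside the plugin (illustrative only: `paper.pdf` is a stand-in path, the repository root is assumed to be on `sys.path`, and `lxml` must be installed for the `features="lxml"` parser used here and in the plugin):

```python
# Reproduce the text cleanup 解析Paper applies to readPdf's output.
from bs4 import BeautifulSoup
from crazy_functions.批量总结PDF文档pdfminer import readPdf

boxes = readPdf("paper.pdf")   # one string per horizontal text box
raw = "".join(boxes)

# BeautifulSoup strips any markup-like fragments; the GBK round trip with
# errors="ignore" silently drops characters GBK cannot encode (e.g. the fi/fl
# ligature glyphs PDF extraction often produces), trimming noise before the
# text is packed into the prompt.
clean = BeautifulSoup(raw, features="lxml").body.text
clean = clean.encode("gbk", "ignore").decode("gbk")
print(clean[:200])
```

The same GBK round trip is what 批量总结PDF文档.py previously applied to PyMuPDF output; this patch removes it there and keeps it only on the pdfminer path.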