From 594f4b24f66c008eba69130f5c52376cf7b7fcbc Mon Sep 17 00:00:00 2001 From: wangyu <916491013@qq.com> Date: Sat, 1 Apr 2023 19:19:36 +0800 Subject: [PATCH 01/15] feat: add function to parse Golang projects This commit adds a new function to parse Golang projects to the collection of crazy functions. --- crazy_functions/解析项目源代码.py | 17 +++++++++++++++++ functional_crazy.py | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index 9ae53a7..e9d964b 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -148,3 +148,20 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptT return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) +@CatchException +def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + history = [] # 清空历史,以免输入溢出 + import glob, os + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + if len(file_manifest) == 0: + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何golang文件: {txt}") + yield chatbot, history, '正常' + return + yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git a/functional_crazy.py b/functional_crazy.py index 456bdcb..a3f46ba 100644 --- a/functional_crazy.py +++ b/functional_crazy.py @@ -14,6 +14,7 @@ def get_crazy_functionals(): from crazy_functions.解析项目源代码 import 解析一个Python项目 from crazy_functions.解析项目源代码 import 解析一个C项目的头文件 from crazy_functions.解析项目源代码 import 解析一个C项目 + from crazy_functions.解析项目源代码 import 解析一个Golang项目 from crazy_functions.高级功能函数模板 import 高阶功能模板函数 from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文 @@ -35,6 +36,10 @@ def get_crazy_functionals(): "AsButton": False, # 加入下拉菜单中 "Function": 解析一个C项目 }, + "解析整个Go项目": { + "Color": "stop", # 按钮颜色 + "Function": 解析一个Golang项目 + }, "读Tex论文写摘要": { "Color": "stop", # 按钮颜色 "Function": 读文章写摘要 From a0841c6e6c0163cb3e6c87befcaaa99920e14462 Mon Sep 17 00:00:00 2001 From: binary-husky <96192199+binary-husky@users.noreply.github.com> Date: Sat, 1 Apr 2023 19:37:39 +0800 Subject: [PATCH 02/15] Update functional_crazy.py --- functional_crazy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/functional_crazy.py b/functional_crazy.py index a3f46ba..deca4e0 100644 --- a/functional_crazy.py +++ b/functional_crazy.py @@ -38,6 +38,7 @@ def get_crazy_functionals(): }, "解析整个Go项目": { "Color": "stop", # 按钮颜色 + "AsButton": False, # 加入下拉菜单中 "Function": 解析一个Golang项目 }, "读Tex论文写摘要": { From a51cfbc625a23a3f80af940de4df58989ae292b7 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 1 Apr 2023 19:43:56 +0800 Subject: [PATCH 03/15] =?UTF-8?q?=E6=96=B0=E7=9A=84arxiv=E8=AE=BA=E6=96=87?= =?UTF-8?q?=E6=8F=92=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/下载arxiv论文翻译摘要.py | 187 +++++++++++++++++++++++ 1 file changed, 187 insertions(+) create mode 100644 crazy_functions/下载arxiv论文翻译摘要.py diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py new file mode 100644 index 0000000..fbe726f --- /dev/null +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -0,0 +1,187 @@ +from predict import predict_no_ui +from toolbox import CatchException, report_execption, 
write_results_to_file, predict_no_ui_but_counting_down, get_conf +import re, requests, unicodedata, os + +def download_arxiv_(url_pdf): + if 'arxiv.org' not in url_pdf: + if ('.' in url_pdf) and ('/' not in url_pdf): + new_url = 'https://arxiv.org/abs/'+url_pdf + print('下载编号:', url_pdf, '自动定位:', new_url) + # download_arxiv_(new_url) + return download_arxiv_(new_url) + else: + print('不能识别的URL!') + return None + if 'abs' in url_pdf: + url_pdf = url_pdf.replace('abs', 'pdf') + url_pdf = url_pdf + '.pdf' + + url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs') + title, other_info = get_name(_url_=url_abs) + + paper_id = title.split()[0] # '[1712.00559]' + if '2' in other_info['year']: + title = other_info['year'] + ' ' + title + + known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI'] + for k in known_conf: + if k in other_info['comment']: + title = k + ' ' + title + + download_dir = './gpt_log/arxiv/' + os.makedirs(download_dir, exist_ok=True) + + title_str = title.replace('?', '?')\ + .replace(':', ':')\ + .replace('\"', '“')\ + .replace('\n', '')\ + .replace(' ', ' ')\ + .replace(' ', ' ') + + requests_pdf_url = url_pdf + file_path = download_dir+title_str + # if os.path.exists(file_path): + # print('返回缓存文件') + # return './gpt_log/arxiv/'+title_str + + print('下载中') + proxies, = get_conf('proxies') + r = requests.get(requests_pdf_url, proxies=proxies) + with open(file_path, 'wb+') as f: + f.write(r.content) + print('下载完成') + + # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf)) + # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True) + + x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors']) + x = x.replace('?', '?')\ + .replace(':', ':')\ + .replace('\"', '“')\ + .replace('\n', '')\ + .replace(' ', ' ')\ + .replace(' ', ' ') + return './gpt_log/arxiv/'+title_str, other_info + + +def get_name(_url_): + import os + from bs4 import BeautifulSoup + print('正在获取文献名!') + print(_url_) + + # arxiv_recall = {} + # if os.path.exists('./arxiv_recall.pkl'): + # with open('./arxiv_recall.pkl', 'rb') as f: + # arxiv_recall = pickle.load(f) + + # if _url_ in arxiv_recall: + # print('在缓存中') + # return arxiv_recall[_url_] + + proxies, = get_conf('proxies') + res = requests.get(_url_, proxies=proxies) + + bs = BeautifulSoup(res.text, 'html.parser') + other_details = {} + + # get year + try: + year = bs.find_all(class_='dateline')[0].text + year = re.search(r'(\d{4})', year, re.M | re.I).group(1) + other_details['year'] = year + abstract = bs.find_all(class_='abstract mathjax')[0].text + other_details['abstract'] = abstract + except: + other_details['year'] = '' + print('年份获取失败') + + # get author + try: + authors = bs.find_all(class_='authors')[0].text + authors = authors.split('Authors:')[1] + other_details['authors'] = authors + except: + other_details['authors'] = '' + print('authors获取失败') + + # get comment + try: + comment = bs.find_all(class_='metatable')[0].text + real_comment = None + for item in comment.replace('\n', ' ').split(' '): + if 'Comments' in item: + real_comment = item + if real_comment is not None: + other_details['comment'] = real_comment + else: + other_details['comment'] = '' + except: + other_details['comment'] = '' + print('年份获取失败') + + title_str = BeautifulSoup( + res.text, 'html.parser').find('title').contents[0] + print('获取成功:', title_str) + # arxiv_recall[_url_] = (title_str+'.pdf', other_details) + # with open('./arxiv_recall.pkl', 'wb') as f: + # 
pickle.dump(arxiv_recall, f) + + return title_str+'.pdf', other_details + + + +@CatchException +def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + + CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,作者 binary-husky。正在提取摘要并下载PDF文档……" + raise RuntimeError() + import glob + import os + + # 基本信息:功能、贡献者 + chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO]) + yield chatbot, history, '正常' + + # 尝试导入依赖,如果缺少依赖,则给出安装建议 + try: + import pdfminer, bs4 + except: + report_execption(chatbot, history, + a = f"解析项目: {txt}", + b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") + yield chatbot, history, '正常' + return + + # 清空历史,以免输入溢出 + history = [] + + # 提取摘要,下载PDF文档 + try: + pdf_path, info = download_arxiv_(txt) + except: + report_execption(chatbot, history, + a = f"解析项目: {txt}", + b = f"下载pdf文件未成功") + yield chatbot, history, '正常' + return + + # 翻译摘要等 + i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" + i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' + chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) + yield chatbot, history, '正常' + msg = '正常' + # ** gpt request ** + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + chatbot[-1] = (i_say_show_user, gpt_say) + history.append(i_say_show_user); history.append(gpt_say) + yield chatbot, history, msg + # 写入文件 + import shutil + # 重置文件的创建时间 + shutil.copyfile(pdf_path, pdf_path.replace('.pdf', '.autodownload.pdf')); os.remove(pdf_path) + res = write_results_to_file(history) + chatbot.append(("完成了吗?", res)) + yield chatbot, history, msg + From a8bd564cd1d712513c70cf0d5884debd77e09c04 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 1 Apr 2023 19:48:14 +0800 Subject: [PATCH 04/15] advanced theme --- theme.py | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/theme.py b/theme.py index 14f16fb..fa943da 100644 --- a/theme.py +++ b/theme.py @@ -83,12 +83,61 @@ def adjust_theme(): advanced_css = """ .markdown-body table { - border: 1px solid #ddd; + margin: 1em 0; border-collapse: collapse; + empty-cells: show; } - .markdown-body th, .markdown-body td { - border: 1px solid #ddd; + border: 1.2px solid var(--border-color-primary); padding: 5px; } +.markdown-body thead { + background-color: rgba(175,184,193,0.2); +} +.markdown-body thead th { + padding: .5em .2em; +} +# 以下 CSS 来自对 https://github.com/GaiZhenbiao/ChuanhuChatGPT 的移植。 +/* list */ +ol:not(.options), ul:not(.options) { + padding-inline-start: 2em !important; +} +/* 对话气泡 */ +[class *= "message"] { + border-radius: var(--radius-xl) !important; + padding: var(--spacing-xl) !important; + font-size: var(--text-md) !important; + line-height: var(--line-md) !important; + min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); + min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); +} +[data-testid = "bot"] { + max-width: 85%; + width: auto !important; + border-bottom-left-radius: 0 !important; +} +[data-testid = "user"] { + max-width: 85%; + width: auto !important; + border-bottom-right-radius: 0 !important; +} +/* 行内代码 */ +code { + display: inline; + white-space: break-spaces; + border-radius: 6px; + margin: 0 2px 0 2px; + padding: .2em .4em .1em .4em; + background-color: rgba(175,184,193,0.2); +} +/* 代码块 */ +pre code { + display: block; + overflow: auto; + white-space: pre; + background-color: rgba(175,184,193,0.2); + border-radius: 
10px; + padding: 1em; + margin: 1em 2em 1em 0.5em; +} """ \ No newline at end of file From 919b15b24268f533ca126c36cc387958e12c76aa Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 1 Apr 2023 21:45:58 +0800 Subject: [PATCH 05/15] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E5=90=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/__init__.py | 0 project_self_analysis.md => self_analysis.md | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 crazy_functions/__init__.py rename project_self_analysis.md => self_analysis.md (100%) diff --git a/crazy_functions/__init__.py b/crazy_functions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/project_self_analysis.md b/self_analysis.md similarity index 100% rename from project_self_analysis.md rename to self_analysis.md From d79dfe2fc7278f3c7aa84ec40004603e3496406c Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 1 Apr 2023 21:56:55 +0800 Subject: [PATCH 06/15] wait new pr --- theme.py | 55 +++---------------------------------------------------- 1 file changed, 3 insertions(+), 52 deletions(-) diff --git a/theme.py b/theme.py index fa943da..14f16fb 100644 --- a/theme.py +++ b/theme.py @@ -83,61 +83,12 @@ def adjust_theme(): advanced_css = """ .markdown-body table { - margin: 1em 0; + border: 1px solid #ddd; border-collapse: collapse; - empty-cells: show; } + .markdown-body th, .markdown-body td { - border: 1.2px solid var(--border-color-primary); + border: 1px solid #ddd; padding: 5px; } -.markdown-body thead { - background-color: rgba(175,184,193,0.2); -} -.markdown-body thead th { - padding: .5em .2em; -} -# 以下 CSS 来自对 https://github.com/GaiZhenbiao/ChuanhuChatGPT 的移植。 -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} -/* 对话气泡 */ -[class *= "message"] { - border-radius: var(--radius-xl) !important; - padding: var(--spacing-xl) !important; - font-size: var(--text-md) !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - width: auto !important; - border-bottom-left-radius: 0 !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; -} -/* 行内代码 */ -code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px; - padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* 代码块 */ -pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: rgba(175,184,193,0.2); - border-radius: 10px; - padding: 1em; - margin: 1em 2em 1em 0.5em; -} """ \ No newline at end of file From bfa6661367b7592e82225515e5e4845c4aad95bb Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 1 Apr 2023 23:46:32 +0800 Subject: [PATCH 07/15] up --- config.py | 2 +- predict.py | 8 ++- request_llm/bridge_tgui.py | 137 +++++++++++++++++++++++++++++++++++++ 3 files changed, 144 insertions(+), 3 deletions(-) create mode 100644 request_llm/bridge_tgui.py diff --git a/config.py b/config.py index f4e1bc8..1b397ba 100644 --- a/config.py +++ b/config.py @@ -34,7 +34,7 @@ WEB_PORT = -1 MAX_RETRY = 2 # OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "gpt-3.5-turbo" +LLM_MODEL = "pygmalion-1.3b@localhost@7860" # "gpt-3.5-turbo" # OpenAI的API_URL API_URL = "https://api.openai.com/v1/chat/completions" 
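The `LLM_MODEL` change above packs the model name, host and port of a locally running text-generation-webui instance into a single '@'-separated string, which the new `request_llm/bridge_tgui.py` added below unpacks with `LLM_MODEL.split('@')`. A minimal sketch of that convention follows; the sanity check and print lines are illustrative additions, not part of the patch, and patch 09 later tightens the format to `TGUI:[model]@[addr]:[port]`:

```python
# Sketch of the '@'-separated model string introduced by this patch.
# Patch 09 later changes the convention to "TGUI:<model>@<addr>:<port>".
LLM_MODEL = "pygmalion-1.3b@localhost@7860"

model_name, addr, port = LLM_MODEL.split('@')  # -> "pygmalion-1.3b", "localhost", "7860"
assert port.isdigit(), f"非法端口: {port}"      # illustrative sanity check, not in the patch
print(model_name, addr, port)
```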
diff --git a/predict.py b/predict.py index 31a5861..10e58bb 100644 --- a/predict.py +++ b/predict.py @@ -112,8 +112,7 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr return result -def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', - stream = True, additional_fn=None): +def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream = True, additional_fn=None): """ 发送至chatGPT,流式获取输出。 用于基础的对话功能。 @@ -244,3 +243,8 @@ def generate_payload(inputs, top_p, temperature, history, system_prompt, stream) return headers,payload +if not LLM_MODEL.startswith('gpt'): + from request_llm.bridge_tgui import predict_tgui + predict = predict_tgui + + \ No newline at end of file diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py new file mode 100644 index 0000000..37f3826 --- /dev/null +++ b/request_llm/bridge_tgui.py @@ -0,0 +1,137 @@ +''' +Contributed by SagsMug. Modified by binary-husky +https://github.com/oobabooga/text-generation-webui/pull/175 +''' + +import asyncio +import json +import random +import string +import websockets +import logging +import time +import threading +from toolbox import get_conf +LLM_MODEL, = get_conf('LLM_MODEL') + +model_name, addr, port = LLM_MODEL.split('@') + +def random_hash(): + letters = string.ascii_lowercase + string.digits + return ''.join(random.choice(letters) for i in range(9)) + +async def run(context): + params = { + 'max_new_tokens': 200, + 'do_sample': True, + 'temperature': 0.5, + 'top_p': 0.9, + 'typical_p': 1, + 'repetition_penalty': 1.05, + 'encoder_repetition_penalty': 1.0, + 'top_k': 0, + 'min_length': 0, + 'no_repeat_ngram_size': 0, + 'num_beams': 1, + 'penalty_alpha': 0, + 'length_penalty': 1, + 'early_stopping': False, + 'seed': -1, + } + session = random_hash() + + async with websockets.connect(f"ws://{addr}:{port}/queue/join") as websocket: + while content := json.loads(await websocket.recv()): + #Python3.10 syntax, replace with if elif on older + if content["msg"] == "send_hash": + await websocket.send(json.dumps({ + "session_hash": session, + "fn_index": 12 + })) + elif content["msg"] == "estimation": + pass + elif content["msg"] == "send_data": + await websocket.send(json.dumps({ + "session_hash": session, + "fn_index": 12, + "data": [ + context, + params['max_new_tokens'], + params['do_sample'], + params['temperature'], + params['top_p'], + params['typical_p'], + params['repetition_penalty'], + params['encoder_repetition_penalty'], + params['top_k'], + params['min_length'], + params['no_repeat_ngram_size'], + params['num_beams'], + params['penalty_alpha'], + params['length_penalty'], + params['early_stopping'], + params['seed'], + ] + })) + elif content["msg"] == "process_starts": + pass + elif content["msg"] in ["process_generating", "process_completed"]: + yield content["output"]["data"][0] + # You can search for your desired end indicator and + # stop generation by closing the websocket here + if (content["msg"] == "process_completed"): + break + + + + + +def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream = True, additional_fn=None): + """ + 发送至chatGPT,流式获取输出。 + 用于基础的对话功能。 + inputs 是本次问询的输入 + top_p, temperature是chatGPT的内部调优参数 + history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) + chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 + additional_fn代表点击的哪个按钮,按钮见functional.py + """ + if additional_fn is not None: + import functional + importlib.reload(functional) # 热更新prompt + functional = 
functional.get_functionals() + if "PreProcess" in functional[additional_fn]: inputs = functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) + inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"] + + raw_input = inputs + logging.info(f'[raw_input] {raw_input}') + chatbot.append((inputs, "")) + yield chatbot, history, "等待响应" + + prompt = inputs + tgui_say = "" + + mutable = [""] + def run_coorotine(mutable): + async def get_result(): + async for response in run(prompt): + # Print intermediate steps + mutable += response + asyncio.run(get_result()) + + thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) + thread_listen.start() + + while thread_listen.is_alive(): + time.sleep(1) + # Print intermediate steps + if tgui_say != mutable[0]: + tgui_say = mutable[0] + history[-1] = tgui_say + chatbot[-1] = (history[-2], history[-1]) + yield chatbot, history, status_text + + logging.info(f'[response] {tgui_say}') + + + \ No newline at end of file From 3af0bbdbe479144e4d6f98f5fd2e0ff948b00314 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 00:22:41 +0800 Subject: [PATCH 08/15] =?UTF-8?q?=E6=88=90=E5=8A=9F=E5=80=9F=E5=8A=A9tgui?= =?UTF-8?q?=E8=B0=83=E7=94=A8=E6=9B=B4=E5=A4=9ALLM?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- predict.py | 5 ++++- request_llm/README.md | 39 ++++++++++++++++++++++++++++++++++++++ request_llm/bridge_tgui.py | 34 +++++++++++++++++++++++++-------- 4 files changed, 70 insertions(+), 10 deletions(-) create mode 100644 request_llm/README.md diff --git a/config.py b/config.py index 1b397ba..803129f 100644 --- a/config.py +++ b/config.py @@ -1,5 +1,5 @@ # [step 1]>> 例如: API_KEY = "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" (此key无效) -API_KEY = "sk-此处填API密钥" +API_KEY = "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" # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改 USE_PROXY = False diff --git a/predict.py b/predict.py index 10e58bb..1310d3f 100644 --- a/predict.py +++ b/predict.py @@ -244,7 +244,10 @@ def generate_payload(inputs, top_p, temperature, history, system_prompt, stream) if not LLM_MODEL.startswith('gpt'): - from request_llm.bridge_tgui import predict_tgui + # 函数重载到另一个文件 + from request_llm.bridge_tgui import predict_tgui, predict_tgui_no_ui predict = predict_tgui + predict_no_ui = predict_tgui_no_ui + predict_no_ui_long_connection = predict_tgui_no_ui \ No newline at end of file diff --git a/request_llm/README.md b/request_llm/README.md new file mode 100644 index 0000000..26f0dde --- /dev/null +++ b/request_llm/README.md @@ -0,0 +1,39 @@ +# 如何使用其他大语言模型 + +## 1. 先运行text-generation +``` sh +# 下载模型 +git clone https://github.com/oobabooga/text-generation-webui.git + +# 安装text-generation的额外依赖 +pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers + +# 切换路径 +cd text-generation-webui + +# 下载模型 +python download-model.py facebook/opt-1.3b + +# 其他可选如 facebook/galactica-1.3b +# facebook/galactica-6.7b +# facebook/galactica-120b + +# Pymalion 6B is a proof-of-concept dialogue model based on EleutherAI's GPT-J-6B. +# facebook/pygmalion-1.3b + +# 启动text-generation,注意把模型的斜杠改成下划线 +python server.py --cpu --listen --listen-port 7860 --model facebook_galactica-1.3b +``` + +## 2. 修改config.py +``` +# LLM_MODEL格式为 [模型]@[ws地址] @[ws端口] +LLM_MODEL = "pygmalion-1.3b@localhost@7860" +``` + + +## 3. 运行! 
+``` +cd chatgpt-academic +python main.py +``` diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py index 37f3826..1c7103f 100644 --- a/request_llm/bridge_tgui.py +++ b/request_llm/bridge_tgui.py @@ -11,6 +11,7 @@ import websockets import logging import time import threading +import importlib from toolbox import get_conf LLM_MODEL, = get_conf('LLM_MODEL') @@ -22,7 +23,7 @@ def random_hash(): async def run(context): params = { - 'max_new_tokens': 200, + 'max_new_tokens': 1024, 'do_sample': True, 'temperature': 0.5, 'top_p': 0.9, @@ -103,9 +104,10 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom if "PreProcess" in functional[additional_fn]: inputs = functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话) inputs = functional[additional_fn]["Prefix"] + inputs + functional[additional_fn]["Suffix"] - raw_input = inputs + raw_input = "What I would like to say is the following: " + inputs logging.info(f'[raw_input] {raw_input}') - chatbot.append((inputs, "")) + history.extend([inputs, ""]) + chatbot.append([inputs, ""]) yield chatbot, history, "等待响应" prompt = inputs @@ -113,11 +115,11 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom mutable = [""] def run_coorotine(mutable): - async def get_result(): + async def get_result(mutable): async for response in run(prompt): # Print intermediate steps - mutable += response - asyncio.run(get_result()) + mutable[0] = response + asyncio.run(get_result(mutable)) thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) thread_listen.start() @@ -129,9 +131,25 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom tgui_say = mutable[0] history[-1] = tgui_say chatbot[-1] = (history[-2], history[-1]) - yield chatbot, history, status_text + yield chatbot, history, "status_text" logging.info(f'[response] {tgui_say}') - \ No newline at end of file + +def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""): + raw_input = "What I would like to say is the following: " + inputs + prompt = inputs + tgui_say = "" + mutable = [""] + def run_coorotine(mutable): + async def get_result(mutable): + async for response in run(prompt): + # Print intermediate steps + mutable[0] = response + asyncio.run(get_result(mutable)) + thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) + thread_listen.start() + thread_listen.join() + tgui_say = mutable[0] + return tgui_say From 2420d62a332d0759849e7546ee5e3fc9b4efb1e4 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 00:40:05 +0800 Subject: [PATCH 09/15] =?UTF-8?q?=E6=8E=A5=E5=85=A5TGUI?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- request_llm/README.md | 21 +++++++++------------ request_llm/bridge_tgui.py | 11 +++++++---- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/config.py b/config.py index 803129f..ad9cfd9 100644 --- a/config.py +++ b/config.py @@ -34,7 +34,7 @@ WEB_PORT = -1 MAX_RETRY = 2 # OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "pygmalion-1.3b@localhost@7860" # "gpt-3.5-turbo" +LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860" # "gpt-3.5-turbo" # OpenAI的API_URL API_URL = "https://api.openai.com/v1/chat/completions" diff --git a/request_llm/README.md b/request_llm/README.md index 26f0dde..a539f1f 100644 --- a/request_llm/README.md +++ b/request_llm/README.md @@ -2,7 +2,7 @@ ## 1. 
先运行text-generation ``` sh -# 下载模型 +# 下载模型( text-generation 这么牛的项目,别忘了给人家star ) git clone https://github.com/oobabooga/text-generation-webui.git # 安装text-generation的额外依赖 @@ -12,28 +12,25 @@ pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft cd text-generation-webui # 下载模型 -python download-model.py facebook/opt-1.3b - -# 其他可选如 facebook/galactica-1.3b +python download-model.py facebook/galactica-1.3b +# 其他可选如 facebook/opt-1.3b # facebook/galactica-6.7b # facebook/galactica-120b - -# Pymalion 6B is a proof-of-concept dialogue model based on EleutherAI's GPT-J-6B. -# facebook/pygmalion-1.3b +# facebook/pygmalion-1.3b 等 +# 详情见 https://github.com/oobabooga/text-generation-webui # 启动text-generation,注意把模型的斜杠改成下划线 python server.py --cpu --listen --listen-port 7860 --model facebook_galactica-1.3b ``` ## 2. 修改config.py +``` sh +# LLM_MODEL格式较复杂 TGUI:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致 +LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860" ``` -# LLM_MODEL格式为 [模型]@[ws地址] @[ws端口] -LLM_MODEL = "pygmalion-1.3b@localhost@7860" -``` - ## 3. 运行! -``` +``` sh cd chatgpt-academic python main.py ``` diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py index 1c7103f..916416b 100644 --- a/request_llm/bridge_tgui.py +++ b/request_llm/bridge_tgui.py @@ -15,7 +15,10 @@ import importlib from toolbox import get_conf LLM_MODEL, = get_conf('LLM_MODEL') -model_name, addr, port = LLM_MODEL.split('@') +# "TGUI:galactica-1.3b@localhost:7860" +model_name, addr_port = LLM_MODEL.split('@') +assert ':' in addr_port, "LLM_MODEL 格式不正确!" + LLM_MODEL +addr, port = addr_port.split(':') def random_hash(): letters = string.ascii_lowercase + string.digits @@ -117,11 +120,11 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom def run_coorotine(mutable): async def get_result(mutable): async for response in run(prompt): - # Print intermediate steps + print(response[len(mutable[0]):]) mutable[0] = response asyncio.run(get_result(mutable)) - thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) + thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True) thread_listen.start() while thread_listen.is_alive(): @@ -145,7 +148,7 @@ def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""): def run_coorotine(mutable): async def get_result(mutable): async for response in run(prompt): - # Print intermediate steps + print(response[len(mutable[0]):]) mutable[0] = response asyncio.run(get_result(mutable)) thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) From eee4cb361c13bcece6691416a0f70184f3c52cf2 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 00:51:17 +0800 Subject: [PATCH 10/15] q --- request_llm/bridge_tgui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py index 916416b..20a6352 100644 --- a/request_llm/bridge_tgui.py +++ b/request_llm/bridge_tgui.py @@ -26,7 +26,7 @@ def random_hash(): async def run(context): params = { - 'max_new_tokens': 1024, + 'max_new_tokens': 512, 'do_sample': True, 'temperature': 0.5, 'top_p': 0.9, @@ -39,7 +39,7 @@ async def run(context): 'num_beams': 1, 'penalty_alpha': 0, 'length_penalty': 1, - 'early_stopping': False, + 'early_stopping': True, 'seed': -1, } session = random_hash() @@ -144,7 +144,7 @@ def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""): raw_input = "What I would like to say is the following: " + inputs prompt = inputs tgui_say 
= "" - mutable = [""] + mutable = ["", time.time()] def run_coorotine(mutable): async def get_result(mutable): async for response in run(prompt): From 190b5473738eaf00b2816a1bd1828200d5e31c92 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 01:18:51 +0800 Subject: [PATCH 11/15] stage llm model interface --- README.md | 12 +++++++----- main.py | 5 +++-- request_llm/bridge_tgui.py | 19 ++++++++++++++----- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 39b091f..859a4c4 100644 --- a/README.md +++ b/README.md @@ -36,14 +36,16 @@ https://github.com/polarwinkel/mdtex2html 自定义快捷键 | 支持自定义快捷键 配置代理服务器 | 支持配置代理服务器 模块化设计 | 支持自定义高阶的实验性功能 -自我程序剖析 | [实验性功能] 一键读懂本项目的源代码 -程序剖析 | [实验性功能] 一键可以剖析其他Python/C++项目 -读论文 | [实验性功能] 一键解读latex论文全文并生成摘要 -批量注释生成 | [实验性功能] 一键批量生成函数注释 -chat分析报告生成 | [实验性功能] 运行后自动生成总结汇报 +自我程序剖析 | [函数插件] 一键读懂本项目的源代码 +程序剖析 | [函数插件] 一键可以剖析其他Python/C++等项目 +读论文 | [函数插件] 一键解读latex论文全文并生成摘要 +arxiv小助手 | [函数插件] 输入url一键翻译摘要+下载论文 +批量注释生成 | [函数插件] 一键批量生成函数注释 +chat分析报告生成 | [函数插件] 运行后自动生成总结汇报 公式显示 | 可以同时显示公式的tex形式和渲染形式 图片显示 | 可以在markdown中显示图片 支持GPT输出的markdown表格 | 可以输出支持GPT的markdown表格 +本地大语言模型接口 | 借助[TGUI](https://github.com/oobabooga/text-generation-webui)接入galactica等本地语言模型 …… | …… diff --git a/main.py b/main.py index 533e590..0016d9c 100644 --- a/main.py +++ b/main.py @@ -11,8 +11,9 @@ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT = PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT if not AUTHENTICATION: AUTHENTICATION = None +title = "ChatGPT 学术优化" if LLM_MODEL.startswith('gpt') else "ChatGPT / LLM 学术优化" initial_prompt = "Serve me as a writing and programming assistant." -title_html = """

<h1 align="center">ChatGPT 学术优化</h1>"""
+title_html = f"<h1 align=\"center\">{title}</h1>
" # 问询记录, python 版本建议3.9+(越新越好) import logging @@ -140,5 +141,5 @@ def auto_opentab_delay(): threading.Thread(target=open, name="open-browser", daemon=True).start() auto_opentab_delay() -demo.title = "ChatGPT 学术优化" +demo.title = title demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION) diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py index 20a6352..d7cbe10 100644 --- a/request_llm/bridge_tgui.py +++ b/request_llm/bridge_tgui.py @@ -24,9 +24,9 @@ def random_hash(): letters = string.ascii_lowercase + string.digits return ''.join(random.choice(letters) for i in range(9)) -async def run(context): +async def run(context, max_token=512): params = { - 'max_new_tokens': 512, + 'max_new_tokens': max_token, 'do_sample': True, 'temperature': 0.5, 'top_p': 0.9, @@ -116,12 +116,15 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom prompt = inputs tgui_say = "" - mutable = [""] + mutable = ["", time.time()] def run_coorotine(mutable): async def get_result(mutable): async for response in run(prompt): print(response[len(mutable[0]):]) mutable[0] = response + if (time.time() - mutable[1]) > 3: + print('exit when no listener') + break asyncio.run(get_result(mutable)) thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True) @@ -129,6 +132,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom while thread_listen.is_alive(): time.sleep(1) + mutable[1] = time.time() # Print intermediate steps if tgui_say != mutable[0]: tgui_say = mutable[0] @@ -147,12 +151,17 @@ def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""): mutable = ["", time.time()] def run_coorotine(mutable): async def get_result(mutable): - async for response in run(prompt): + async for response in run(prompt, max_token=20): print(response[len(mutable[0]):]) mutable[0] = response + if (time.time() - mutable[1]) > 3: + print('exit when no listener') + break asyncio.run(get_result(mutable)) thread_listen = threading.Thread(target=run_coorotine, args=(mutable,)) thread_listen.start() - thread_listen.join() + while thread_listen.is_alive(): + time.sleep(1) + mutable[1] = time.time() tgui_say = mutable[0] return tgui_say From 99e13e5895cab20a507664799175e84c08ff883e Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 01:23:15 +0800 Subject: [PATCH 12/15] update --- config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.py b/config.py index ad9cfd9..f4e1bc8 100644 --- a/config.py +++ b/config.py @@ -1,5 +1,5 @@ # [step 1]>> 例如: API_KEY = "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" (此key无效) -API_KEY = "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" +API_KEY = "sk-此处填API密钥" # [step 2]>> 改为True应用代理,如果直接在海外服务器部署,此处不修改 USE_PROXY = False @@ -34,7 +34,7 @@ WEB_PORT = -1 MAX_RETRY = 2 # OpenAI模型选择是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "TGUI:galactica-1.3b@localhost:7860" # "gpt-3.5-turbo" +LLM_MODEL = "gpt-3.5-turbo" # OpenAI的API_URL API_URL = "https://api.openai.com/v1/chat/completions" From 55ef4acea9289a1295b334b554339d12df4d7c1a Mon Sep 17 00:00:00 2001 From: RongkangXiong Date: Sun, 2 Apr 2023 02:59:03 +0800 Subject: [PATCH 13/15] =?UTF-8?q?add=20crazy=5Ffunctions=20=E8=A7=A3?= =?UTF-8?q?=E6=9E=90=E4=B8=80=E4=B8=AAJava=E9=A1=B9=E7=9B=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/解析项目源代码.py | 22 ++++++++++++++++++++++ 
functional_crazy.py | 6 ++++++ 2 files changed, 28 insertions(+) diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index e9d964b..742c5ce 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -148,6 +148,28 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptT return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + +@CatchException +def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + history = [] # 清空历史,以免输入溢出 + import glob, os + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] + if len(file_manifest) == 0: + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何java头文件: {txt}") + yield chatbot, history, '正常' + return + yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + @CatchException def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): history = [] # 清空历史,以免输入溢出 diff --git a/functional_crazy.py b/functional_crazy.py index deca4e0..57d1350 100644 --- a/functional_crazy.py +++ b/functional_crazy.py @@ -15,6 +15,7 @@ def get_crazy_functionals(): from crazy_functions.解析项目源代码 import 解析一个C项目的头文件 from crazy_functions.解析项目源代码 import 解析一个C项目 from crazy_functions.解析项目源代码 import 解析一个Golang项目 + from crazy_functions.解析项目源代码 import 解析一个Java项目 from crazy_functions.高级功能函数模板 import 高阶功能模板函数 from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文 @@ -41,6 +42,11 @@ def get_crazy_functionals(): "AsButton": False, # 加入下拉菜单中 "Function": 解析一个Golang项目 }, + "解析整个Java项目": { + "Color": "stop", # 按钮颜色 + "AsButton": False, # 加入下拉菜单中 + "Function": 解析一个Java项目 + }, "读Tex论文写摘要": { "Color": "stop", # 按钮颜色 "Function": 读文章写摘要 From 9540cf9448026a1c8135c750866b63d320909718 Mon Sep 17 00:00:00 2001 From: RongkangXiong Date: Sun, 2 Apr 2023 03:07:21 +0800 Subject: [PATCH 14/15] =?UTF-8?q?add=20crazy=5Ffunctions=20=E8=A7=A3?= =?UTF-8?q?=E6=9E=90=E4=B8=80=E4=B8=AARect=E9=A1=B9=E7=9B=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/解析项目源代码.py | 115 +++++++++++++++++++----------- functional_crazy.py | 6 ++ 2 files changed, 81 insertions(+), 40 deletions(-) diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index 742c5ce..713b09d 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -1,7 +1,9 @@ from predict import predict_no_ui from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down + fast_debug = False + def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): import time, glob, os print('begin analysis on:', file_manifest) @@ -9,20 +11,22 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() - prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" + prefix = "接下来请你逐文件分析下面的工程" if index == 0 else "" i_say = prefix + 
f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, + history=[]) # 带超时倒计时 chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) + history.append(i_say_show_user); + history.append(gpt_say) yield chatbot, history, msg if not fast_debug: time.sleep(2) @@ -31,45 +35,48 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, chatbot.append((i_say, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 - + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, + history=history) # 带超时倒计时 + chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) + history.append(i_say); + history.append(gpt_say) yield chatbot, history, msg res = write_results_to_file(history) chatbot.append(("完成了吗?", res)) yield chatbot, history, msg - - @CatchException def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import time, glob, os file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + [f for f in glob.glob('./crazy_functions/*.py') if + ('test_project' not in f) and ('gpt_log' not in f)] for index, fp in enumerate(file_manifest): # if 'test_project' in fp: continue with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() - prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else "" + prefix = "接下来请你分析自己的程序构成,别紧张," if index == 0 else "" i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```' i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: # ** gpt request ** # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[], long_connection=True) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, + history=[], long_connection=True) # 带超时倒计时 chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) + history.append(i_say_show_user); + history.append(gpt_say) yield chatbot, history, '正常' time.sleep(2) @@ -77,32 +84,35 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx chatbot.append((i_say, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: # ** gpt request ** # gpt_say = 
predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, long_connection=True) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, + long_connection=True) # 带超时倒计时 chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) + history.append(i_say); + history.append(gpt_say) yield chatbot, history, '正常' res = write_results_to_file(history) chatbot.append(("完成了吗?", res)) yield chatbot, history, '正常' + @CatchException def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何python文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) @@ -110,40 +120,41 @@ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPr @CatchException def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ + # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ + # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h头文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + @CatchException def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \ [f for f in 
glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h头文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) @@ -151,39 +162,63 @@ def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptT @CatchException def 解析一个Java项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.jar', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \ [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何java头文件: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + @CatchException -def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 +def 解析一个Rect项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + yield chatbot, history, '正常' + return + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.tsx', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.js', recursive=True)] + \ + [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)] + if len(file_manifest) == 0: + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何Rect文件: {txt}") + yield chatbot, history, '正常' + return + yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) + + +@CatchException +def 解析一个Golang项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): + history = [] # 清空历史,以免输入溢出 + import glob, os + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何golang文件: {txt}") + report_execption(chatbot, history, a=f"解析项目: {txt}", 
b=f"找不到任何golang文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) diff --git a/functional_crazy.py b/functional_crazy.py index 57d1350..fa62881 100644 --- a/functional_crazy.py +++ b/functional_crazy.py @@ -16,6 +16,7 @@ def get_crazy_functionals(): from crazy_functions.解析项目源代码 import 解析一个C项目 from crazy_functions.解析项目源代码 import 解析一个Golang项目 from crazy_functions.解析项目源代码 import 解析一个Java项目 + from crazy_functions.解析项目源代码 import 解析一个Rect项目 from crazy_functions.高级功能函数模板 import 高阶功能模板函数 from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文 @@ -47,6 +48,11 @@ def get_crazy_functionals(): "AsButton": False, # 加入下拉菜单中 "Function": 解析一个Java项目 }, + "解析整个Java项目": { + "Color": "stop", # 按钮颜色 + "AsButton": False, # 加入下拉菜单中 + "Function": 解析一个Rect项目 + }, "读Tex论文写摘要": { "Color": "stop", # 按钮颜色 "Function": 读文章写摘要 From 3387b5acb0dd17d088b74c9a728377b2e69b3475 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sun, 2 Apr 2023 15:33:09 +0800 Subject: [PATCH 15/15] =?UTF-8?q?=E6=B7=BB=E5=8A=A0Golang=E3=80=81Java?= =?UTF-8?q?=E7=AD=89=E9=A1=B9=E7=9B=AE=E7=9A=84=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 12 ++-- crazy_functions/下载arxiv论文翻译摘要.py | 7 +-- crazy_functions/解析项目源代码.py | 75 ++++++++++-------------- functional_crazy.py | 60 ++++++++++--------- main.py | 7 +-- predict.py | 11 +--- 6 files changed, 78 insertions(+), 94 deletions(-) diff --git a/README.md b/README.md index d4ba8a4..96a9cb8 100644 --- a/README.md +++ b/README.md @@ -36,16 +36,14 @@ https://github.com/polarwinkel/mdtex2html 自定义快捷键 | 支持自定义快捷键 配置代理服务器 | 支持配置代理服务器 模块化设计 | 支持自定义高阶的实验性功能 -自我程序剖析 | [函数插件] 一键读懂本项目的源代码 -程序剖析 | [函数插件] 一键可以剖析其他Python/C++等项目 -读论文 | [函数插件] 一键解读latex论文全文并生成摘要 -arxiv小助手 | [函数插件] 输入url一键翻译摘要+下载论文 -批量注释生成 | [函数插件] 一键批量生成函数注释 -chat分析报告生成 | [函数插件] 运行后自动生成总结汇报 +自我程序剖析 | [实验性功能] 一键读懂本项目的源代码 +程序剖析 | [实验性功能] 一键可以剖析其他Python/C++项目 +读论文 | [实验性功能] 一键解读latex论文全文并生成摘要 +批量注释生成 | [实验性功能] 一键批量生成函数注释 +chat分析报告生成 | [实验性功能] 运行后自动生成总结汇报 公式显示 | 可以同时显示公式的tex形式和渲染形式 图片显示 | 可以在markdown中显示图片 支持GPT输出的markdown表格 | 可以输出支持GPT的markdown表格 -本地大语言模型接口 | 借助[TGUI](https://github.com/oobabooga/text-generation-webui)接入galactica等本地语言模型 …… | …… diff --git a/crazy_functions/下载arxiv论文翻译摘要.py b/crazy_functions/下载arxiv论文翻译摘要.py index fbe726f..38f00c4 100644 --- a/crazy_functions/下载arxiv论文翻译摘要.py +++ b/crazy_functions/下载arxiv论文翻译摘要.py @@ -134,8 +134,7 @@ def get_name(_url_): @CatchException def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,作者 binary-husky。正在提取摘要并下载PDF文档……" - raise RuntimeError() + CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……" import glob import os @@ -180,8 +179,8 @@ def 下载arxiv论文并翻译摘要(txt, top_p, temperature, chatbot, history, # 写入文件 import shutil # 重置文件的创建时间 - shutil.copyfile(pdf_path, pdf_path.replace('.pdf', '.autodownload.pdf')); os.remove(pdf_path) + shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path) res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) + chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载")) yield chatbot, history, msg diff --git a/crazy_functions/解析项目源代码.py b/crazy_functions/解析项目源代码.py index 713b09d..172be24 100644 --- a/crazy_functions/解析项目源代码.py +++ b/crazy_functions/解析项目源代码.py @@ -1,9 +1,7 @@ from predict import predict_no_ui from toolbox import CatchException, report_execption, 
write_results_to_file, predict_no_ui_but_counting_down - fast_debug = False - def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt): import time, glob, os print('begin analysis on:', file_manifest) @@ -11,22 +9,20 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() - prefix = "接下来请你逐文件分析下面的工程" if index == 0 else "" + prefix = "接下来请你逐文件分析下面的工程" if index==0 else "" i_say = prefix + f'请对下面的程序文件做一个概述文件名是{os.path.relpath(fp, project_folder)},文件代码是 ```{file_content}```' i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, - history=[]) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, history=[]) # 带超时倒计时 chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); - history.append(gpt_say) + history.append(i_say_show_user); history.append(gpt_say) yield chatbot, history, msg if not fast_debug: time.sleep(2) @@ -35,48 +31,45 @@ def 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, chatbot.append((i_say, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: msg = '正常' # ** gpt request ** - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, - history=history) # 带超时倒计时 - + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history) # 带超时倒计时 + chatbot[-1] = (i_say, gpt_say) - history.append(i_say); - history.append(gpt_say) + history.append(i_say); history.append(gpt_say) yield chatbot, history, msg res = write_results_to_file(history) chatbot.append(("完成了吗?", res)) yield chatbot, history, msg + + @CatchException def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import time, glob, os file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \ - [f for f in glob.glob('./crazy_functions/*.py') if - ('test_project' not in f) and ('gpt_log' not in f)] + [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)] for index, fp in enumerate(file_manifest): # if 'test_project' in fp: continue with open(fp, 'r', encoding='utf-8') as f: file_content = f.read() - prefix = "接下来请你分析自己的程序构成,别紧张," if index == 0 else "" + prefix = "接下来请你分析自己的程序构成,别紧张," if index==0 else "" i_say = prefix + f'请对下面的程序文件做一个概述文件名是{fp},文件代码是 ```{file_content}```' i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的程序文件做一个概述: {os.path.abspath(fp)}' chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: # ** gpt request ** # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, - history=[], long_connection=True) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temperature, 
history=[], long_connection=True) # 带超时倒计时 chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); - history.append(gpt_say) + history.append(i_say_show_user); history.append(gpt_say) yield chatbot, history, '正常' time.sleep(2) @@ -84,35 +77,32 @@ def 解析项目本身(txt, top_p, temperature, chatbot, history, systemPromptTx chatbot.append((i_say, "[Local Message] waiting gpt response.")) yield chatbot, history, '正常' - if not fast_debug: + if not fast_debug: # ** gpt request ** # gpt_say = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history) - gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, - long_connection=True) # 带超时倒计时 + gpt_say = yield from predict_no_ui_but_counting_down(i_say, i_say, chatbot, top_p, temperature, history=history, long_connection=True) # 带超时倒计时 chatbot[-1] = (i_say, gpt_say) - history.append(i_say); - history.append(gpt_say) + history.append(i_say); history.append(gpt_say) yield chatbot, history, '正常' res = write_results_to_file(history) chatbot.append(("完成了吗?", res)) yield chatbot, history, '正常' - @CatchException def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何python文件: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) @@ -120,41 +110,40 @@ def 解析一个Python项目(txt, top_p, temperature, chatbot, history, systemPr @CatchException def 解析一个C项目的头文件(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os if os.path.exists(txt): project_folder = txt else: if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") yield chatbot, history, '正常' return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] + file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] # + \ + # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ + # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] if len(file_manifest) == 0: - report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h头文件: {txt}") + report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}") yield chatbot, history, '正常' return yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt) - @CatchException def 解析一个C项目(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT): - history = [] # 清空历史,以免输入溢出 + history = [] # 清空历史,以免输入溢出 import glob, os 
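    # 以下为各个"解析XX项目"插件共用的套路:若输入框内容是有效的本地路径,则将其作为项目根目录;否则报告异常并提前返回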
     if os.path.exists(txt):
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield chatbot, history, '正常'
         return
-    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
+    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)]  + \
                     [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.h头文件: {txt}")
+        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
         yield chatbot, history, '正常'
         return
     yield from 解析源代码(file_manifest, project_folder, top_p, temperature, chatbot, history, systemPromptTxt)
diff --git a/functional_crazy.py b/functional_crazy.py
index fa62881..e04a342 100644
--- a/functional_crazy.py
+++ b/functional_crazy.py
@@ -1,13 +1,8 @@
 from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
 
-# UserVisibleLevel是过滤器参数。
-# 由于UI界面空间有限,所以通过这种方式决定UI界面中显示哪些插件
-# 默认函数插件 VisibleLevel 是 0
-# 当 UserVisibleLevel >= 函数插件的 VisibleLevel 时,该函数插件才会被显示出来
-UserVisibleLevel = 1
-
-
 def get_crazy_functionals():
+    ###################### 第一组插件 ###########################
+    # [第一组插件]: 最早期编写的项目插件和一些demo
     from crazy_functions.读文章写摘要 import 读文章写摘要
     from crazy_functions.生成函数注释 import 批量生成函数注释
     from crazy_functions.解析项目源代码 import 解析项目本身
@@ -70,33 +65,44 @@ def get_crazy_functionals():
             "Function": HotReload(高阶功能模板函数)
         },
     }
+    ###################### 第二组插件 ###########################
+    # [第二组插件]: 经过充分测试,但功能上距离达到完美状态还差一点点
+    from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
+    from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
+    from crazy_functions.总结word文档 import 总结word文档
+    function_plugins.update({
+        "[仅供开发调试] 批量总结PDF文档": {
+            "Color": "stop",
+            "Function": HotReload(批量总结PDF文档) # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
+        },
+        "[仅供开发调试] 批量总结PDF文档pdfminer": {
+            "Color": "stop",
+            "AsButton": False, # 加入下拉菜单中
+            "Function": HotReload(批量总结PDF文档pdfminer)
+        },
+        "[仅供开发调试] 批量总结Word文档": {
+            "Color": "stop",
+            "Function": HotReload(总结word文档)
+        },
+    })
 
-    # VisibleLevel=1 经过测试,但功能上距离达到完美状态还差一点点
-    if UserVisibleLevel >= 1:
-        from crazy_functions.批量总结PDF文档 import 批量总结PDF文档
-        from crazy_functions.批量总结PDF文档pdfminer import 批量总结PDF文档pdfminer
-        from crazy_functions.总结word文档 import 总结word文档
+    ###################### 第三组插件 ###########################
+    # [第三组插件]: 尚未充分测试的函数插件,放在这里
+    try:
+        from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
         function_plugins.update({
-            "[仅供开发调试] 批量总结PDF文档": {
-                "Color": "stop",
-                "Function": HotReload(批量总结PDF文档) # HotReload 的意思是热更新,修改函数插件代码后,不需要重启程序,代码直接生效
-            },
-            "[仅供开发调试] 批量总结PDF文档pdfminer": {
+            "下载arxiv论文并翻译摘要": {
                 "Color": "stop",
                 "AsButton": False, # 加入下拉菜单中
-                "Function": HotReload(批量总结PDF文档pdfminer)
-            },
-            "[仅供开发调试] 批量总结Word文档": {
-                "Color": "stop",
-                "Function": HotReload(总结word文档)
-            },
+                "Function": HotReload(下载arxiv论文并翻译摘要)
+            }
         })
+    except Exception as err:
+        print(f'[下载arxiv论文并翻译摘要] 插件导入失败 {str(err)}')
 
-    # VisibleLevel=2 尚未充分测试的函数插件,放在这里
-    if UserVisibleLevel >= 2:
-        function_plugins.update({
-        })
+
+    ###################### 第n组插件 ###########################
     return function_plugins
diff --git a/main.py b/main.py
index 0016d9c..3de76a7 100644
--- a/main.py
+++ b/main.py
@@ -11,9 +11,8 @@ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT =
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 if not AUTHENTICATION: AUTHENTICATION = None
 
-title = "ChatGPT 学术优化" if LLM_MODEL.startswith('gpt') else "ChatGPT / LLM 学术优化"
 initial_prompt = "Serve me as a writing and programming assistant."
-title_html = f"<h1 align=\"center\">{title}</h1>"
+title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
 
 # 问询记录, python 版本建议3.9+(越新越好)
 import logging
@@ -120,7 +119,7 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False, css=advanced_css) as de
         dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt] )
         # 随变按钮的回调函数注册
         def route(k, *args, **kwargs):
-            if k in [r"打开插件列表", r"先从插件列表中选择"]: return
+            if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
             yield from crazy_fns[k]["Function"](*args, **kwargs)
         click_handle = switchy_bt.click(route,[switchy_bt, *input_combo, gr.State(PORT)], output_combo)
         click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
@@ -141,5 +140,5 @@ def auto_opentab_delay():
     threading.Thread(target=open, name="open-browser", daemon=True).start()
 
 auto_opentab_delay()
-demo.title = title
+demo.title = "ChatGPT 学术优化"
 demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
diff --git a/predict.py b/predict.py
index 1310d3f..31a5861 100644
--- a/predict.py
+++ b/predict.py
@@ -112,7 +112,8 @@ def predict_no_ui_long_connection(inputs, top_p, temperature, history=[], sys_pr
 
     return result
 
-def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='', stream = True, additional_fn=None):
+def predict(inputs, top_p, temperature, chatbot=[], history=[], system_prompt='',
+            stream = True, additional_fn=None):
     """
         发送至chatGPT,流式获取输出。
         用于基础的对话功能。
@@ -243,11 +244,3 @@ def generate_payload(inputs, top_p, temperature, history, system_prompt, stream)
 
     return headers,payload
 
-if not LLM_MODEL.startswith('gpt'):
-    # 函数重载到另一个文件
-    from request_llm.bridge_tgui import predict_tgui, predict_tgui_no_ui
-    predict = predict_tgui
-    predict_no_ui = predict_tgui_no_ui
-    predict_no_ui_long_connection = predict_tgui_no_ui
-
-
\ No newline at end of file