Make the upload file path and the log path unified, configurable variables (PATH_PRIVATE_UPLOAD / PATH_LOGGING)
commit ec9d030457, parent 14de282302
@@ -179,7 +179,10 @@ GROBID_URLS = [
 
 # 是否允许通过自然语言描述修改本页的配置,该功能具有一定的危险性,默认关闭
 ALLOW_RESET_CONFIG = False
+# 临时的上传文件夹位置,请勿修改
+PATH_PRIVATE_UPLOAD = "private_upload"
+# 日志文件夹的位置,请勿修改
+PATH_LOGGING = "gpt_log"
 
 """
 在线大模型配置关联关系示意图
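Note: the rest of this commit reads the two new keys back through toolbox.get_conf, the same helper used for every other option. A minimal usage sketch (the key names are taken from this diff; the surrounding lines are illustrative only):

    from toolbox import get_conf

    # get_conf returns one value per requested key, so both folders come back in one call
    PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
    print(PATH_PRIVATE_UPLOAD)  # "private_upload" unless overridden
    print(PATH_LOGGING)         # "gpt_log" unless overridden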
@@ -13,7 +13,6 @@ def get_crazy_functions():
     from crazy_functions.解析项目源代码 import 解析一个Java项目
     from crazy_functions.解析项目源代码 import 解析一个前端项目
     from crazy_functions.高级功能函数模板 import 高阶功能模板函数
-    from crazy_functions.代码重写为全英文_多线程 import 全项目切换英文
     from crazy_functions.Latex全文润色 import Latex英文润色
     from crazy_functions.询问多个大语言模型 import 同时问询
     from crazy_functions.解析项目源代码 import 解析一个Lua项目
@@ -1,6 +1,7 @@
 from collections.abc import Callable, Iterable, Mapping
 from typing import Any
-from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone, clear_file_downloadzone
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc
+from toolbox import promote_file_to_downloadzone, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import input_clipping, try_install_deps
 from multiprocessing import Process, Pipe
@@ -92,7 +93,7 @@ def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
 
 def make_module(code):
     module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
-    with open(f'gpt_log/{module_file}.py', 'w', encoding='utf8') as f:
+    with open(f'{get_log_folder()}/{module_file}.py', 'w', encoding='utf8') as f:
         f.write(code)
 
     def get_class_name(class_string):
@@ -102,7 +103,7 @@ def make_module(code):
         return class_name
 
     class_name = get_class_name(code)
-    return f"gpt_log.{module_file}->{class_name}"
+    return f"{get_log_folder().replace('/', '.')}.{module_file}->{class_name}"
 
 def init_module_instance(module):
     import importlib
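Note: many hunks below call get_log_folder(), sometimes as get_log_folder(plugin_name='arxiv') or plugin_name='audio'. The helper itself is not part of this diff; the sketch below is a guess at its behaviour based only on how the call sites here use it, not the actual toolbox.py implementation:

    import os
    from toolbox import get_conf

    def get_log_folder_sketch(plugin_name=None):
        # Hypothetical stand-in for toolbox.get_log_folder, for illustration only
        log_root, = get_conf('PATH_LOGGING')          # "gpt_log" by default, see the config hunk above
        folder = os.path.join(log_root, plugin_name) if plugin_name else log_root
        os.makedirs(folder, exist_ok=True)            # assumption: some call sites below still call os.makedirs themselves
        return folder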
@@ -171,7 +172,7 @@ def 虚空终端CodeInterpreter(txt, llm_kwargs, plugin_kwargs, chatbot, history
     file_type = file_path.split('.')[-1]
 
     # 粗心检查
-    if 'private_upload' in txt:
+    if is_the_upload_folder(txt):
         chatbot.append([
             "...",
             f"请在输入框内填写需求,然后再次点击该插件(文件路径 {file_path} 已经被记忆)"
@@ -1,5 +1,5 @@
-from toolbox import update_ui, trimmed_format_exc
-from toolbox import CatchException, report_execption, write_results_to_file, zip_folder
+from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
+from toolbox import CatchException, report_execption, write_history_to_file, zip_folder
 
 
 class PaperFileGroup():
@@ -51,7 +51,7 @@ class PaperFileGroup():
         import os, time
         folder = os.path.dirname(self.file_paths[0])
         t = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
-        zip_folder(folder, './gpt_log/', f'{t}-polished.zip')
+        zip_folder(folder, get_log_folder(), f'{t}-polished.zip')
 
 
 def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='polish'):
@@ -126,7 +126,9 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
 
     # <-------- 整理结果,退出 ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
+    res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
 
     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
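Note: the two-line pattern above recurs throughout this commit. write_history_to_file replaces the old write_results_to_file and appears to return the path of the report it wrote inside the configured log folder (the value is passed straight to promote_file_to_downloadzone), and promote_file_to_downloadzone then surfaces that file in the download area. A hedged usage sketch, with argument names inferred from the call sites in this diff:

    from toolbox import write_history_to_file, promote_file_to_downloadzone

    res = write_history_to_file(history)                 # writes a report into the log folder, returns its path
    promote_file_to_downloadzone(res, chatbot=chatbot)   # makes that file downloadable from the UI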
@@ -1,5 +1,5 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import update_ui, promote_file_to_downloadzone
+from toolbox import CatchException, report_execption, write_history_to_file
 fast_debug = False
 
 class PaperFileGroup():
@@ -95,7 +95,8 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
 
     # <-------- 整理结果,退出 ---------->
     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-    res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
+    res = write_history_to_file(gpt_response_collection, create_report_file_name)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     history = gpt_response_collection
     chatbot.append((f"{fp}完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -1,4 +1,4 @@
-from toolbox import update_ui, trimmed_format_exc, get_conf, objdump, objload, promote_file_to_downloadzone
+from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
 from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
 from functools import partial
 import glob, os, requests, time
@@ -65,7 +65,7 @@ def move_project(project_folder, arxiv_id=None):
     if arxiv_id is not None:
         new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
     else:
-        new_workfolder = f'gpt_log/{gen_time_str()}'
+        new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
     try:
         shutil.rmtree(new_workfolder)
     except:
@@ -1,4 +1,4 @@
-from toolbox import update_ui, update_ui_lastest_msg # 刷新Gradio前端界面
+from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
 from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
 from .latex_toolbox import PRESERVE, TRANSFORM
 from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
@@ -439,9 +439,9 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
                 trans = k
                 ch.add_row(a=orig, b=trans)
         create_report_file_name = f"{gen_time_str()}.trans.html"
-        ch.save_file(create_report_file_name)
-        shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name))
-        promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot)
+        res = ch.save_file(create_report_file_name)
+        shutil.copyfile(res, pj(project_folder, create_report_file_name))
+        promote_file_to_downloadzone(file=res, chatbot=chatbot)
     except:
         from toolbox import trimmed_format_exc
         print('writing html result failed:', trimmed_format_exc())
@@ -1,5 +1,6 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file, get_conf
+from toolbox import update_ui, get_log_folder
+from toolbox import write_history_to_file, promote_file_to_downloadzone
+from toolbox import CatchException, report_execption, get_conf
 import re, requests, unicodedata, os
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 def download_arxiv_(url_pdf):
@@ -28,7 +29,7 @@ def download_arxiv_(url_pdf):
         if k in other_info['comment']:
             title = k + ' ' + title
 
-    download_dir = './gpt_log/arxiv/'
+    download_dir = get_log_folder(plugin_name='arxiv')
     os.makedirs(download_dir, exist_ok=True)
 
     title_str = title.replace('?', '?')\
@@ -40,9 +41,6 @@ def download_arxiv_(url_pdf):
 
     requests_pdf_url = url_pdf
     file_path = download_dir+title_str
-    # if os.path.exists(file_path):
-    #     print('返回缓存文件')
-    #     return './gpt_log/arxiv/'+title_str
 
     print('下载中')
     proxies, = get_conf('proxies')
@@ -61,7 +59,7 @@ def download_arxiv_(url_pdf):
         .replace('\n', '')\
         .replace('  ', ' ')\
         .replace('  ', ' ')
-    return './gpt_log/arxiv/'+title_str, other_info
+    return file_path, other_info
 
 
 def get_name(_url_):
@@ -184,11 +182,10 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
     chatbot[-1] = (i_say_show_user, gpt_say)
     history.append(i_say_show_user); history.append(gpt_say)
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-    # 写入文件
-    import shutil
-    # 重置文件的创建时间
-    shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path)
-    res = write_results_to_file(history)
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
+    promote_file_to_downloadzone(pdf_path, chatbot=chatbot)
     chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载"))
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
@@ -1,138 +0,0 @@
-import threading
-from request_llm.bridge_all import predict_no_ui_long_connection
-from toolbox import update_ui
-from toolbox import CatchException, write_results_to_file, report_execption
-from .crazy_utils import breakdown_txt_to_satisfy_token_limit
-
-def extract_code_block_carefully(txt):
-    splitted = txt.split('```')
-    n_code_block_seg = len(splitted) - 1
-    if n_code_block_seg <= 1: return txt
-    # 剩下的情况都开头除去 ``` 结尾除去一次 ```
-    txt_out = '```'.join(splitted[1:-1])
-    return txt_out
-
-
-
-def break_txt_into_half_at_some_linebreak(txt):
-    lines = txt.split('\n')
-    n_lines = len(lines)
-    pre = lines[:(n_lines//2)]
-    post = lines[(n_lines//2):]
-    return "\n".join(pre), "\n".join(post)
-
-
-@CatchException
-def 全项目切换英文(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys_prompt, web_port):
-    # 第1步:清空历史,以免输入溢出
-    history = []
-
-    # 第2步:尝试导入依赖,如果缺少依赖,则给出安装建议
-    try:
-        import tiktoken
-    except:
-        report_execption(chatbot, history,
-            a = f"解析项目: {txt}",
-            b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-
-    # 第3步:集合文件
-    import time, glob, os, shutil, re
-    os.makedirs('gpt_log/generated_english_version', exist_ok=True)
-    os.makedirs('gpt_log/generated_english_version/crazy_functions', exist_ok=True)
-    file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
-                    [f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
-    # file_manifest = ['./toolbox.py']
-    i_say_show_user_buffer = []
-
-    # 第4步:随便显示点什么防止卡顿的感觉
-    for index, fp in enumerate(file_manifest):
-        # if 'test_project' in fp: continue
-        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-            file_content = f.read()
-        i_say_show_user =f'[{index}/{len(file_manifest)}] 接下来请将以下代码中包含的所有中文转化为英文,只输出转化后的英文代码,请用代码块输出代码: {os.path.abspath(fp)}'
-        i_say_show_user_buffer.append(i_say_show_user)
-        chatbot.append((i_say_show_user, "[Local Message] 等待多线程操作,中间过程不予显示."))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-
-    # 第5步:Token限制下的截断与处理
-    MAX_TOKEN = 3000
-    from request_llm.bridge_all import model_info
-    enc = model_info["gpt-3.5-turbo"]['tokenizer']
-    def get_token_fn(txt): return len(enc.encode(txt, disallowed_special=()))
-
-
-    # 第6步:任务函数
-    mutable_return = [None for _ in file_manifest]
-    observe_window = [[""] for _ in file_manifest]
-    def thread_worker(fp,index):
-        if index > 10:
-            time.sleep(60)
-            print('Openai 限制免费用户每分钟20次请求,降低请求频率中。')
-        with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-            file_content = f.read()
-        i_say_template = lambda fp, file_content: f'接下来请将以下代码中包含的所有中文转化为英文,只输出代码,文件名是{fp},文件代码是 ```{file_content}```'
-        try:
-            gpt_say = ""
-            # 分解代码文件
-            file_content_breakdown = breakdown_txt_to_satisfy_token_limit(file_content, get_token_fn, MAX_TOKEN)
-            for file_content_partial in file_content_breakdown:
-                i_say = i_say_template(fp, file_content_partial)
-                # # ** gpt request **
-                gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=[], sys_prompt=sys_prompt, observe_window=observe_window[index])
-                gpt_say_partial = extract_code_block_carefully(gpt_say_partial)
-                gpt_say += gpt_say_partial
-            mutable_return[index] = gpt_say
-        except ConnectionAbortedError as token_exceed_err:
-            print('至少一个线程任务Token溢出而失败', e)
-        except Exception as e:
-            print('至少一个线程任务意外失败', e)
-
-    # 第7步:所有线程同时开始执行任务函数
-    handles = [threading.Thread(target=thread_worker, args=(fp,index)) for index, fp in enumerate(file_manifest)]
-    for h in handles:
-        h.daemon = True
-        h.start()
-    chatbot.append(('开始了吗?', f'多线程操作已经开始'))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    # 第8步:循环轮询各个线程是否执行完毕
-    cnt = 0
-    while True:
-        cnt += 1
-        time.sleep(0.2)
-        th_alive = [h.is_alive() for h in handles]
-        if not any(th_alive): break
-        # 更好的UI视觉效果
-        observe_win = []
-        for thread_index, alive in enumerate(th_alive):
-            observe_win.append("[ ..."+observe_window[thread_index][0][-60:].replace('\n','').replace('```','...').replace(' ','.').replace('<br/>','.....').replace('$','.')+"... ]")
-        stat = [f'执行中: {obs}\n\n' if alive else '已完成\n\n' for alive, obs in zip(th_alive, observe_win)]
-        stat_str = ''.join(stat)
-        chatbot[-1] = (chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt%10+1)))
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-    # 第9步:把结果写入文件
-    for index, h in enumerate(handles):
-        h.join() # 这里其实不需要join了,肯定已经都结束了
-        fp = file_manifest[index]
-        gpt_say = mutable_return[index]
-        i_say_show_user = i_say_show_user_buffer[index]
-
-        where_to_relocate = f'gpt_log/generated_english_version/{fp}'
-        if gpt_say is not None:
-            with open(where_to_relocate, 'w+', encoding='utf-8') as f:
-                f.write(gpt_say)
-        else: # 失败
-            shutil.copyfile(file_manifest[index], where_to_relocate)
-        chatbot.append((i_say_show_user, f'[Local Message] 已完成{os.path.abspath(fp)}的转化,\n\n存入{os.path.abspath(where_to_relocate)}'))
-        history.append(i_say_show_user); history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        time.sleep(1)
-
-    # 第10步:备份一个文件
-    res = write_results_to_file(history)
-    chatbot.append(("生成一份任务执行报告", res))
-    yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, get_conf, select_api_key
+from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import datetime
 
@@ -33,7 +33,7 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
         raise RuntimeError(response.content.decode())
     # 文件保存到本地
     r = requests.get(image_url, proxies=proxies)
-    file_path = 'gpt_log/image_gen/'
+    file_path = f'{get_log_folder()}/image_gen/'
     os.makedirs(file_path, exist_ok=True)
     file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
     with open(file_path+file_name, 'wb+') as f: f.write(r.content)
@@ -1,4 +1,4 @@
-from toolbox import CatchException, update_ui, promote_file_to_downloadzone
+from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 import re
 
@@ -10,8 +10,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
     import time
     if file_name is None:
         file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
-    os.makedirs('./gpt_log/', exist_ok=True)
-    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
+    fp = os.path.join(get_log_folder(), file_name)
+    with open(fp, 'w', encoding='utf8') as f:
         from themes.theme import advanced_css
         f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
         for i, contents in enumerate(chatbot):
@@ -29,8 +29,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
         for h in history:
             f.write("\n>>>" + h)
         f.write('</code>')
-    promote_file_to_downloadzone(f'./gpt_log/{file_name}', rename_file=file_name, chatbot=chatbot)
-    return '对话历史写入:' + os.path.abspath(f'./gpt_log/{file_name}')
+    promote_file_to_downloadzone(fp, rename_file=file_name, chatbot=chatbot)
+    return '对话历史写入:' + os.path.abspath(fp)
 
 def gen_file_preview(file_name):
     try:
@@ -106,7 +106,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
     if not success:
         if txt == "": txt = '空空如也的输入栏'
         import glob
-        local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)])
+        local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
         chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -132,8 +132,8 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
     """
 
     import glob, os
-    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True)])
-    for f in glob.glob(f'gpt_log/**/chatGPT对话历史*.html', recursive=True):
+    local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
+    for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True):
         os.remove(f)
     chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
 
@@ -71,11 +72,13 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
             history.extend([i_say,gpt_say])
             this_paper_history.extend([i_say,gpt_say])
 
-        res = write_results_to_file(history)
+        res = write_history_to_file(history)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
         chatbot.append(("完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    res = write_results_to_file(history)
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("所有文件都总结完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
@@ -1,5 +1,6 @@
-from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
+from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder
 
 def split_audio_file(filename, split_duration=1000):
     """
@@ -15,7 +16,7 @@ def split_audio_file(filename, split_duration=1000):
     """
     from moviepy.editor import AudioFileClip
     import os
-    os.makedirs('gpt_log/mp3/cut/', exist_ok=True)  # 创建存储切割音频的文件夹
+    os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/cut/", exist_ok=True)  # 创建存储切割音频的文件夹
 
     # 读取音频文件
     audio = AudioFileClip(filename)
@@ -31,8 +32,8 @@ def split_audio_file(filename, split_duration=1000):
         start_time = split_points[i]
         end_time = split_points[i + 1]
         split_audio = audio.subclip(start_time, end_time)
-        split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")
-        filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3")
+        split_audio.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3")
+        filelist.append(f"{get_log_folder(plugin_name='audio')}/mp3/cut/{filename[0]}_{i}.mp3")
 
     audio.close()
     return filelist
@@ -52,7 +53,7 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
         'Authorization': f"Bearer {api_key}"
     }
 
-    os.makedirs('gpt_log/mp3/', exist_ok=True)
+    os.makedirs(f"{get_log_folder(plugin_name='audio')}/mp3/", exist_ok=True)
     for index, fp in enumerate(file_manifest):
         audio_history = []
         # 提取文件扩展名
@@ -60,8 +61,8 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
         # 提取视频中的音频
         if ext not in [".mp3", ".wav", ".m4a", ".mpga"]:
             audio_clip = AudioFileClip(fp)
-            audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3')
-            fp = f'gpt_log/mp3/output{index}.mp3'
+            audio_clip.write_audiofile(f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3")
+            fp = f"{get_log_folder(plugin_name='audio')}/mp3/output{index}.mp3"
         # 调用whisper模型音频转文字
         voice = split_audio_file(fp)
         for j, i in enumerate(voice):
@@ -113,18 +114,19 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
             history=audio_history,
             sys_prompt="总结文章。"
         )
 
         history.extend([i_say, gpt_say])
         audio_history.extend([i_say, gpt_say])
 
-        res = write_results_to_file(history)
+        res = write_history_to_file(history)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
         chatbot.append((f"第{index + 1}段音频完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     # 删除中间文件夹
     import shutil
-    shutil.rmtree('gpt_log/mp3')
-    res = write_results_to_file(history)
+    shutil.rmtree(f"{get_log_folder(plugin_name='audio')}/mp3")
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("所有音频都总结完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history)
 
@@ -1,7 +1,7 @@
 import glob, time, os, re
 from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
-from toolbox import CatchException, report_execption, write_history_to_file
-from toolbox import promote_file_to_downloadzone, get_log_folder
+from toolbox import CatchException, report_execption, get_log_folder
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = False
 
 class PaperFileGroup():
@@ -1,5 +1,6 @@
 from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import read_and_clean_pdf_text
 from .crazy_utils import input_clipping
@@ -99,8 +100,8 @@ do not have too much repetitive information, numerical values using the original
     _, final_results = input_clipping("", final_results, max_token_limit=3200)
     yield from update_ui(chatbot=chatbot, history=final_results) # 注意这里的历史记录被替代了
 
-    res = write_results_to_file(file_write_buffer, file_name=gen_time_str())
-    promote_file_to_downloadzone(res.split('\t')[-1], chatbot=chatbot)
+    res = write_history_to_file(file_write_buffer)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     yield from update_ui(chatbot=chatbot, history=final_results) # 刷新界面
 
 
@@ -1,6 +1,7 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 
 fast_debug = False
 
@@ -115,7 +116,8 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
     chatbot[-1] = (i_say, gpt_say)
     history.append(i_say); history.append(gpt_say)
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-    res = write_results_to_file(history)
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
@@ -1,6 +1,6 @@
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption, get_log_folder
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
-from toolbox import write_history_to_file, get_log_folder
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 from .crazy_utils import read_and_clean_pdf_text
@@ -218,10 +218,11 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
         final = ["一、论文概况\n\n---\n\n", paper_meta_info.replace('# ', '### ') + '\n\n---\n\n', "二、论文翻译", ""]
         final.extend(gpt_response_collection_md)
         create_report_file_name = f"{os.path.basename(fp)}.trans.md"
-        res = write_results_to_file(final, file_name=create_report_file_name)
+        res = write_history_to_file(final, create_report_file_name)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
 
         # 更新UI
-        generated_conclusion_files.append(f'./gpt_log/{create_report_file_name}')
+        generated_conclusion_files.append(f'{get_log_folder()}/{create_report_file_name}')
         chatbot.append((f"{fp}完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
 
@@ -27,7 +28,8 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
         if not fast_debug: time.sleep(2)
 
     if not fast_debug:
-        res = write_results_to_file(history)
+        res = write_history_to_file(history)
+        promote_file_to_downloadzone(res, chatbot=chatbot)
         chatbot.append(("完成了吗?", res))
         yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
@@ -46,7 +46,7 @@ explain_msg = """
 
 from pydantic import BaseModel, Field
 from typing import List
-from toolbox import CatchException, update_ui, gen_time_str
+from toolbox import CatchException, update_ui, is_the_upload_folder
 from toolbox import update_ui_lastest_msg, disable_auto_promotion
 from request_llm.bridge_all import predict_no_ui_long_connection
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -112,7 +112,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 
     # 用简单的关键词检测用户意图
     is_certain, _ = analyze_intention_with_simple_rules(txt)
-    if txt.startswith('private_upload/') and len(txt) == 34:
+    if is_the_upload_folder(txt):
        state.set_state(chatbot=chatbot, key='has_provided_explaination', value=False)
        appendix_msg = "\n\n**很好,您已经上传了文件**,现在请您描述您的需求。"
 
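Note: is_the_upload_folder replaces the brittle hard-coded test above (txt.startswith('private_upload/') and len(txt) == 34), and the same substitution appears again in bridge_chatgpt further down. Its implementation is not shown in this diff; a hedged sketch of the idea, with a hypothetical body built around the new config key:

    from toolbox import get_conf

    def is_the_upload_folder_sketch(string):
        # Hypothetical equivalent: the prefix comes from config instead of the literal 'private_upload/'
        PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
        return string.startswith(PATH_PRIVATE_UPLOAD + '/')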
@@ -1,5 +1,6 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = True
 
 
@@ -110,7 +111,8 @@ def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     # <-------- 写入文件,退出 ---------->
-    res = write_results_to_file(history)
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
     chatbot.append(("完成了吗?", res))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
|
@ -109,9 +109,9 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
|||||||
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
|
||||||
history = [] # 清空历史,以免输入溢出
|
history = [] # 清空历史,以免输入溢出
|
||||||
import glob
|
import glob
|
||||||
file_manifest = [f for f in glob.glob('./*.py') if ('test_project' not in f) and ('gpt_log' not in f)] + \
|
file_manifest = [f for f in glob.glob('./*.py')] + \
|
||||||
[f for f in glob.glob('./crazy_functions/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]+ \
|
[f for f in glob.glob('./crazy_functions/**/*.py')]+ \
|
||||||
[f for f in glob.glob('./request_llm/*.py') if ('test_project' not in f) and ('gpt_log' not in f)]
|
[f for f in glob.glob('./request_llm/**/*.py')]
|
||||||
project_folder = './'
|
project_folder = './'
|
||||||
if len(file_manifest) == 0:
|
if len(file_manifest) == 0:
|
||||||
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
|
report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
|
||||||
|
@@ -1,7 +1,7 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
+from toolbox import CatchException, report_execption
+from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-fast_debug = False
 
 
 def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -17,32 +17,29 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
         chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-        if not fast_debug:
-            msg = '正常'
-            # ** gpt request **
-            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # 带超时倒计时
-            chatbot[-1] = (i_say_show_user, gpt_say)
-            history.append(i_say_show_user); history.append(gpt_say)
-            yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-            if not fast_debug: time.sleep(2)
+        msg = '正常'
+        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, llm_kwargs, chatbot, history=[], sys_prompt=system_prompt)   # 带超时倒计时
+        chatbot[-1] = (i_say_show_user, gpt_say)
+        history.append(i_say_show_user); history.append(gpt_say)
+        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+        time.sleep(2)
 
     all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
     i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
     chatbot.append((i_say, "[Local Message] waiting gpt response."))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    if not fast_debug:
-        msg = '正常'
-        # ** gpt request **
-        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)   # 带超时倒计时
-
-        chatbot[-1] = (i_say, gpt_say)
-        history.append(i_say); history.append(gpt_say)
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
-        res = write_results_to_file(history)
-        chatbot.append(("完成了吗?", res))
-        yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+    msg = '正常'
+    # ** gpt request **
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say, llm_kwargs, chatbot, history=history, sys_prompt=system_prompt)   # 带超时倒计时
+
+    chatbot[-1] = (i_say, gpt_say)
+    history.append(i_say); history.append(gpt_say)
+    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
+    res = write_history_to_file(history)
+    promote_file_to_downloadzone(res, chatbot=chatbot)
+    chatbot.append(("完成了吗?", res))
+    yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面
 
 
@@ -2,8 +2,8 @@
 # @Time    : 2023/4/19
 # @Author  : Spike
 # @Descr   :
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file, get_log_folder
+from toolbox import update_ui, get_conf
+from toolbox import CatchException
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 
 
@@ -30,14 +30,13 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
 
 @CatchException
 def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-    chatbot.append(['清除本地缓存数据', '执行中. 删除 gpt_log & private_upload'])
+    chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     import shutil, os
-    gpt_log_dir = os.path.join(os.path.dirname(__file__), '..', 'gpt_log')
-    private_upload_dir = os.path.join(os.path.dirname(__file__), '..', 'private_upload')
-    shutil.rmtree(gpt_log_dir, ignore_errors=True)
-    shutil.rmtree(private_upload_dir, ignore_errors=True)
+    PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
+    shutil.rmtree(PATH_LOGGING, ignore_errors=True)
+    shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True)
 
     chatbot.append(['清除本地缓存数据', '执行完成'])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -299,7 +299,6 @@
     "地址🚀": "Address 🚀",
     "感谢热情的": "Thanks to the enthusiastic",
     "开发者们❤️": "Developers ❤️",
-    "所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "All inquiry records will be automatically saved in the local directory ./gpt_log/chat_secrets.log",
     "请注意自我隐私保护哦!": "Please pay attention to self-privacy protection!",
     "当前模型": "Current model",
     "输入区": "Input area",
@@ -892,7 +891,6 @@
     "保存当前对话": "Save current conversation",
     "您可以调用“LoadConversationHistoryArchive”还原当下的对话": "You can call 'LoadConversationHistoryArchive' to restore the current conversation",
     "警告!被保存的对话历史可以被使用该系统的任何人查阅": "Warning! The saved conversation history can be viewed by anyone using this system",
-    "gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT conversation history *.html",
     "正在查找对话历史文件": "Looking for conversation history file",
     "html格式": "HTML format",
     "找不到任何html文件": "No HTML files found",
@@ -908,7 +906,6 @@
     "pip install pywin32 用于doc格式": "pip install pywin32 for doc format",
     "仅支持Win平台": "Only supports Win platform",
     "打开文件": "Open file",
-    "private_upload里面的文件名在解压zip后容易出现乱码": "The file name in private_upload is prone to garbled characters after unzipping",
     "rar和7z格式正常": "RAR and 7z formats are normal",
     "故可以只分析文章内容": "So you can only analyze the content of the article",
     "不输入文件名": "Do not enter the file name",
@@ -1364,7 +1361,6 @@
     "注意文章中的每一句话都要翻译": "Please translate every sentence in the article",
     "一、论文概况": "I. Overview of the paper",
     "二、论文翻译": "II. Translation of the paper",
-    "/gpt_log/总结论文-": "/gpt_log/Summary of the paper-",
     "给出输出文件清单": "Provide a list of output files",
     "第 0 步": "Step 0",
     "切割PDF": "Split PDF",
@@ -1564,7 +1560,6 @@
     "广义速度": "Generalized velocity",
     "粒子的固有": "Intrinsic of particle",
     "一个包含所有切割音频片段文件路径的列表": "A list containing the file paths of all segmented audio clips",
-    "/gpt_log/翻译-": "Translation log-",
     "计算文件总时长和切割点": "Calculate total duration and cutting points of the file",
     "总结音频": "Summarize audio",
     "作者": "Author",
@@ -2339,7 +2334,6 @@
     "将文件拖动到文件上传区": "Drag and drop the file to the file upload area",
     "如果意图模糊": "If the intent is ambiguous",
     "星火认知大模型": "Spark Cognitive Big Model",
-    "执行中. 删除 gpt_log & private_upload": "Executing. Delete gpt_log & private_upload",
     "默认 Color = secondary": "Default Color = secondary",
     "此处也不需要修改": "No modification is needed here",
     "⭐ ⭐ ⭐ 分析用户意图": "⭐ ⭐ ⭐ Analyze user intent",
@@ -301,7 +301,6 @@
     "缺少的依赖": "不足している依存関係",
     "紫色": "紫色",
     "唤起高级参数输入区": "高度なパラメータ入力エリアを呼び出す",
-    "所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "すべての問い合わせ記録は自動的にローカルディレクトリ./gpt_log/chat_secrets.logに保存されます",
     "则换行符更有可能表示段落分隔": "したがって、改行記号は段落の区切りを表す可能性がより高いです",
     ";4、引用数量": ";4、引用数量",
     "中转网址预览": "中継ウェブサイトのプレビュー",
@@ -448,7 +447,6 @@
     "表示函数是否成功执行": "関数が正常に実行されたかどうかを示す",
     "一般原样传递下去就行": "通常はそのまま渡すだけでよい",
     "琥珀色": "琥珀色",
-    "gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT対話履歴*.html",
     "jittorllms 没有 sys_prompt 接口": "jittorllmsにはsys_promptインターフェースがありません",
     "清除": "クリア",
     "小于正文的": "本文より小さい",
@@ -1234,7 +1232,6 @@
     "找不到任何前端相关文件": "No frontend-related files can be found",
     "Not enough point. API2D账户点数不足": "Not enough points. API2D account points are insufficient",
     "当前版本": "Current version",
-    "/gpt_log/总结论文-": "/gpt_log/Summary paper-",
     "1. 临时解决方案": "1. Temporary solution",
     "第8步": "Step 8",
     "历史": "History",
@@ -314,7 +314,6 @@
     "请用markdown格式输出": "請用 Markdown 格式輸出",
     "模仿ChatPDF": "模仿 ChatPDF",
     "等待多久判定为超时": "等待多久判定為超時",
-    "/gpt_log/总结论文-": "/gpt_log/總結論文-",
     "请结合互联网信息回答以下问题": "請結合互聯網信息回答以下問題",
     "IP查询频率受限": "IP查詢頻率受限",
     "高级参数输入区的显示提示": "高級參數輸入區的顯示提示",
@@ -511,7 +510,6 @@
     "將生成的報告自動投射到文件上傳區": "將生成的報告自動上傳到文件區",
     "函數插件作者": "函數插件作者",
     "將要匹配的模式": "將要匹配的模式",
-    "所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log": "所有詢問記錄將自動保存在本地目錄./gpt_log/chat_secrets.log",
     "正在分析一个项目的源代码": "正在分析一個專案的源代碼",
     "使每个段落之间有两个换行符分隔": "使每個段落之間有兩個換行符分隔",
     "并在被装饰的函数上执行": "並在被裝飾的函數上執行",
@@ -1059,7 +1057,6 @@
     "重试中": "重試中",
     "月": "月份",
     "localhost意思是代理软件安装在本机上": "localhost意思是代理軟體安裝在本機上",
-    "gpt_log/**/chatGPT对话历史*.html": "gpt_log/**/chatGPT對話歷史*.html",
     "的长度必须小于 2500 个 Token": "長度必須小於 2500 個 Token",
     "抽取可用的api-key": "提取可用的api-key",
     "增强报告的可读性": "增強報告的可讀性",
main.py
@@ -8,7 +8,7 @@ def main():
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
-    ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')
+    ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING')
 
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@ -21,12 +21,12 @@ def main():
 
     # 问询记录, python 版本建议3.9+(越新越好)
     import logging, uuid
-    os.makedirs("gpt_log", exist_ok=True)
+    os.makedirs(PATH_LOGGING, exist_ok=True)
-    try:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+    try:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, encoding="utf-8", format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
-    except:logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+    except:logging.basicConfig(filename=f"{PATH_LOGGING}/chat_secrets.log", level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
     # Disable logging output from the 'httpx' logger
     logging.getLogger("httpx").setLevel(logging.WARNING)
-    print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")
+    print(f"所有问询记录将自动保存在本地目录./{PATH_LOGGING}/chat_secrets.log, 请注意自我隐私保护哦!")
 
     # 一些普通功能模块
     from core_functional import get_core_functions
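The main.py hunks above stop hard-coding "gpt_log" and read PATH_LOGGING through get_conf instead. Since config.py values can be overridden by an untracked config_private.py (as the config_private.py comments in these hunks note), a deployment that wants its logs and uploads elsewhere only needs an override there. A minimal sketch, assuming relative paths are acceptable; the folder names are made up for illustration:

    # config_private.py -- illustrative override, not part of this commit
    PATH_LOGGING = "data/logs"            # hypothetical custom log folder
    PATH_PRIVATE_UPLOAD = "data/uploads"  # hypothetical custom upload folder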
@ -33,9 +33,11 @@ import functools
 import re
 import pickle
 import time
+from toolbox import get_conf
 
-CACHE_FOLDER = "gpt_log"
+CACHE_FOLDER, = get_conf('PATH_LOGGING')
-blacklist = ['multi-language', 'gpt_log', '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
+blacklist = ['multi-language', CACHE_FOLDER, '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
 
 # LANG = "TraditionalChinese"
 # TransPrompt = f"Replace each json value `#` with translated results in Traditional Chinese, e.g., \"原始文本\":\"翻譯後文字\". Keep Json format. Do not answer #."
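get_conf returns one value per requested key as a tuple, which is why a single-key read needs the trailing comma in `CACHE_FOLDER, = get_conf('PATH_LOGGING')`. A rough sketch of that calling pattern; get_conf_demo is only a stand-in for illustration, not the real toolbox.get_conf, and the default values are assumed from config.py:

    def get_conf_demo(*keys):
        defaults = {'PATH_LOGGING': 'gpt_log', 'PATH_PRIVATE_UPLOAD': 'private_upload'}
        return tuple(defaults[k] for k in keys)   # always a tuple, one entry per key

    CACHE_FOLDER, = get_conf_demo('PATH_LOGGING')                        # trailing comma unpacks the 1-tuple
    UPLOAD_DIR, LOG_DIR = get_conf_demo('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')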
@ -21,7 +21,7 @@ import importlib
 
 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
-from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder
 proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG = \
     get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG')
 
@ -138,7 +138,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
 
     # check mis-behavior
-    if raw_input.startswith('private_upload/') and len(raw_input) == 34:
+    if is_the_upload_folder(raw_input):
         chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需要点击“函数插件区”按钮进行处理,而不是点击“提交”按钮。")
         yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
         time.sleep(2)
toolbox.py (134 changed lines)
@ -5,6 +5,8 @@ import inspect
 import re
 import os
 import gradio
+import shutil
+import glob
 from latex2mathml.converter import convert as tex2mathml
 from functools import wraps, lru_cache
 pj = os.path.join
@ -171,7 +173,7 @@ def HotReload(f):
 ========================================================================
 第二部分
 其他小工具:
-    - write_results_to_file: 将结果写入markdown文件中
+    - write_history_to_file: 将结果写入markdown文件中
     - regular_txt_to_markdown: 将普通文本转换为Markdown格式的文本。
     - report_execption: 向chatbot中添加简单的意外错误信息
     - text_divide_paragraph: 将文本按照段落分隔符分割开,生成带有段落标签的HTML代码。
@ -203,36 +205,6 @@ def get_reduce_token_percent(text):
     return 0.5, '不详'
 
 
-def write_results_to_file(history, file_name=None):
-    """
-    将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
-    """
-    import os
-    import time
-    if file_name is None:
-        # file_name = time.strftime("chatGPT分析报告%Y-%m-%d-%H-%M-%S", time.localtime()) + '.md'
-        file_name = 'GPT-Report-' + gen_time_str() + '.md'
-    os.makedirs('./gpt_log/', exist_ok=True)
-    with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
-        f.write('# GPT-Academic Report\n')
-        for i, content in enumerate(history):
-            try:
-                if type(content) != str: content = str(content)
-            except:
-                continue
-            if i % 2 == 0:
-                f.write('## ')
-            try:
-                f.write(content)
-            except:
-                # remove everything that cannot be handled by utf8
-                f.write(content.encode('utf-8', 'ignore').decode())
-            f.write('\n\n')
-    res = '以上材料已经被写入:\t' + os.path.abspath(f'./gpt_log/{file_name}')
-    print(res)
-    return res
-
-
 def write_history_to_file(history, file_basename=None, file_fullname=None):
     """
     将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
@ -241,9 +213,9 @@ def write_history_to_file(history, file_basename=None, file_fullname=None):
     import time
     if file_fullname is None:
         if file_basename is not None:
-            file_fullname = os.path.join(get_log_folder(), file_basename)
+            file_fullname = pj(get_log_folder(), file_basename)
         else:
-            file_fullname = os.path.join(get_log_folder(), f'GPT-Academic-{gen_time_str()}.md')
+            file_fullname = pj(get_log_folder(), f'GPT-Academic-{gen_time_str()}.md')
     os.makedirs(os.path.dirname(file_fullname), exist_ok=True)
     with open(file_fullname, 'w', encoding='utf8') as f:
         f.write('# GPT-Academic Report\n')
@ -519,7 +491,7 @@ def find_recent_files(directory):
     if not os.path.exists(directory):
         os.makedirs(directory, exist_ok=True)
     for filename in os.listdir(directory):
-        file_path = os.path.join(directory, filename)
+        file_path = pj(directory, filename)
         if file_path.endswith('.log'):
             continue
         created_time = os.path.getmtime(file_path)
@ -534,7 +506,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
     # 将文件复制一份到下载区
     import shutil
     if rename_file is None: rename_file = f'{gen_time_str()}-{os.path.basename(file)}'
-    new_path = os.path.join(get_log_folder(), rename_file)
+    new_path = pj(get_log_folder(), rename_file)
     # 如果已经存在,先删除
     if os.path.exists(new_path) and not os.path.samefile(new_path, file): os.remove(new_path)
     # 把文件复制过去
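write_history_to_file and promote_file_to_downloadzone are typically used together: a report is written under get_log_folder() and then copied to the download area. A minimal sketch of that pattern as it might appear in a plugin; only the toolbox signatures come from the hunks above, while the function name and file name below are invented for illustration:

    from toolbox import write_history_to_file, promote_file_to_downloadzone
    from toolbox import get_log_folder, gen_time_str, pj

    def export_report(history, chatbot):
        # Write the dialogue history as Markdown inside the configured log folder...
        report_path = pj(get_log_folder(), f'report-{gen_time_str()}.md')
        write_history_to_file(history, file_fullname=report_path)
        # ...then copy it into the download area so the UI can offer it for download.
        promote_file_to_downloadzone(report_path, chatbot=chatbot)
        return report_path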
@ -549,44 +521,70 @@ def disable_auto_promotion(chatbot):
     chatbot._cookies.update({'files_to_promote': []})
     return
 
-def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
+def is_the_upload_folder(string):
+    PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
+    pattern = r'^PATH_PRIVATE_UPLOAD/[A-Za-z0-9_-]+/\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}$'
+    pattern = pattern.replace('PATH_PRIVATE_UPLOAD', PATH_PRIVATE_UPLOAD)
+    if re.match(pattern, string): return True
+    else: return False
+
+def del_outdated_uploads(outdate_time_seconds):
+    PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
+    current_time = time.time()
+    one_hour_ago = current_time - outdate_time_seconds
+    # Get a list of all subdirectories in the PATH_PRIVATE_UPLOAD folder
+    # Remove subdirectories that are older than one hour
+    for subdirectory in glob.glob(f'{PATH_PRIVATE_UPLOAD}/*/*'):
+        subdirectory_time = os.path.getmtime(subdirectory)
+        if subdirectory_time < one_hour_ago:
+            try: shutil.rmtree(subdirectory)
+            except: pass
+    return
+
+def on_file_uploaded(request: gradio.Request, files, chatbot, txt, txt2, checkboxes, cookies):
     """
     当文件被上传时的回调函数
     """
     if len(files) == 0:
         return chatbot, txt
-    import shutil
-    import os
-    import time
-    import glob
-    from toolbox import extract_archive
-    try:
-        shutil.rmtree('./private_upload/')
-    except:
-        pass
+
+    # 移除过时的旧文件从而节省空间&保护隐私
+    outdate_time_seconds = 60
+    del_outdated_uploads(outdate_time_seconds)
+
+    # 创建工作路径
+    user_name = "default" if not request.username else request.username
     time_tag = gen_time_str()
-    os.makedirs(f'private_upload/{time_tag}', exist_ok=True)
-    err_msg = ''
+    PATH_PRIVATE_UPLOAD, = get_conf('PATH_PRIVATE_UPLOAD')
+    target_path_base = pj(PATH_PRIVATE_UPLOAD, user_name, time_tag)
+    os.makedirs(target_path_base, exist_ok=True)
+
+    # 逐个文件转移到目标路径
+    upload_msg = ''
     for file in files:
         file_origin_name = os.path.basename(file.orig_name)
-        shutil.copy(file.name, f'private_upload/{time_tag}/{file_origin_name}')
-        err_msg += extract_archive(f'private_upload/{time_tag}/{file_origin_name}',
-                                   dest_dir=f'private_upload/{time_tag}/{file_origin_name}.extract')
-    moved_files = [fp for fp in glob.glob('private_upload/**/*', recursive=True)]
-    if "底部输入区" in checkboxes:
-        txt = ""
-        txt2 = f'private_upload/{time_tag}'
+        this_file_path = pj(target_path_base, file_origin_name)
+        shutil.move(file.name, this_file_path)
+        upload_msg += extract_archive(file_path=this_file_path, dest_dir=this_file_path+'.extract')
+
+    # 整理文件集合
+    moved_files = [fp for fp in glob.glob(f'{target_path_base}/**/*', recursive=True)]
+    if "底部输入区" in checkboxes:
+        txt, txt2 = "", target_path_base
     else:
-        txt = f'private_upload/{time_tag}'
-        txt2 = ""
+        txt, txt2 = target_path_base, ""
+
+    # 输出消息
     moved_files_str = '\t\n\n'.join(moved_files)
     chatbot.append(['我上传了文件,请查收',
                     f'[Local Message] 收到以下文件: \n\n{moved_files_str}' +
                     f'\n\n调用路径参数已自动修正到: \n\n{txt}' +
-                    f'\n\n现在您点击任意函数插件时,以上文件将被作为输入参数'+err_msg])
+                    f'\n\n现在您点击任意函数插件时,以上文件将被作为输入参数'+upload_msg])
+
+    # 记录近期文件
     cookies.update({
         'most_recent_uploaded': {
-            'path': f'private_upload/{time_tag}',
+            'path': target_path_base,
             'time': time.time(),
             'time_str': time_tag
         }})
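is_the_upload_folder accepts exactly one user folder plus one time-stamped folder under the configured upload root (the same timestamp format gen_time_str produces). With the default PATH_PRIVATE_UPLOAD of "private_upload", a few illustrative calls; the user name and timestamp below are made up:

    from toolbox import is_the_upload_folder

    is_the_upload_folder("private_upload/default/2023-08-01-12-30-05")            # True: root/user/timestamp
    is_the_upload_folder("private_upload/default/2023-08-01-12-30-05/paper.pdf")  # False: points at a file inside the folder
    is_the_upload_folder("private_upload/")                                       # False: missing user and timestamp parts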
@ -595,11 +593,12 @@ def on_file_uploaded(files, chatbot, txt, txt2, checkboxes, cookies):
 
 def on_report_generated(cookies, files, chatbot):
     from toolbox import find_recent_files
+    PATH_LOGGING, = get_conf('PATH_LOGGING')
     if 'files_to_promote' in cookies:
         report_files = cookies['files_to_promote']
         cookies.pop('files_to_promote')
     else:
-        report_files = find_recent_files('gpt_log')
+        report_files = find_recent_files(PATH_LOGGING)
     if len(report_files) == 0:
         return cookies, None, chatbot
     # files.extend(report_files)
@ -909,34 +908,35 @@ def zip_folder(source_folder, dest_folder, zip_name):
         return
 
     # Create the name for the zip file
-    zip_file = os.path.join(dest_folder, zip_name)
+    zip_file = pj(dest_folder, zip_name)
 
     # Create a ZipFile object
     with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
         # Walk through the source folder and add files to the zip file
         for foldername, subfolders, filenames in os.walk(source_folder):
             for filename in filenames:
-                filepath = os.path.join(foldername, filename)
+                filepath = pj(foldername, filename)
                 zipf.write(filepath, arcname=os.path.relpath(filepath, source_folder))
 
     # Move the zip file to the destination folder (if it wasn't already there)
     if os.path.dirname(zip_file) != dest_folder:
-        os.rename(zip_file, os.path.join(dest_folder, os.path.basename(zip_file)))
-        zip_file = os.path.join(dest_folder, os.path.basename(zip_file))
+        os.rename(zip_file, pj(dest_folder, os.path.basename(zip_file)))
+        zip_file = pj(dest_folder, os.path.basename(zip_file))
 
     print(f"Zip file created at {zip_file}")
 
 def zip_result(folder):
     t = gen_time_str()
-    zip_folder(folder, './gpt_log/', f'{t}-result.zip')
+    zip_folder(folder, get_log_folder(), f'{t}-result.zip')
-    return pj('./gpt_log/', f'{t}-result.zip')
+    return pj(get_log_folder(), f'{t}-result.zip')
 
 def gen_time_str():
     import time
     return time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
 
 def get_log_folder(user='default', plugin_name='shared'):
-    _dir = os.path.join(os.path.dirname(__file__), 'gpt_log', user, plugin_name)
+    PATH_LOGGING, = get_conf('PATH_LOGGING')
+    _dir = pj(PATH_LOGGING, user, plugin_name)
     if not os.path.exists(_dir): os.makedirs(_dir)
     return _dir
 
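Since get_log_folder now resolves its root from PATH_LOGGING, code that used to build "gpt_log/..." paths by hand can go through it and automatically follow the configured location. A small hypothetical caller; the plugin name and file name are invented for illustration:

    from toolbox import get_log_folder, pj

    # All output lands under the configured PATH_LOGGING root, e.g. gpt_log/default/demo_plugin/
    out_dir = get_log_folder(user='default', plugin_name='demo_plugin')
    out_file = pj(out_dir, 'result.txt')
    with open(out_file, 'w', encoding='utf8') as f:
        f.write('demo output')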