Merge branch 'frontier'
Commit: 80b209fa0c
config.py
@@ -74,13 +74,13 @@ MAX_RETRY = 2

 # Default plugin-group options
-DEFAULT_FN_GROUPS = ['对话', '编程', '学术']
+DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']

 # Model selection (note: LLM_MODEL is the model selected by default; it *must* be included in AVAIL_LLM_MODELS)
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
-                    "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
 # P.S. other available models include ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
 #                                      "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
@@ -183,6 +183,9 @@ ALLOW_RESET_CONFIG = False
 PATH_PRIVATE_UPLOAD = "private_upload"
 # Location of the log folder; do not modify
 PATH_LOGGING = "gpt_log"
+# Besides connecting to OpenAI, the other scenarios in which the proxy may be used; do not modify
+WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme"]

 """
 Diagram of how the online LLM configuration options relate to each other
core_functional.py
@@ -11,7 +11,8 @@ def get_core_functions():
         # Prefix, prepended before your input, e.g. to describe your request: translate, explain code, polish, etc.
         "Prefix":   r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                     r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
-                    r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
+                    r"Firstly, you should provide the polished paragraph. "
+                    r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
         # Suffix, appended after your input, e.g. combined with the prefix it can wrap your input in quotation marks
         "Suffix":   r"",
         # Button color (default: secondary)
@@ -27,17 +28,18 @@ def get_core_functions():
         "Suffix":   r"",
     },
     "查找语法错误": {
-        "Prefix":   r"Can you help me ensure that the grammar and the spelling is correct? " +
-                    r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
-                    r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
-                    r"put the original text the first column, " +
-                    r"put the corrected text in the second column and highlight the key words you fixed.""\n"
+        "Prefix":   r"Help me ensure that the grammar and the spelling is correct. "
+                    r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
+                    r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, "
+                    r"put the original text the first column, "
+                    r"put the corrected text in the second column and highlight the key words you fixed. "
+                    r"Finally, please provide the proofreaded text.""\n\n"
                     r"Example:""\n"
                     r"Paragraph: How is you? Do you knows what is it?""\n"
                     r"| Original sentence | Corrected sentence |""\n"
                     r"| :--- | :--- |""\n"
                     r"| How **is** you? | How **are** you? |""\n"
-                    r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
+                    r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n\n"
                     r"Below is a paragraph from an academic paper. "
                     r"You need to report all grammar and spelling mistakes as the example before."
                     + "\n\n",
crazy_functional.py
@@ -39,7 +39,7 @@ def get_crazy_functions():

     function_plugins = {
         "虚空终端": {
-            "Group": "对话|编程|学术",
+            "Group": "对话|编程|学术|智能体",
             "Color": "stop",
             "AsButton": True,
             "Function": HotReload(虚空终端)
@@ -521,6 +521,18 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')

+    try:
+        from crazy_functions.函数动态生成 import 函数动态生成
+        function_plugins.update({
+            "动态代码解释器(CodeInterpreter)": {
+                "Group": "智能体",
+                "Color": "stop",
+                "AsButton": True,
+                "Function": HotReload(函数动态生成)
+            }
+        })
+    except:
+        print('Load function plugin failed')
+
     # try:
     #     from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter
crazy_functions/Langchain知识库.py
@@ -53,14 +53,14 @@ def 知识库问答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
     print('Checking Text2vec ...')
     from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-    with ProxyNetworkActivate():    # temporarily activate the proxy network
+    with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
         HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

     # < ------------------- build the knowledge base --------------- >
     chatbot.append(['<br/>'.join(file_manifest), "正在构建知识库..."])
     yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
     print('Establishing knowledge archive ...')
-    with ProxyNetworkActivate():    # temporarily activate the proxy network
+    with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
         kai = knowledge_archive_interface()
         kai.feed_archive(file_manifest=file_manifest, id=kai_id)
         kai_files = kai.get_loaded_file()
crazy_functions/crazy_utils.py
@@ -651,7 +651,7 @@ class knowledge_archive_interface():
         from toolbox import ProxyNetworkActivate
         print('Checking Text2vec ...')
         from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-        with ProxyNetworkActivate():    # temporarily activate the proxy network
+        with ProxyNetworkActivate('Download_LLM'):    # temporarily activate the proxy network
             self.text2vec_large_chinese = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese")

         return self.text2vec_large_chinese
@@ -807,3 +807,10 @@ class construct_html():
         with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
             f.write(self.html_string.encode('utf-8', 'ignore').decode())
         return os.path.join(get_log_folder(), file_name)
+
+
+def get_plugin_arg(plugin_kwargs, key, default):
+    # if the argument is an empty string, drop it
+    if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
+    # otherwise, the normal case
+    return plugin_kwargs.get(key, default)
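A quick sketch of the new helper's semantics (the values below are hypothetical): an advanced-arg left as an empty string falls back to the default, anything else is returned as-is.

```python
# Hypothetical values, illustrating get_plugin_arg only
plugin_kwargs = {"file_path_arg": ""}       # user left the advanced-arg box empty
get_plugin_arg(plugin_kwargs, "file_path_arg", default=False)   # -> False (empty string is dropped)

plugin_kwargs = {"file_path_arg": "./build/ants.jpg"}
get_plugin_arg(plugin_kwargs, "file_path_arg", default=False)   # -> "./build/ants.jpg"
```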
crazy_functions/gen_fns/gen_fns_shared.py (new file, 70 lines)
@@ -0,0 +1,70 @@
+import time
+import importlib
+from toolbox import trimmed_format_exc, gen_time_str, get_log_folder
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
+import multiprocessing
+
+def get_class_name(class_string):
+    import re
+    # Use regex to extract the class name
+    class_name = re.search(r'class (\w+)\(', class_string).group(1)
+    return class_name
+
+def try_make_module(code, chatbot):
+    module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
+    fn_path = f'{get_log_folder(plugin_name="gen_plugin_verify")}/{module_file}.py'
+    with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
+    promote_file_to_downloadzone(fn_path, chatbot=chatbot)
+    class_name = get_class_name(code)
+    manager = multiprocessing.Manager()
+    return_dict = manager.dict()
+    p = multiprocessing.Process(target=is_function_successfully_generated, args=(fn_path, class_name, return_dict))
+    # only has 10 seconds to run
+    p.start(); p.join(timeout=10)
+    if p.is_alive(): p.terminate(); p.join()
+    p.close()
+    return return_dict["success"], return_dict['traceback']
+
+# check is_function_successfully_generated
+def is_function_successfully_generated(fn_path, class_name, return_dict):
+    return_dict['success'] = False
+    return_dict['traceback'] = ""
+    try:
+        # Create a spec for the module
+        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
+        # Load the module
+        example_module = importlib.util.module_from_spec(module_spec)
+        module_spec.loader.exec_module(example_module)
+        # Now you can use the module
+        some_class = getattr(example_module, class_name)
+        # Now you can create an instance of the class
+        instance = some_class()
+        return_dict['success'] = True
+        return
+    except:
+        return_dict['traceback'] = trimmed_format_exc()
+        return
+
+def subprocess_worker(code, file_path, return_dict):
+    return_dict['result'] = None
+    return_dict['success'] = False
+    return_dict['traceback'] = ""
+    try:
+        module_file = 'gpt_fn_' + gen_time_str().replace('-','_')
+        fn_path = f'{get_log_folder(plugin_name="gen_plugin_run")}/{module_file}.py'
+        with open(fn_path, 'w', encoding='utf8') as f: f.write(code)
+        class_name = get_class_name(code)
+        # Create a spec for the module
+        module_spec = importlib.util.spec_from_file_location('example_module', fn_path)
+        # Load the module
+        example_module = importlib.util.module_from_spec(module_spec)
+        module_spec.loader.exec_module(example_module)
+        # Now you can use the module
+        some_class = getattr(example_module, class_name)
+        # Now you can create an instance of the class
+        instance = some_class()
+        return_dict['result'] = instance.run(file_path)
+        return_dict['success'] = True
+    except:
+        return_dict['traceback'] = trimmed_format_exc()
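For orientation, here is a self-contained sketch of the pattern these helpers implement: write the generated code to a file, import it with importlib inside a child process, and enforce a hard timeout. The file name and demo class body are hypothetical stand-ins; in the repo the code string comes from the GPT response.

```python
import importlib.util, multiprocessing, os, tempfile

code = (
    "class TerminalFunction(object):\n"
    "    def run(self, path):\n"
    "        return path.upper()\n"
)

def _worker(fn_path, return_dict):
    spec = importlib.util.spec_from_file_location('example_module', fn_path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)                 # raises if the generated code is broken
    return_dict['result'] = mod.TerminalFunction().run('./demo.txt')

if __name__ == '__main__':
    fn_path = os.path.join(tempfile.mkdtemp(), 'gpt_fn_demo.py')
    with open(fn_path, 'w', encoding='utf8') as f:
        f.write(code)
    return_dict = multiprocessing.Manager().dict()
    p = multiprocessing.Process(target=_worker, args=(fn_path, return_dict))
    p.start(); p.join(timeout=10)                # same 10-second budget as try_make_module
    if p.is_alive(): p.terminate(); p.join()
    print(return_dict.get('result'))             # -> ./DEMO.TXT
```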
crazy_functions/函数动态生成.py (new file, 252 lines)
@@ -0,0 +1,252 @@
+# In this source file, ⭐ = key step
+"""
+测试:
+    - 裁剪图像,保留下半部分
+    - 交换图像的蓝色通道和红色通道
+    - 将图像转为灰度图像
+    - 将csv文件转excel表格
+
+Testing:
+    - Crop the image, keeping the bottom half.
+    - Swap the blue channel and red channel of the image.
+    - Convert the image to grayscale.
+    - Convert the CSV file to an Excel spreadsheet.
+"""
+
+from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, is_the_upload_folder
+from toolbox import promote_file_to_downloadzone, get_log_folder, update_ui_lastest_msg
+from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
+from .crazy_utils import input_clipping, try_install_deps
+from crazy_functions.gen_fns.gen_fns_shared import is_function_successfully_generated
+from crazy_functions.gen_fns.gen_fns_shared import get_class_name
+from crazy_functions.gen_fns.gen_fns_shared import subprocess_worker
+from crazy_functions.gen_fns.gen_fns_shared import try_make_module
+import os
+import time
+import glob
+import multiprocessing
+
+templete = """
+```python
+import ...  # Put dependencies here, e.g. import numpy as np.
+
+class TerminalFunction(object): # Do not change the name of the class, The name of the class must be `TerminalFunction`
+
+    def run(self, path):    # The name of the function must be `run`, it takes only a positional argument.
+        # rewrite the function you have just written here
+        ...
+        return generated_file_path
+```
+"""
+
+def inspect_dependency(chatbot, history):
+    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+    return True
+
+def get_code_block(reply):
+    import re
+    pattern = r"```([\s\S]*?)```" # regex pattern to match code blocks
+    matches = re.findall(pattern, reply) # find all code blocks in text
+    if len(matches) == 1:
+        return matches[0].strip('python') # code block
+    for match in matches:
+        if 'class TerminalFunction' in match:
+            return match.strip('python') # code block
+    raise RuntimeError("GPT is not generating proper code.")
+
+def gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history):
+    # input
+    prompt_compose = [
+        f'Your job:\n'
+        f'1. write a single Python function, which takes a path of a `{file_type}` file as the only argument and returns a `string` containing the result of analysis or the path of generated files. \n',
+        f"2. You should write this function to perform following task: " + txt + "\n",
+        f"3. Wrap the output python function with markdown codeblock."
+    ]
+    i_say = "".join(prompt_compose)
+    demo = []
+
+    # step 1
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say, inputs_show_user=i_say,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=demo,
+        sys_prompt= r"You are a world-class programmer."
+    )
+    history.extend([i_say, gpt_say])
+    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+
+    # step 2
+    prompt_compose = [
+        "If previous stage is successful, rewrite the function you have just written to satisfy following templete: \n",
+        templete
+    ]
+    i_say = "".join(prompt_compose); inputs_show_user = "If previous stage is successful, rewrite the function you have just written to satisfy executable templete. "
+    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
+        inputs=i_say, inputs_show_user=inputs_show_user,
+        llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+        sys_prompt= r"You are a programmer. You need to replace `...` with valid packages, do not give `...` in your answer!"
+    )
+    code_to_return = gpt_say
+    history.extend([i_say, gpt_say])
+    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+
+    # # step 3
+    # i_say = "Please list to packages to install to run the code above. Then show me how to use `try_install_deps` function to install them."
+    # i_say += 'For instance. `try_install_deps(["opencv-python", "scipy", "numpy"])`'
+    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
+    #     inputs=i_say, inputs_show_user=inputs_show_user,
+    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+    #     sys_prompt= r"You are a programmer."
+    # )
+
+    # # # step 3
+    # i_say = "Show me how to use `pip` to install packages to run the code above. "
+    # i_say += 'For instance. `pip install -r opencv-python scipy numpy`'
+    # installation_advance = yield from request_gpt_model_in_new_thread_with_ui_alive(
+    #     inputs=i_say, inputs_show_user=i_say,
+    #     llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
+    #     sys_prompt= r"You are a programmer."
+    # )
+    installation_advance = ""
+
+    return code_to_return, installation_advance, txt, file_type, llm_kwargs, chatbot, history
+
+def for_immediate_show_off_when_possible(file_type, fp, chatbot):
+    if file_type in ['png', 'jpg']:
+        image_path = os.path.abspath(fp)
+        chatbot.append(['这是一张图片, 展示如下:',
+            f'本地文件地址: <br/>`{image_path}`<br/>'+
+            f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
+        ])
+    return chatbot
+
+def have_any_recent_upload_files(chatbot):
+    _5min = 5 * 60
+    if not chatbot: return False    # chatbot is None
+    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
+    if not most_recent_uploaded: return False   # most_recent_uploaded is None
+    if time.time() - most_recent_uploaded["time"] < _5min: return True  # most_recent_uploaded is new
+    else: return False  # most_recent_uploaded is too old
+
+def get_recent_file_prompt_support(chatbot):
+    most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
+    path = most_recent_uploaded['path']
+    return path
+
+@CatchException
+def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    """
+    txt             text the user typed in the input box, e.g. a paragraph to translate, or a path containing files to process
+    llm_kwargs      GPT model parameters such as temperature and top_p, usually passed through as-is
+    plugin_kwargs   plugin parameters, not used for much yet
+    chatbot         handle of the chat display box, used to show output to the user
+    history         chat history, the preceding context
+    system_prompt   silent system prompt for GPT
+    web_port        port on which the software is currently running
+    """
+
+    # clear the history
+    history = []
+
+    # basic info: feature, contributor
+    chatbot.append(["正在启动: 插件动态生成插件", "插件动态生成, 执行开始, 作者Binary-Husky."])
+    yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+
+    # ⭐ is there anything in the file-upload area?
+    # 1. if there are files: use them as the function argument
+    # 2. if there are no files: the argument would have to be extracted with GPT (deferred for now; 虚空终端 already implements similar code)
+    file_list = []
+    if get_plugin_arg(plugin_kwargs, key="file_path_arg", default=False):
+        file_path = get_plugin_arg(plugin_kwargs, key="file_path_arg", default=None)
+        file_list.append(file_path)
+        yield from update_ui_lastest_msg(f"当前文件: {file_path}", chatbot, history, 1)
+    elif have_any_recent_upload_files(chatbot):
+        file_dir = get_recent_file_prompt_support(chatbot)
+        file_list = glob.glob(os.path.join(file_dir, '**/*'), recursive=True)
+        yield from update_ui_lastest_msg(f"当前文件处理列表: {file_list}", chatbot, history, 1)
+    else:
+        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        return # 2. no files found
+    if len(file_list) == 0:
+        chatbot.append(["文件检索", "没有发现任何近期上传的文件。"])
+        yield from update_ui_lastest_msg("没有发现任何近期上传的文件。", chatbot, history, 1)
+        return # 2. no files found
+
+    # read the file
+    file_type = file_list[0].split('.')[-1]
+
+    # sanity check
+    if is_the_upload_folder(txt):
+        yield from update_ui_lastest_msg(f"请在输入框内填写需求, 然后再次点击该插件! 至于您的文件,不用担心, 文件路径 {txt} 已经被记忆. ", chatbot, history, 1)
+        return
+
+    # get down to business
+    MAX_TRY = 3
+    for j in range(MAX_TRY):  # retry up to MAX_TRY times
+        traceback = ""
+        try:
+            # ⭐ here we go!
+            code, installation_advance, txt, file_type, llm_kwargs, chatbot, history = \
+                yield from gpt_interact_multi_step(txt, file_type, llm_kwargs, chatbot, history)
+            chatbot.append(["代码生成阶段结束", ""])
+            yield from update_ui_lastest_msg(f"正在验证上述代码的有效性 ...", chatbot, history, 1)
+            # ⭐ extract the code block
+            code = get_code_block(code)
+            # ⭐ check the module
+            ok, traceback = try_make_module(code, chatbot)
+            # code generation done
+            if ok: break
+        except Exception as e:
+            if not traceback: traceback = trimmed_format_exc()
+        # handle exceptions
+        if not traceback: traceback = trimmed_format_exc()
+        yield from update_ui_lastest_msg(f"第 {j+1}/{MAX_TRY} 次代码生成尝试, 失败了~ 别担心, 我们5秒后再试一次... \n\n此次我们的错误追踪是\n```\n{traceback}\n```\n", chatbot, history, 5)
+
+    # code generation finished, start executing
+    TIME_LIMIT = 15
+    yield from update_ui_lastest_msg(f"开始创建新进程并执行代码! 时间限制 {TIME_LIMIT} 秒. 请等待任务完成... ", chatbot, history, 1)
+    manager = multiprocessing.Manager()
+    return_dict = manager.dict()
+
+    # ⭐ the final step: process the files one by one
+    for file_path in file_list:
+        if os.path.exists(file_path):
+            chatbot.append([f"正在处理文件: {file_path}", f"请稍等..."])
+            chatbot = for_immediate_show_off_when_possible(file_type, file_path, chatbot)
+            yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+        else:
+            continue
+
+        # ⭐⭐⭐ subprocess_worker ⭐⭐⭐
+        p = multiprocessing.Process(target=subprocess_worker, args=(code, file_path, return_dict))
+        # ⭐ start executing, limited to TIME_LIMIT seconds
+        p.start(); p.join(timeout=TIME_LIMIT)
+        if p.is_alive(): p.terminate(); p.join()
+        p.close()
+        res = return_dict['result']
+        success = return_dict['success']
+        traceback = return_dict['traceback']
+        if not success:
+            if not traceback: traceback = trimmed_format_exc()
+            chatbot.append(["执行失败了", f"错误追踪\n```\n{trimmed_format_exc()}\n```\n"])
+            # chatbot.append(["如果是缺乏依赖,请参考以下建议", installation_advance])
+            yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+            return
+
+        # finished successfully, wrap up
+        res = str(res)
+        if os.path.exists(res):
+            chatbot.append(["执行成功了,结果是一个有效文件", "结果:" + res])
+            new_file_path = promote_file_to_downloadzone(res, chatbot=chatbot)
+            chatbot = for_immediate_show_off_when_possible(file_type, new_file_path, chatbot)
+            yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
+        else:
+            chatbot.append(["执行成功了,结果是一个字符串", "结果:" + res])
+            yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
main.py (3 changed lines)
@@ -266,7 +266,7 @@ def main():
         cookies.update({'uuid': uuid.uuid4()})
         return cookies
     demo.load(init_cookie, inputs=[cookies, chatbot], outputs=[cookies])
-    demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{ChatBotHeight();}')
+    demo.load(lambda: 0, inputs=None, outputs=None, _js='()=>{GptAcademicJavaScriptInit();}')

     # gradio's inbrowser trigger is not very reliable; roll back to the original browser-opening function
     def auto_opentab_delay():
@@ -285,6 +285,7 @@ def main():

     auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
+        quiet=True,
         server_name="0.0.0.0",
         server_port=PORT,
         favicon_path="docs/logo.png",
request_llm/bridge_all.py
@@ -126,6 +126,15 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },

+    "gpt-4-32k": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 32768,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
+
     # azure openai
     "azure-gpt-3.5":{
         "fn_with_ui": chatgpt_ui,

@@ -136,6 +145,15 @@ model_info = {
         "token_cnt": get_token_num_gpt35,
     },

+    "azure-gpt-4":{
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": azure_endpoint,
+        "max_token": 8192,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     # api_2d
     "api2d-gpt-3.5-turbo": {
         "fn_with_ui": chatgpt_ui,
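With these registrations in place, the new models are selected through the config.py options shown earlier in this commit; a minimal sketch using only values that appear in this diff:

```python
# config.py: LLM_MODEL must appear in AVAIL_LLM_MODELS
LLM_MODEL = "gpt-4-32k"
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
```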
request_llm/bridge_chatglm.py
@@ -3,7 +3,7 @@ from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
-from toolbox import update_ui, get_conf
+from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe

 load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"

@@ -48,16 +48,17 @@ class GetGLMHandle(Process):

         while True:
             try:
-                if self.chatglm_model is None:
-                    self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
-                    if device=='cpu':
-                        self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
-                    else:
-                        self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
-                    self.chatglm_model = self.chatglm_model.eval()
-                    break
-                else:
-                    break
+                with ProxyNetworkActivate('Download_LLM'):
+                    if self.chatglm_model is None:
+                        self.chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
+                        if device=='cpu':
+                            self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).float()
+                        else:
+                            self.chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True).half().cuda()
+                        self.chatglm_model = self.chatglm_model.eval()
+                        break
+                    else:
+                        break
             except:
                 retry += 1
                 if retry > 3:
request_llm/bridge_llama2.py
@@ -30,7 +30,7 @@ class GetONNXGLMHandle(LocalLLMHandle):
             with open(os.path.expanduser('~/.cache/huggingface/token'), 'w') as f:
                 f.write(huggingface_token)
         model_id = 'meta-llama/Llama-2-7b-chat-hf'
-        with ProxyNetworkActivate():
+        with ProxyNetworkActivate('Download_LLM'):
             self._tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=huggingface_token)
             # use fp16
             model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=huggingface_token).eval()
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
@@ -1,5 +1,4 @@
 protobuf
-transformers>=4.27.1
 cpm_kernels
 torch>=1.10
 mdtex2html
@@ -2,6 +2,5 @@ jittor >= 1.3.7.9
 jtorch >= 0.1.3
 torch
 torchvision
-transformers==4.26.1
 pandas
 jieba
@@ -1,5 +1,4 @@
 torch
-transformers==4.25.1
 sentencepiece
 datasets
 accelerate
requirements.txt
@@ -2,7 +2,7 @@
 pydantic==1.10.11
 tiktoken>=0.3.3
 requests[socks]
-transformers
+transformers>=4.27.1
 python-markdown-math
 beautifulsoup4
 prompt_toolkit
tests/test_plugins.py
@@ -6,11 +6,14 @@
 import os, sys
 def validate_path(): dir_name = os.path.dirname(__file__); root_dir_assume = os.path.abspath(dir_name + '/..'); os.chdir(root_dir_assume); sys.path.append(root_dir_assume)
 validate_path() # cd into the project root
-from tests.test_utils import plugin_test

 if __name__ == "__main__":
+    from tests.test_utils import plugin_test
+    plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
+
     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
-    plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')
+    # plugin_test(plugin='crazy_functions.批量翻译PDF文档_NOUGAT->批量翻译PDF文档', main_input='crazy_functions/test_project/pdf_and_word/aaai.pdf')

     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='调用插件,对C:/Users/fuqingxu/Desktop/旧文件/gpt/chatgpt_academic/crazy_functions/latex_fns中的python文件进行解析')
tests/test_utils.py
@@ -74,7 +74,7 @@ def plugin_test(main_input, plugin, advanced_arg=None):
         plugin_kwargs['plugin_kwargs'] = advanced_arg
     my_working_plugin = silence_stdout(plugin)(**plugin_kwargs)

-    with Live(Markdown(""), auto_refresh=False) as live:
+    with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
         for cookies, chat, hist, msg in my_working_plugin:
             md_str = vt.chat_to_markdown_str(chat)
             md = Markdown(md_str)
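The added vertical_overflow="visible" makes rich's Live display render output taller than the terminal instead of cropping it, which matters once a plugin transcript grows long. A minimal sketch, assuming the rich package is installed:

```python
from rich.live import Live
from rich.markdown import Markdown

# "visible" disables cropping; the default "ellipsis" truncates tall renderables
with Live(Markdown(""), auto_refresh=False, vertical_overflow="visible") as live:
    live.update(Markdown("# a transcript taller than the terminal ..."), refresh=True)
```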
@@ -23,4 +23,63 @@
 /* status bar height */
 .min.svelte-1yrv54 {
     min-height: var(--size-12);
+}
+
+/* copy btn */
+.message-btn-row {
+    width: 19px;
+    height: 19px;
+    position: absolute;
+    left: calc(100% + 3px);
+    top: 0;
+    display: flex;
+    justify-content: space-between;
+}
+/* .message-btn-row-leading, .message-btn-row-trailing {
+    display: inline-flex;
+    gap: 4px;
+} */
+.message-btn-row button {
+    font-size: 18px;
+    align-self: center;
+    align-items: center;
+    flex-wrap: nowrap;
+    white-space: nowrap;
+    display: inline-flex;
+    flex-direction: row;
+    gap: 4px;
+    padding-block: 2px !important;
+}
+
+/* Scrollbar Width */
+::-webkit-scrollbar {
+    width: 12px;
+}
+
+/* Scrollbar Track */
+::-webkit-scrollbar-track {
+    background: #f1f1f1;
+    border-radius: 12px;
+}
+
+/* Scrollbar Handle */
+::-webkit-scrollbar-thumb {
+    background: #888;
+    border-radius: 12px;
+}
+
+/* Scrollbar Handle on hover */
+::-webkit-scrollbar-thumb:hover {
+    background: #555;
+}
+
+/* input btns: clear, reset, stop */
+#input-panel button {
+    min-width: min(80px, 100%);
+}
+
+/* input btns: clear, reset, stop */
+#input-panel2 button {
+    min-width: min(80px, 100%);
 }
@@ -1,4 +1,85 @@
-function ChatBotHeight() {
+function gradioApp() {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    const elems = document.getElementsByTagName('gradio-app');
+    const elem = elems.length == 0 ? document : elems[0];
+    if (elem !== document) {
+        elem.getElementById = function(id) {
+            return document.getElementById(id);
+        };
+    }
+    return elem.shadowRoot ? elem.shadowRoot : elem;
+}
+
+const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
+const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
+
+function addCopyButton(botElement) {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    // Copy bot button
+    const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
+    if (messageBtnColumnElement) {
+        // Do something if .message-btn-column exists, for example, remove it
+        // messageBtnColumnElement.remove();
+        return;
+    }
+
+    var copyButton = document.createElement('button');
+    copyButton.classList.add('copy-bot-btn');
+    copyButton.setAttribute('aria-label', 'Copy');
+    copyButton.innerHTML = copyIcon;
+    copyButton.addEventListener('click', async () => {
+        const textToCopy = botElement.innerText;
+        try {
+            if ("clipboard" in navigator) {
+                await navigator.clipboard.writeText(textToCopy);
+                copyButton.innerHTML = copiedIcon;
+                setTimeout(() => {
+                    copyButton.innerHTML = copyIcon;
+                }, 1500);
+            } else {
+                const textArea = document.createElement("textarea");
+                textArea.value = textToCopy;
+                document.body.appendChild(textArea);
+                textArea.select();
+                try {
+                    document.execCommand('copy');
+                    copyButton.innerHTML = copiedIcon;
+                    setTimeout(() => {
+                        copyButton.innerHTML = copyIcon;
+                    }, 1500);
+                } catch (error) {
+                    console.error("Copy failed: ", error);
+                }
+                document.body.removeChild(textArea);
+            }
+        } catch (error) {
+            console.error("Copy failed: ", error);
+        }
+    });
+    var messageBtnColumn = document.createElement('div');
+    messageBtnColumn.classList.add('message-btn-row');
+    messageBtnColumn.appendChild(copyButton);
+    botElement.appendChild(messageBtnColumn);
+}
+
+function chatbotContentChanged(attempt = 1, force = false) {
+    // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
+    for (var i = 0; i < attempt; i++) {
+        setTimeout(() => {
+            gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
+        }, i === 0 ? 0 : 200);
+    }
+}
+
+function GptAcademicJavaScriptInit() {
+    chatbotIndicator = gradioApp().querySelector('#gpt-chatbot > div.wrap');
+    var chatbotObserver = new MutationObserver(() => {
+        chatbotContentChanged(1);
+    });
+    chatbotObserver.observe(chatbotIndicator, { attributes: true, childList: true, subtree: true });
+
     function update_height(){
         var { panel_height_target, chatbot_height, chatbot } = get_elements(true);
         if (panel_height_target!=chatbot_height)
themes/theme.py
@@ -5,7 +5,7 @@ CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf('CODE_HIGHLIGHT', 'ADD_WAIFU', 'LAY

 def dynamic_set_theme(THEME):
     set_theme = gr.themes.ThemeClass()
-    with ProxyNetworkActivate():
+    with ProxyNetworkActivate('Download_Gradio_Theme'):
         logging.info('正在下载Gradio主题,请稍等。')
         if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
         if THEME.startswith('huggingface-'): THEME = THEME.lstrip('huggingface-')

@@ -16,7 +16,7 @@ def adjust_theme():

     try:
         set_theme = gr.themes.ThemeClass()
-        with ProxyNetworkActivate():
+        with ProxyNetworkActivate('Download_Gradio_Theme'):
             logging.info('正在下载Gradio主题,请稍等。')
             THEME, = get_conf('THEME')
             if THEME.startswith('Huggingface-'): THEME = THEME.lstrip('Huggingface-')
toolbox.py (13 changed lines)
@@ -527,6 +527,7 @@ def promote_file_to_downloadzone(file, rename_file=None, chatbot=None):
     if 'files_to_promote' in chatbot._cookies: current = chatbot._cookies['files_to_promote']
     else: current = []
     chatbot._cookies.update({'files_to_promote': [new_path] + current})
+    return new_path

 def disable_auto_promotion(chatbot):
     chatbot._cookies.update({'files_to_promote': []})
@@ -955,7 +956,19 @@ class ProxyNetworkActivate():
     """
     This code defines an empty context manager named TempProxy, used to put a small section of code behind a proxy
     """
+    def __init__(self, task=None) -> None:
+        self.task = task
+        if not task:
+            # no task given, so the proxy takes effect by default
+            self.valid = True
+        else:
+            # a task is given, so check it against the whitelist
+            from toolbox import get_conf
+            WHEN_TO_USE_PROXY, = get_conf('WHEN_TO_USE_PROXY')
+            self.valid = (task in WHEN_TO_USE_PROXY)
+
     def __enter__(self):
+        if not self.valid: return self
         from toolbox import get_conf
         proxies, = get_conf('proxies')
         if 'no_proxy' in os.environ: os.environ.pop('no_proxy')
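Combined with the WHEN_TO_USE_PROXY option added to config.py above, proxy activation is now scoped by a task label. A minimal sketch ('Connect_OpenAI' is a hypothetical label used only to show the non-whitelisted case):

```python
from toolbox import ProxyNetworkActivate

with ProxyNetworkActivate('Download_LLM'):       # whitelisted in WHEN_TO_USE_PROXY -> proxy applied
    ...  # e.g. pull model weights

with ProxyNetworkActivate('Connect_OpenAI'):     # hypothetical label, not whitelisted -> no-op
    ...

with ProxyNetworkActivate():                     # no task given -> proxy applied unconditionally
    ...
```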
version (4 changed lines)
@@ -1,5 +1,5 @@
 {
-  "version": 3.53,
+  "version": 3.54,
   "show_feature": true,
-  "new_feature": "支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
+  "new_feature": "新增动态代码解释器(CodeInterpreter) <-> 增加文本回答复制按钮 <-> 细分代理场合 <-> 支持动态选择不同界面主题 <-> 提高稳定性&解决多用户冲突问题 <-> 支持插件分类和更多UI皮肤外观 <-> 支持用户使用自然语言调度各个插件(虚空终端) ! <-> 改进UI,设计新主题 <-> 支持借助GROBID实现PDF高精度翻译 <-> 接入百度千帆平台和文心一言 <-> 接入阿里通义千问、讯飞星火、上海AI-Lab书生 <-> 优化一键升级 <-> 提高arxiv翻译速度和成功率"
 }