Merge branch 'frontier' into azure_multiple_models

commit 349c399967

config.py (13 changed lines)
@@ -83,11 +83,10 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 
 # Model selection (note: LLM_MODEL is the model selected by default; it *must* be included in the AVAIL_LLM_MODELS list)
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = [
-    "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
-    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-    "chatglm", "moss", "newbing", "stack-claude"
-]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
+                    "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
+                    "chatglm", "moss", "newbing", "claude-2"]
 # P.S. Other available models also include ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 #      "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
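The merged list keeps the api2d 16k variant and replaces stack-claude with claude-2. Per the comment above, LLM_MODEL must remain a member of AVAIL_LLM_MODELS; a minimal, hypothetical config override selecting one of the newly listed models would be a one-line change:

    # Hypothetical override in config.py: default to the api2d 16k model.
    # LLM_MODEL must stay inside AVAIL_LLM_MODELS for the selection to be valid.
    LLM_MODEL = "api2d-gpt-3.5-turbo-16k"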
@@ -198,6 +197,10 @@ GROBID_URLS = [
 ALLOW_RESET_CONFIG = False
 
 
+# Whether to run code in a Docker container when using the AutoGen plugin
+AUTOGEN_USE_DOCKER = True
+
+
 # Temporary upload folder location, do not modify
 PATH_PRIVATE_UPLOAD = "private_upload"
 
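This new flag is consumed further down in PluginMultiprocessManager and forwarded into AutoGen's code_execution_config (see the pipe.py and agent hunks below). A minimal sketch of the flow, using the names from those hunks (get_conf returns a tuple here, hence the trailing-comma unpacking; work_dir stands in for the manager's autogen_work_dir):

    from toolbox import get_conf

    use_docker, = get_conf('AUTOGEN_USE_DOCKER')  # tuple return -> note the unpacking comma
    code_execution_config = {"work_dir": work_dir, "use_docker": use_docker}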
@@ -539,18 +539,18 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')
 
-    # try:
-    #     from crazy_functions.多智能体 import 多智能体终端
-    #     function_plugins.update({
-    #         "多智能体终端(微软AutoGen)": {
-    #             "Group": "智能体",
-    #             "Color": "stop",
-    #             "AsButton": True,
-    #             "Function": HotReload(多智能体终端)
-    #         }
-    #     })
-    # except:
-    #     print('Load function plugin failed')
+    try:
+        from crazy_functions.多智能体 import 多智能体终端
+        function_plugins.update({
+            "多智能体终端(微软AutoGen)": {
+                "Group": "智能体",
+                "Color": "stop",
+                "AsButton": True,
+                "Function": HotReload(多智能体终端)
+            }
+        })
+    except:
+        print('Load function plugin failed')
 
     # try:
     #     from crazy_functions.chatglm微调工具 import 微调数据集生成
@@ -1,7 +1,7 @@
 from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
 from toolbox import report_execption, get_log_folder, update_ui_lastest_msg, Singleton
 from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
-from crazy_functions.agent_fns.autogen_general import AutoGenGeneral
+from crazy_functions.agent_fns.general import AutoGenGeneral
 import time
 
 
@@ -36,12 +36,8 @@ class AutoGenGeneral(PluginMultiprocessManager):
         # ⭐⭐ executed in the child process
         input = input.content
         with ProxyNetworkActivate("AutoGen"):
             from autogen import AssistantAgent, UserProxyAgent
-            config_list = [{
-                'model': self.llm_kwargs['llm_model'],
-                'api_key': self.llm_kwargs['api_key'],
-            },]
-            code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":True}
+            config_list = self.get_config_list()
+            code_execution_config={"work_dir": self.autogen_work_dir, "use_docker":self.use_docker}
             agents = self.define_agents()
             user_proxy = None
             assistant = None
@@ -67,6 +63,20 @@ class AutoGenGeneral(PluginMultiprocessManager):
             tb_str = '```\n' + trimmed_format_exc() + '```'
             self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
 
+    def get_config_list(self):
+        model = self.llm_kwargs['llm_model']
+        api_base = None
+        if self.llm_kwargs['llm_model'].startswith('api2d-'):
+            model = self.llm_kwargs['llm_model'][len('api2d-'):]
+            api_base = "https://openai.api2d.net/v1"
+        config_list = [{
+            'model': model,
+            'api_key': self.llm_kwargs['api_key'],
+        },]
+        if api_base is not None:
+            config_list[0]['api_base'] = api_base
+        return config_list
+
     def subprocess_worker(self, child_conn):
         # ⭐⭐ executed in the child process
         self.child_conn = child_conn
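get_config_list strips the api2d- prefix and points api_base at the api2d relay, so AutoGen only ever sees a plain OpenAI-style model name; for all other models the api_base key is omitted and the library's default endpoint applies. Worked through for the two cases, given the llm_kwargs shape used above:

    # llm_kwargs = {'llm_model': 'api2d-gpt-4', 'api_key': 'sk-...'} yields:
    [{'model': 'gpt-4', 'api_key': 'sk-...', 'api_base': 'https://openai.api2d.net/v1'}]
    # llm_kwargs = {'llm_model': 'gpt-4', 'api_key': 'sk-...'} yields:
    [{'model': 'gpt-4', 'api_key': 'sk-...'}]  # no api_base -> default OpenAI endpoint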
@@ -1,4 +1,5 @@
-from toolbox import get_log_folder, update_ui, gen_time_str, trimmed_format_exc, promote_file_to_downloadzone
+from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone
+from crazy_functions.agent_fns.watchdog import WatchDog
 import time, os
 
 class PipeCom():
@@ -19,6 +20,16 @@ class PluginMultiprocessManager():
         self.system_prompt = system_prompt
         self.web_port = web_port
         self.alive = True
+        self.use_docker, = get_conf('AUTOGEN_USE_DOCKER')
+
+        # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
+        timeout_seconds = 5*60
+        self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
+        self.heartbeat_watchdog.begin_watch()
+
+    def feed_heartbeat_watchdog(self):
+        # feed this `dog`, so the dog will not `bark` (bark_fn will terminate the instance)
+        self.heartbeat_watchdog.feed()
 
     def is_alive(self):
         return self.alive
@@ -50,7 +61,7 @@ class PluginMultiprocessManager():
             # get the file extension of fp
             file_type = fp.split('.')[-1]
             # if it is a text file, display the text content directly
-            if file_type in ['png', 'jpg']:
+            if file_type.lower() in ['png', 'jpg']:
                 image_path = os.path.abspath(fp)
                 self.chatbot.append(['检测到新生图像:', f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'])
                 yield from update_ui(chatbot=self.chatbot, history=self.history)
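The extension check is now case-insensitive, so files written as .PNG or .JPG also get the inline image preview:

    'result.PNG'.split('.')[-1]          # 'PNG' -> previously failed the ['png', 'jpg'] check
    'result.PNG'.split('.')[-1].lower()  # 'png' -> now matches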
@@ -98,9 +109,17 @@ class PluginMultiprocessManager():
             self.terminate()
             return "terminate"
 
+        # patience = 10
+
         while True:
             time.sleep(0.5)
+            if not self.alive:
+                # the heartbeat watchdog may have killed it
+                self.terminate()
+                return "terminate"
+
             if self.parent_conn.poll():
+                self.feed_heartbeat_watchdog()
                 if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
                     self.chatbot.pop(-1) # remove the last line
                 msg = self.parent_conn.recv() # PipeCom
@@ -124,10 +143,17 @@ class PluginMultiprocessManager():
                     # do not terminate here, leave the subprocess_worker instance alive
                     return "wait_feedback"
             else:
+                self.feed_heartbeat_watchdog()
                 if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]:
+                    # begin_waiting_time = time.time()
                     self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"])
                 self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")]
                 yield from update_ui(chatbot=self.chatbot, history=self.history)
+                # if time.time() - begin_waiting_time > patience:
+                #     self.chatbot.append([f"结束", "等待超时, 终止AutoGen程序。"])
+                #     yield from update_ui(chatbot=self.chatbot, history=self.history)
+                #     self.terminate()
+                #     return "terminate"
 
         self.terminate()
         return "terminate"
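Taken together, the two hunks above implement a browser-driven keepalive: the UI generator polls the pipe every 0.5 s and feeds the watchdog on both the message branch and the idle branch, so feeds only stop when the browser stops driving the generator; after 5 minutes without a feed the watchdog's bark_fn (self.terminate) fires and the loop observes self.alive going false. A condensed sketch of the protocol (not verbatim):

    # Condensed from the loop above:
    while True:
        time.sleep(0.5)
        if not self.alive:              # watchdog fired: 5 min without a feed
            self.terminate()
            return "terminate"
        self.feed_heartbeat_watchdog()  # fed each iteration while the UI is attached
        ...                             # drain the pipe / refresh the chatbot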
crazy_functions/agent_fns/watchdog.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+import threading, time
+
+class WatchDog():
+    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
+        self.last_feed = None
+        self.timeout = timeout
+        self.bark_fn = bark_fn
+        self.interval = interval
+        self.msg = msg
+        self.kill_dog = False
+
+    def watch(self):
+        while True:
+            if self.kill_dog: break
+            if time.time() - self.last_feed > self.timeout:
+                if len(self.msg) > 0: print(self.msg)
+                self.bark_fn()
+                break
+            time.sleep(self.interval)
+
+    def begin_watch(self):
+        self.last_feed = time.time()
+        th = threading.Thread(target=self.watch)
+        th.daemon = True
+        th.start()
+
+    def feed(self):
+        self.last_feed = time.time()
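watch() runs on a daemon thread, so a forgotten watchdog can never keep the interpreter alive at exit, and bark_fn fires at most once before the thread ends. A minimal usage sketch mirroring how pipe.py wires it up (work_remaining and do_some_work are placeholders for the caller's own loop):

    from crazy_functions.agent_fns.watchdog import WatchDog

    dog = WatchDog(timeout=5*60, bark_fn=lambda: print("no heartbeat, terminating"), interval=5)
    dog.begin_watch()          # records the first feed time, then starts the daemon thread
    while work_remaining():    # placeholder loop condition
        do_some_work()         # placeholder workload
        dog.feed()             # reset the timer; go `timeout` seconds without this and bark_fn runs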
@@ -1,10 +1,7 @@
 # In this source code, ⭐ = key step
 """
 Tests:
-    - Crop the image, keeping the bottom half
-    - Swap the blue and red channels of the image
-    - Convert the image to grayscale
-    - Convert the csv file to an excel spreadsheet
+    - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg
 
 Testing:
     - Crop the image, keeping the bottom half.
@@ -35,7 +32,8 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
     web_port        the port the software is currently running on
     """
     # check whether the current model meets the requirements
-    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k']
+    supported_llms = ['gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k',
+                      'api2d-gpt-3.5-turbo-16k', 'api2d-gpt-4']
     llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
     if llm_kwargs['llm_model'] not in supported_llms:
         chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}."])
@@ -1,6 +1,7 @@
 from toolbox import update_ui
 from toolbox import CatchException, get_conf, markdown_convertion
 from crazy_functions.crazy_utils import input_clipping
+from crazy_functions.agent_fns.watchdog import WatchDog
 from request_llm.bridge_all import predict_no_ui_long_connection
 import threading, time
 import numpy as np
@@ -8,32 +9,6 @@ from .live_audio.aliyunASR import AliyunASR
 import json
 import re
 
-class WatchDog():
-    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
-        self.last_feed = None
-        self.timeout = timeout
-        self.bark_fn = bark_fn
-        self.interval = interval
-        self.msg = msg
-        self.kill_dog = False
-
-    def watch(self):
-        while True:
-            if self.kill_dog: break
-            if time.time() - self.last_feed > self.timeout:
-                if len(self.msg) > 0: print(self.msg)
-                self.bark_fn()
-                break
-            time.sleep(self.interval)
-
-    def begin_watch(self):
-        self.last_feed = time.time()
-        th = threading.Thread(target=self.watch)
-        th.daemon = True
-        th.start()
-
-    def feed(self):
-        self.last_feed = time.time()
-
 def chatbot2history(chatbot):
     history = []
@@ -13,6 +13,7 @@
 
 4. Run `python multi_language.py`.
    Note: You need to run it multiple times to increase translation coverage because GPT makes mistakes sometimes.
+   (You can also run `CACHE_ONLY=True python multi_language.py` to use cached translation mapping)
 
 5. Find the translated program in `multi-language\English\*`
 
@@ -35,6 +36,8 @@ import pickle
 import time
 from toolbox import get_conf
 
+CACHE_ONLY = os.environ.get('CACHE_ONLY', False)
+
 CACHE_FOLDER, = get_conf('PATH_LOGGING')
 
 blacklist = ['multi-language', CACHE_FOLDER, '.git', 'private_upload', 'multi_language.py', 'build', '.github', '.vscode', '__pycache__', 'venv']
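One caveat worth noting: os.environ.get returns a string when the variable is set, so any non-empty value enables cache-only mode; even `CACHE_ONLY=False python multi_language.py` would skip translation, because the string 'False' is truthy:

    import os

    os.environ['CACHE_ONLY'] = 'False'
    flag = os.environ.get('CACHE_ONLY', False)
    print(bool(flag))  # True -- 'False' is a non-empty string, hence truthy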
@@ -336,7 +339,10 @@ def step_1_core_key_translate():
             if d not in cached_translation_keys:
                 need_translate.append(d)
 
-    need_translate_mapping = trans(need_translate, language=LANG_STD, special=True)
+    if CACHE_ONLY:
+        need_translate_mapping = {}
+    else:
+        need_translate_mapping = trans(need_translate, language=LANG_STD, special=True)
     map_to_json(need_translate_mapping, language=LANG_STD)
     cached_translation = read_map_from_json(language=LANG_STD)
     cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
@@ -476,8 +482,10 @@ def step_2_core_key_translate():
             if d not in cached_translation_keys:
                 need_translate.append(d)
 
-    up = trans_json(need_translate, language=LANG, special=False)
+    if CACHE_ONLY:
+        up = {}
+    else:
+        up = trans_json(need_translate, language=LANG, special=False)
     map_to_json(up, language=LANG)
     cached_translation = read_map_from_json(language=LANG)
 LANG_STD = 'std'
@@ -182,6 +182,15 @@ model_info = {
         "token_cnt": get_token_num_gpt4,
     },
 
+    "api2d-gpt-3.5-turbo-16k": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": api2d_endpoint,
+        "max_token": 1024*16,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
+
     # map chatglm directly to chatglm2
     "chatglm": {
         "fn_with_ui": chatglm_ui,
@@ -318,7 +318,10 @@ def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
     what_i_ask_now["role"] = "user"
     what_i_ask_now["content"] = inputs
     messages.append(what_i_ask_now)
-    model = llm_kwargs['llm_model'].strip('api2d-')
+    model = llm_kwargs['llm_model']
+    if llm_kwargs['llm_model'].startswith('api2d-'):
+        model = llm_kwargs['llm_model'][len('api2d-'):]
 
     if model == "gpt-3.5-random": # pick randomly, to work around openai rate limits
         model = random.choice([
             "gpt-3.5-turbo",
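This change fixes a real bug, not just style: str.strip(chars) treats its argument as a set of characters to trim from both ends, not as a prefix, so stripping 'api2d-' can corrupt model names that merely begin or end with those characters:

    "azure-gpt-4".strip('api2d-')  # 'zure-gpt-4' -- leading 'a' is in the char set
    "api2d-gpt-4".strip('api2d-')  # 'gpt-4'      -- only happens to work
    "api2d-gpt-4"[len('api2d-'):]  # 'gpt-4'      -- the new, prefix-safe form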