Merge remote-tracking branch 'origin/master' into multi_language
commit d29f524cec
.gitignore (vendored, 3 changed lines)
@@ -147,4 +147,5 @@ private*
 crazy_functions/test_project/pdf_and_word
 crazy_functions/test_samples
 request_llm/jittorllms
 multi-language
+request_llm/moss
README.md
@@ -267,6 +267,12 @@ Tip: click `载入对话历史存档` without selecting a file to view the history h
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
</div>

9. OpenAI audio parsing and summarization
<div align="center">
<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
</div>

## Version:
- version 3.5 (Todo): call all of this project's function plugins with natural language (high priority)
config.py
@@ -44,9 +44,10 @@ WEB_PORT = -1

# Retry limit if OpenAI does not respond (network lag, proxy failure, expired KEY)
MAX_RETRY = 2

# OpenAI model selection (gpt-4 is currently open only to approved applicants; to try gpt-4, consider api2d)
# Model selection
LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
# P.S. other available models include ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]

# Execution device (CPU/GPU) for local LLMs such as ChatGLM
LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
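For instance, switching the default backend to the Claude bridge added in this merge is a one-line change; a minimal sketch (it assumes the Slack credentials further down are also configured):

```python
# Hypothetical config.py override: make the new Slack Claude backend the default.
LLM_MODEL = "stack-claude"  # must appear in AVAIL_LLM_MODELS, as it does above
```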
@@ -75,3 +76,7 @@ NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
NEWBING_COOKIES = """
your bing cookies here
"""

# To use Slack Claude, see request_llm/README.md for the tutorial
SLACK_CLAUDE_BOT_ID = ''
SLACK_CLAUDE_USER_TOKEN = ''
core_functional.py
@@ -68,4 +68,11 @@ def get_core_functions():
        "Prefix": r"请解释以下代码:" + "\n```\n",
        "Suffix": "\n```\n",
    },
    "参考文献转Bib": {
        "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
                  r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
                  r"Items need to be transformed:",
        "Suffix": r"",
        "Visible": False,
    }
}
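For context, the UI applies an entry like `参考文献转Bib` by wrapping the user's input with its `Prefix` and `Suffix` (the same pattern appears in `bridge_stackclaude.predict` later in this diff); a minimal sketch with a hypothetical input:

```python
# Sketch: how a core-function entry is applied to the user's input.
entry = get_core_functions()["参考文献转Bib"]
user_input = "Smith J. Example paper. Journal of Examples, 2020."  # hypothetical
prompt = entry["Prefix"] + user_input + entry["Suffix"]  # final text sent to the LLM
```

The `Visible: False` flag keeps the entry out of the button row; main.py below skips such entries both when building buttons and when registering their callbacks.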
crazy_functions/crazy_functional.py
@@ -246,5 +246,15 @@ def get_crazy_functions():
            "Function": HotReload(图片生成)
        },
    })
    from crazy_functions.总结音视频 import 总结音视频
    function_plugins.update({
        "批量总结音视频(输入路径或上传压缩包)": {
            "Color": "stop",
            "AsButton": False,
            "AdvancedArgs": True,
            "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
            "Function": HotReload(总结音视频)
        }
    })
    ###################### group n plugins ###########################
    return function_plugins
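Each entry registered this way pairs UI metadata with a `HotReload`-wrapped generator; a minimal sketch of the fields the UI reads (field names are the ones used above, the plugin name is hypothetical):

```python
# Sketch of a plugin registration (hypothetical name, fields as used above).
function_plugins.update({
    "My hypothetical plugin": {
        "Color": "stop",          # button color variant
        "AsButton": False,        # offered in the dropdown rather than as a button
        "AdvancedArgs": True,     # enables the advanced-argument textbox
        "ArgsReminder": "hint shown next to the advanced-argument textbox",
        "Function": HotReload(总结音视频),  # generator with the standard plugin signature
    }
})
```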
crazy_functions/图片生成.py
@@ -55,6 +55,7 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
    history = []    # clear the history to avoid input overflow
    chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 ....."))
    yield from update_ui(chatbot=chatbot, history=history)    # refresh the UI; requesting gpt takes a while, so update the interface promptly first
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    resolution = plugin_kwargs.get("advanced_arg", '256x256')
    image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
    chatbot.append([prompt,
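The `advanced_arg` handling above is a recurring idiom in this commit (it reappears in 解析JupyterNotebook.py, 询问多个大语言模型.py and 总结音视频.py below): an empty advanced argument is popped so that `.get()` falls back to the plugin default. A condensed sketch with a hypothetical default:

```python
# Recurring idiom: treat an empty advanced_arg as absent so the default applies.
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""):
    plugin_kwargs.pop("advanced_arg")
value = plugin_kwargs.get("advanced_arg", "default-value")  # hypothetical default
```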
crazy_functions/总结音视频.py (new file, 184 lines)
@@ -0,0 +1,184 @@
from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive

def split_audio_file(filename, split_duration=1000):
    """
    Split an audio file into multiple segments of a given duration.

    Args:
        filename (str): The audio file to split.
        split_duration (int, optional): Duration of each segment, in seconds. Defaults to 1000.

    Returns:
        filelist (list): Paths of all the produced audio segments.
    """
    from moviepy.editor import AudioFileClip
    import os
    os.makedirs('gpt_log/mp3/cut/', exist_ok=True)  # folder that stores the cut segments

    # read the audio file
    audio = AudioFileClip(filename)

    # compute the total duration and the split points
    total_duration = audio.duration
    split_points = list(range(0, int(total_duration), split_duration))
    split_points.append(int(total_duration))
    filelist = []

    # cut the audio file (use the base name rather than the first character of the
    # path, so that segments from different input files do not collide)
    name = os.path.splitext(os.path.basename(filename))[0]
    for i in range(len(split_points) - 1):
        start_time = split_points[i]
        end_time = split_points[i + 1]
        split_audio = audio.subclip(start_time, end_time)
        split_audio.write_audiofile(f"gpt_log/mp3/cut/{name}_{i}.mp3")
        filelist.append(f"gpt_log/mp3/cut/{name}_{i}.mp3")

    audio.close()
    return filelist
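A usage sketch (requires `moviepy`; the input path is hypothetical):

```python
# Hypothetical usage: cut a long recording into 600-second segments.
segments = split_audio_file("gpt_log/mp3/output0.mp3", split_duration=600)
print(segments)  # ['gpt_log/mp3/cut/output0_0.mp3', 'gpt_log/mp3/cut/output0_1.mp3', ...]
```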

def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
    import os, requests
    from moviepy.editor import AudioFileClip
    from request_llm.bridge_all import model_info

    # set the OpenAI key and model
    api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
    chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']

    whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions')
    url = whisper_endpoint
    headers = {
        'Authorization': f"Bearer {api_key}"
    }

    os.makedirs('gpt_log/mp3/', exist_ok=True)
    for index, fp in enumerate(file_manifest):
        audio_history = []
        # extract the file extension
        ext = os.path.splitext(fp)[1]
        # extract the audio track from video files
        if ext not in [".mp3", ".wav", ".m4a", ".mpga"]:
            audio_clip = AudioFileClip(fp)
            audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3')
            fp = f'gpt_log/mp3/output{index}.mp3'
        # call the whisper model to transcribe audio to text
        voice = split_audio_file(fp)
        for j, i in enumerate(voice):
            with open(i, 'rb') as f:
                file_content = f.read()  # read the file content into memory
                files = {
                    'file': (os.path.basename(i), file_content),
                }
                data = {
                    "model": "whisper-1",
                    "prompt": parse_prompt,
                    'response_format': "text"
                }

            chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
            proxies, = get_conf('proxies')
            response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text

            chatbot.append(["音频解析结果", response])
            history.extend(["音频解析结果", response])
            yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

            i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
            i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。'
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=[],
                sys_prompt=f"总结音频。音频文件名{fp}"
            )

            chatbot[-1] = (i_say_show_user, gpt_say)
            history.extend([i_say_show_user, gpt_say])
            audio_history.extend([i_say_show_user, gpt_say])

        # all segments of this file have been summarized; if the file was split,
        # produce an overall summary as well
        result = "".join(audio_history)
        if len(audio_history) > 1:
            i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。"
            i_say_show_user = f'第{index + 1}段音频的主要内容:'
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs,
                chatbot=chatbot,
                history=audio_history,
                sys_prompt="总结文章。"
            )

            history.extend([i_say, gpt_say])
            audio_history.extend([i_say, gpt_say])

        res = write_results_to_file(history)
        chatbot.append((f"第{index + 1}段音频完成了吗?", res))
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    # remove the intermediate folder
    import shutil
    shutil.rmtree('gpt_log/mp3')
    res = write_results_to_file(history)
    chatbot.append(("所有音频都总结完成了吗?", res))
    yield from update_ui(chatbot=chatbot, history=history)
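For reference, the transcription call above derives OpenAI's audio endpoint from the configured chat endpoint and posts each segment as multipart form data; a condensed, self-contained sketch (API key and file path are hypothetical):

```python
import os
import requests

# Sketch of the whisper-1 request issued above (standard OpenAI-style URL assumed).
chat_endpoint = "https://api.openai.com/v1/chat/completions"
url = chat_endpoint.replace('chat/completions', 'audio/transcriptions')
headers = {'Authorization': "Bearer sk-..."}  # hypothetical API key
with open("gpt_log/mp3/cut/output0_0.mp3", 'rb') as f:  # hypothetical segment
    files = {'file': (os.path.basename(f.name), f.read())}
data = {"model": "whisper-1", "prompt": "将音频解析为简体中文", "response_format": "text"}
print(requests.post(url, headers=headers, files=files, data=data).text)
```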

@CatchException
def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT):
    import glob, os

    # basic info: feature, contributors
    chatbot.append([
        "函数插件功能?",
        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    try:
        from moviepy.editor import AudioFileClip
    except:
        report_execption(chatbot, history,
                         a=f"解析项目: {txt}",
                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # clear the history to avoid input overflow
    history = []

    # check the input argument; exit directly if none was given
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # build the manifest of files to process
    extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', '.mkv', '.flac', '.aac']

    if txt.endswith(tuple(extensions)):
        file_manifest = [txt]
    else:
        file_manifest = []
        for extension in extensions:
            file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True))

    # if no files were found
    if len(file_manifest) == 0:
        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
        return

    # start the actual task
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')
    yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history)

    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
crazy_functions/解析JupyterNotebook.py
@@ -67,6 +67,7 @@ def parseNotebook(filename, enable_markdown=1):

def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
    from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency

    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    enable_markdown = plugin_kwargs.get("advanced_arg", "1")
    try:
        enable_markdown = int(enable_markdown)
crazy_functions/询问多个大语言模型.py
@@ -45,6 +45,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
    chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……"))
    yield from update_ui(chatbot=chatbot, history=history)    # refresh the UI; requesting gpt takes a while, so update the interface promptly first

    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # supports any number of llm interfaces, joined with '&'
    llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo')    # supports any number of llm interfaces, joined with '&'
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
main.py (2 changed lines)
@@ -74,6 +74,7 @@ def main():
    with gr.Accordion("基础功能区", open=True) as area_basic_fn:
        with gr.Row():
            for k in functional:
                if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                functional[k]["Button"] = gr.Button(k, variant=variant)
    with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
@@ -144,6 +145,7 @@ def main():
    clearBtn2.click(lambda: ("",""), None, [txt, txt2])
    # register callbacks for the basic function area
    for k in functional:
        if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
        click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
        cancel_handles.append(click_handle)
    # file upload area: interaction with the chatbot after files are received
request_llm/README.md
@@ -13,6 +13,31 @@ LLM_MODEL = "chatglm"
`python main.py`
```

## Claude-Stack

- Refer to this tutorial to obtain them: https://zhuanlan.zhihu.com/p/627485689
- 1. SLACK_CLAUDE_BOT_ID
- 2. SLACK_CLAUDE_USER_TOKEN

- Add the tokens to config.py
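A sketch of the resulting config.py entries (placeholder values; Slack user tokens conventionally begin with `xoxp-`):

```python
# config.py (hypothetical placeholder values)
SLACK_CLAUDE_BOT_ID = 'U01ABCDEFGH'              # member ID of the Claude bot
SLACK_CLAUDE_USER_TOKEN = 'xoxp-0000000000-...'  # your Slack user token
```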

## Newbing

- Obtain the cookie (json) with a cookie editor
- Add the cookie (json) to config.py (NEWBING_COOKIES)
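A sketch of the corresponding config.py entry (a hypothetical, heavily truncated example of a browser cookie-editor export):

```python
# config.py (hypothetical, truncated cookie JSON)
NEWBING_COOKIES = """
[{"domain": ".bing.com", "name": "_U", "value": "<your cookie value>"}]
"""
```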

## Moss
- Use docker-compose

## RWKV
- Use docker-compose

## LLAMA
- Use docker-compose

## 盘古
- Use docker-compose


---
## Text-Generation-UI (TGUI, under debugging, not yet usable)
request_llm/bridge_all.py
@@ -130,6 +130,7 @@ model_info = {
        "tokenizer": tokenizer_gpt35,
        "token_cnt": get_token_num_gpt35,
    },

}

@@ -186,8 +187,20 @@ if "moss" in AVAIL_LLM_MODELS:
            "token_cnt": get_token_num_gpt35,
        },
    })

if "stack-claude" in AVAIL_LLM_MODELS:
    from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
    from .bridge_stackclaude import predict as claude_ui
    # claude
    model_info.update({
        "stack-claude": {
            "fn_with_ui": claude_ui,
            "fn_without_ui": claude_noui,
            "endpoint": None,
            "max_token": 8192,
            "tokenizer": tokenizer_gpt35,
            "token_cnt": get_token_num_gpt35,
        }
    })
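Every entry in `model_info` is keyed by model name, which is how callers resolve the right bridge at runtime (the audio plugin above uses the same table to look up its `endpoint`); a minimal sketch, assuming the registration above ran:

```python
# Sketch: resolving a bridge through model_info (assumes "stack-claude" registered).
entry = model_info["stack-claude"]
predict_fn = entry["fn_without_ui"]  # bridge_stackclaude.predict_no_ui_long_connection
max_token = entry["max_token"]       # 8192 for stack-claude
```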


def LLM_CATCH_EXCEPTION(f):
request_llm/bridge_newbing.py
@@ -153,7 +153,7 @@ class NewBingHandle(Process):
             # enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
request_llm/bridge_stackclaude.py (new file, 296 lines)
@@ -0,0 +1,296 @@
from .bridge_newbing import preprocess_newbing_out, preprocess_newbing_out_simple
from multiprocessing import Process, Pipe
from toolbox import update_ui, get_conf, trimmed_format_exc
import threading
import importlib
import logging
import time
import asyncio
load_message = "正在加载Claude组件,请稍候..."

try:
    """
    ========================================================================
    Part 1: Slack API Client
    https://github.com/yokonsan/claude-in-slack-api
    ========================================================================
    """

    from slack_sdk.errors import SlackApiError
    from slack_sdk.web.async_client import AsyncWebClient

    class SlackClient(AsyncWebClient):
        """SlackClient talks to the Slack API to send and receive messages.

        Attributes:
        - CHANNEL_ID: str, the channel ID.

        Methods:
        - open_channel(): async. Opens a channel via conversations_open and stores the returned channel ID in CHANNEL_ID.
        - chat(text: str): async. Sends a text message to the opened channel.
        - get_slack_messages(): async. Fetches the latest message of the opened channel and returns a list of messages; querying history is not supported yet.
        - get_reply(): async. Polls the opened channel; a message ending with "Typing…_" means Claude is still writing, otherwise the loop ends.
        """
        CHANNEL_ID = None

        async def open_channel(self):
            response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
            self.CHANNEL_ID = response["channel"]["id"]

        async def chat(self, text):
            if not self.CHANNEL_ID:
                raise Exception("Channel not found.")

            resp = await self.chat_postMessage(channel=self.CHANNEL_ID, text=text)
            self.LAST_TS = resp["ts"]

        async def get_slack_messages(self):
            try:
                # TODO: history messages are not supported for now, because several people
                # sharing one channel would leak history into each other's queries
                resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
                msg = [msg for msg in resp["messages"]
                       if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
                return msg
            except (SlackApiError, KeyError) as e:
                raise RuntimeError(f"获取Slack消息失败。")

        async def get_reply(self):
            while True:
                slack_msgs = await self.get_slack_messages()
                if len(slack_msgs) == 0:
                    await asyncio.sleep(0.5)
                    continue

                msg = slack_msgs[-1]
                if msg["text"].endswith("Typing…_"):
                    yield False, msg["text"]
                else:
                    yield True, msg["text"]
                    break
except:
    pass
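A minimal usage sketch of this client (the token is hypothetical, `SLACK_CLAUDE_BOT_ID` must be configured, and the calls must run inside an asyncio event loop, as they do in the worker below):

```python
# Hypothetical usage inside an async function:
client = SlackClient(token="xoxp-...", proxy=None)
await client.open_channel()
await client.chat("Hello, Claude")
async for final, text in client.get_reply():
    if final:
        print(text)  # the completed reply
```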

"""
========================================================================
Part 2: subprocess worker (the calling body)
========================================================================
"""


class ClaudeHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()
        self.claude_model = None
        self.info = ""
        self.success = True
        self.local_history = []
        self.check_dependency()
        if self.success:
            self.start()
            self.threadLock = threading.Lock()

    def check_dependency(self):
        try:
            self.success = False
            import slack_sdk
            self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
            self.success = True
        except:
            self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。"
            self.success = False

    def ready(self):
        return self.claude_model is not None

    async def async_run(self):
        await self.claude_model.open_channel()
        while True:
            # wait for a request
            kwargs = self.child.recv()
            question = kwargs['query']
            history = kwargs['history']
            # system_prompt = kwargs['system_prompt']

            # reset?
            if len(self.local_history) > 0 and len(history) == 0:
                # await self.claude_model.reset()
                self.local_history = []

            # start asking
            prompt = ""
            # better not to send a system prompt through the Slack API
            # if system_prompt not in self.local_history:
            #     self.local_history.append(system_prompt)
            #     prompt += system_prompt + '\n'

            # append the history
            for ab in history:
                a, b = ab
                if a not in self.local_history:
                    self.local_history.append(a)
                    prompt += a + '\n'
                # if b not in self.local_history:
                #     self.local_history.append(b)
                #     prompt += b + '\n'

            # the question
            prompt += question
            self.local_history.append(question)
            print('question:', prompt)
            # submit
            await self.claude_model.chat(prompt)
            # fetch the reply
            # async for final, response in self.claude_model.get_reply():
            #     await self.handle_claude_response(final, response)
            async for final, response in self.claude_model.get_reply():
                if not final:
                    print(response)
                    self.child.send(str(response))
                else:
                    # avoid losing the last message
                    slack_msgs = await self.claude_model.get_slack_messages()
                    last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
                    if last_msg:
                        self.child.send(last_msg)
                    print('-------- receive final ---------')
                    self.child.send('[Finish]')

    def run(self):
        """
        This function runs in the subprocess.
        """
        # first run: load parameters
        self.success = False
        self.local_history = []
        if (self.claude_model is None) or (not self.success):
            # proxy settings
            proxies, = get_conf('proxies')
            if proxies is None:
                self.proxies_https = None
            else:
                self.proxies_https = proxies['https']

            try:
                SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN')
                self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
                print('Claude组件初始化成功。')
            except:
                self.success = False
                tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
                self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
                self.child.send('[Fail]')
                self.child.send('[Finish]')
                raise RuntimeError(f"不能加载Claude组件。")

        self.success = True
        try:
            # enter the task-waiting state
            asyncio.run(self.async_run())
        except Exception:
            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            self.child.send(f'[Local Message] Claude失败 {tb_str}.')
            self.child.send('[Fail]')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        """
        This function runs in the main process.
        """
        self.threadLock.acquire()
        self.parent.send(kwargs)    # send the request to the subprocess
        while True:
            res = self.parent.recv()    # wait for a fragment of Claude's reply
            if res == '[Finish]':
                break    # done
            elif res == '[Fail]':
                self.success = False
                break
            else:
                yield res    # a fragment of Claude's reply
        self.threadLock.release()
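Consumers drive the worker through `stream_chat`, which relays Pipe messages until a `[Finish]` or `[Fail]` sentinel arrives; a minimal sketch (mirroring `predict` below, and assuming slack_sdk and the Slack tokens are configured):

```python
# Sketch: consuming the worker from the main process.
handle = ClaudeHandle()
for fragment in handle.stream_chat(query="Hello", history=[], system_prompt=""):
    print(fragment)  # progressively longer partial replies
```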

"""
========================================================================
Part 3: unified entry points called from the main process
========================================================================
"""
global claude_handle
claude_handle = None


def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    Multi-threaded method.
    For documentation of this function, see request_llm/bridge_all.py.
    """
    global claude_handle
    if (claude_handle is None) or (not claude_handle.success):
        claude_handle = ClaudeHandle()
        observe_window[0] = load_message + "\n\n" + claude_handle.info
        if not claude_handle.success:
            error = claude_handle.info
            claude_handle = None
            raise RuntimeError(error)

    # there is no sys_prompt interface, so the prompt goes into the history
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    watch_dog_patience = 5    # watchdog patience; 5 seconds is enough
    response = ""
    observe_window[0] = "[Local Message]: 等待Claude响应中 ..."
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        observe_window[0] = preprocess_newbing_out_simple(response)
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
    return preprocess_newbing_out_simple(response)


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    Single-threaded method.
    For documentation of this function, see request_llm/bridge_all.py.
    """
    chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ..."))

    global claude_handle
    if (claude_handle is None) or (not claude_handle.success):
        claude_handle = ClaudeHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + claude_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not claude_handle.success:
            claude_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload the prompts
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]:
            inputs = core_functional[additional_fn]["PreProcess"](inputs)    # apply the preprocessing function (if any)
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    chatbot[-1] = (inputs, "[Local Message]: 等待Claude响应中 ...")
    response = "[Local Message]: 等待Claude响应中 ..."
    yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
        chatbot[-1] = (inputs, preprocess_newbing_out(response))
        yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
    if response == "[Local Message]: 等待Claude响应中 ...":
        response = "[Local Message]: Claude响应异常,请刷新界面重试 ..."
    history.extend([inputs, response])
    logging.info(f'[raw_input] {inputs}')
    logging.info(f'[response] {response}')
    yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
request_llm/requirements_slackclaude.txt (new file, 1 line)
@@ -0,0 +1 @@
slack-sdk==3.21.3
version (4 changed lines)
@@ -1,5 +1,5 @@
 {
-    "version": 3.34,
+    "version": 3.35,
     "show_feature": true,
-    "new_feature": "修复新版gradio(3.28.3)的暗色主题适配 <-> 提供复旦MOSS模型适配(启用需额外依赖) <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D WAIFU装饰 <-> 完善对话历史的保存/载入/删除 <-> ChatGLM加线程锁提高并发稳定性 <-> 支持NewBing <-> Markdown翻译功能支持直接输入Readme文件网址 <-> 保存对话功能 <-> 解读任意语言代码+同时询问任意的LLM组合 <-> 添加联网(Google)回答问题插件"
+    "new_feature": "添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持 <-> 提供复旦MOSS模型适配(启用需额外依赖) <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D装饰 <-> 完善对话历史的保存/载入/删除 <-> 保存对话功能"
 }