From 30de8f1358e9b8117f9afacfc6739d64ed6f8ae6 Mon Sep 17 00:00:00 2001 From: CSUMaVeRick <603312917@qq.com> Date: Thu, 4 May 2023 00:52:12 +0800 Subject: [PATCH 01/19] Add or update the Azure App Service build and deployment workflow config --- .github/workflows/master_gptacademic.yml | 63 ++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 .github/workflows/master_gptacademic.yml diff --git a/.github/workflows/master_gptacademic.yml b/.github/workflows/master_gptacademic.yml new file mode 100644 index 0000000..e4189c8 --- /dev/null +++ b/.github/workflows/master_gptacademic.yml @@ -0,0 +1,63 @@ +# Docs for the Azure Web Apps Deploy action: https://github.com/Azure/webapps-deploy +# More GitHub Actions for Azure: https://github.com/Azure/actions +# More info on Python, GitHub Actions, and Azure App Service: https://aka.ms/python-webapps-actions + +name: Build and deploy Python app to Azure Web App - GPTacademic + +on: + push: + branches: + - master + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Set up Python version + uses: actions/setup-python@v1 + with: + python-version: '3.9' + + - name: Create and start virtual environment + run: | + python -m venv venv + source venv/bin/activate + + - name: Install dependencies + run: pip install -r requirements.txt + + # Optional: Add step to run tests here (PyTest, Django test suites, etc.) + + - name: Upload artifact for deployment jobs + uses: actions/upload-artifact@v2 + with: + name: python-app + path: | + . + !venv/ + + deploy: + runs-on: ubuntu-latest + needs: build + environment: + name: 'Production' + url: ${{ steps.deploy-to-webapp.outputs.webapp-url }} + + steps: + - name: Download artifact from build job + uses: actions/download-artifact@v2 + with: + name: python-app + path: . + + - name: 'Deploy to Azure Web App' + uses: azure/webapps-deploy@v2 + id: deploy-to-webapp + with: + app-name: 'GPTacademic' + slot-name: 'Production' + publish-profile: ${{ secrets.AZUREAPPSERVICE_PUBLISHPROFILE_8917F3C29B9D4A63975B1945E8C5833E }} From 57297605e2d1201570e8cb34007a518f2bd6d613 Mon Sep 17 00:00:00 2001 From: CSUMaVeRick <603312917@qq.com> Date: Thu, 11 May 2023 13:42:51 +0800 Subject: [PATCH 02/19] Update core_functional.py --- core_functional.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/core_functional.py b/core_functional.py index 536ccb6..a71140f 100644 --- a/core_functional.py +++ b/core_functional.py @@ -68,4 +68,10 @@ def get_core_functions(): "Prefix": r"请解释以下代码:" + "\n```\n", "Suffix": "\n```\n", }, + "参考文献转Bib": { + "Prefix": r"Here are some bibliography items, please transform them into bibtex style." + + r"Note that, reference styles maybe more than one kind, you should transform each item correctly." 
+ + r"Items need to be transformed:", + "Suffix": r"", + } } From dcd5f7996e94644a2b77b6867b04b3b1b9cd00e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E2=80=98dalvqw=E2=80=99?= <‘1297762043@qq.com’> Date: Sun, 14 May 2023 12:51:33 +0800 Subject: [PATCH 03/19] =?UTF-8?q?=E5=A2=9E=E5=8A=A0=E6=89=B9=E9=87=8F?= =?UTF-8?q?=E6=80=BB=E7=BB=93=E9=9F=B3=E8=A7=86=E9=A2=91=E7=9A=84=E5=8A=9F?= =?UTF-8?q?=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 10 +++ crazy_functions/crazy_utils.py | 37 +++++++++ crazy_functions/总结音视频.py | 138 +++++++++++++++++++++++++++++++++ 3 files changed, 185 insertions(+) create mode 100644 crazy_functions/总结音视频.py diff --git a/crazy_functional.py b/crazy_functional.py index 3e7b12f..f6b7253 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -246,5 +246,15 @@ def get_crazy_functions(): "Function": HotReload(图片生成) }, }) + from crazy_functions.总结音视频 import 总结音视频 + function_plugins.update({ + "批量总结音视频(输入路径或上传压缩包)": { + "Color": "stop", + "AsButton": False, + "AdvancedArgs": True, + "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3, 此处无需输入参数", + "Function": HotReload(总结音视频) + } + }) ###################### 第n组插件 ########################### return function_plugins diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index e54136c..3570ca9 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -606,3 +606,40 @@ def get_files_from_everything(txt, type): # type='.md' success = False return success, file_manifest, project_folder + + +def split_audio_file(filename, split_duration=1000): + """ + 根据给定的切割时长将音频文件切割成多个片段。 + + Args: + filename (str): 需要被切割的音频文件名。 + split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。 + + Returns: + filelist (list): 一个包含所有切割音频片段文件路径的列表。 + + """ + from moviepy.editor import AudioFileClip + import os + os.makedirs('gpt_log/mp3/cut/', exist_ok=True) # 创建存储切割音频的文件夹 + + # 读取音频文件 + audio = AudioFileClip(filename) + + # 计算文件总时长和切割点 + total_duration = audio.duration + split_points = list(range(0, int(total_duration), split_duration)) + split_points.append(int(total_duration)) + filelist = [] + + # 切割音频文件 + for i in range(len(split_points) - 1): + start_time = split_points[i] + end_time = split_points[i + 1] + split_audio = audio.subclip(start_time, end_time) + split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") + filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") + + audio.close() + return filelist \ No newline at end of file diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py new file mode 100644 index 0000000..e391061 --- /dev/null +++ b/crazy_functions/总结音视频.py @@ -0,0 +1,138 @@ +from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, split_audio_file + + +def AnalyAudio(file_manifest, llm_kwargs, chatbot, history): + import os, requests + from moviepy.editor import AudioFileClip + from request_llm.bridge_all import model_info + + # 设置OpenAI密钥和模型 + api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model']) + chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint'] + + whisper_endpoint = chat_endpoint.replace('chat/completions', 'audio/transcriptions') + url = whisper_endpoint + headers = { + 'Authorization': f"Bearer {api_key}" + } + + os.makedirs('gpt_log/mp3/', exist_ok=True) + for index, fp in 
enumerate(file_manifest): + audio_history = [] + # 提取文件扩展名 + ext = os.path.splitext(fp)[1] + # 提取视频中的音频 + if ext not in [".mp3", ".wav", ".m4a", ".mpga"]: + audio_clip = AudioFileClip(fp) + audio_clip.write_audiofile(f'gpt_log/mp3/output{index}.mp3') + fp = f'gpt_log/mp3/output{index}.mp3' + # 调用whisper模型音频转文字 + voice = split_audio_file(fp) + for j, i in enumerate(voice): + with open(i, 'rb') as f: + file_content = f.read() # 读取文件内容到内存 + files = { + 'file': (os.path.basename(i), file_content), + } + data = { + "model": "whisper-1", + 'response_format': "text" + } + response = requests.post(url, headers=headers, files=files, data=data).text + + i_say = f'请对下面的文章片段做概述,文章内容是 ```{response}```' + i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。' + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, + inputs_show_user=i_say_show_user, + llm_kwargs=llm_kwargs, + chatbot=chatbot, + history=[], + sys_prompt="总结文章。" + ) + + chatbot[-1] = (i_say_show_user, gpt_say) + history.extend([i_say_show_user, gpt_say]) + audio_history.extend([i_say_show_user, gpt_say]) + + # 已经对该文章的所有片段总结完毕,如果文章被切分了, + result = "".join(audio_history) + if len(audio_history) > 1: + i_say = f"根据以上的对话,使用中文总结文章{result}的主要内容。" + i_say_show_user = f'第{index + 1}段音频的主要内容:' + gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( + inputs=i_say, + inputs_show_user=i_say_show_user, + llm_kwargs=llm_kwargs, + chatbot=chatbot, + history=audio_history, + sys_prompt="总结文章。" + ) + + history.extend([i_say, gpt_say]) + audio_history.extend([i_say, gpt_say]) + + res = write_results_to_file(history) + chatbot.append((f"第{index + 1}段音频完成了吗?", res)) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + # 删除中间文件夹 + import shutil + shutil.rmtree('gpt_log/mp3') + res = write_results_to_file(history) + chatbot.append(("所有音频都总结完成了吗?", res)) + yield from update_ui(chatbot=chatbot, history=history) + + +@CatchException +def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, WEB_PORT): + import glob, os + + # 基本信息:功能、贡献者 + chatbot.append([ + "函数插件功能?", + "总结音视频内容,函数插件贡献者: dalvqw"]) + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + + try: + from moviepy.editor import AudioFileClip + except: + report_execption(chatbot, history, + a=f"解析项目: {txt}", + b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 清空历史,以免输入溢出 + history = [] + + # 检测输入参数,如没有给定输入参数,直接退出 + if os.path.exists(txt): + project_folder = txt + else: + if txt == "": txt = '空空如也的输入栏' + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 搜索需要处理的文件清单 + extensions = ['.mp4', '.m4a', '.wav', '.mpga', '.mpeg', '.mp3', '.avi', '.mkv', '.flac', '.aac'] + + if txt.endswith(tuple(extensions)): + file_manifest = [txt] + else: + file_manifest = [] + for extension in extensions: + file_manifest.extend(glob.glob(f'{project_folder}/**/*{extension}', recursive=True)) + + # 如果没找到任何文件 + if len(file_manifest) == 0: + report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}") + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 + return + + # 开始正式执行任务 + yield from AnalyAudio(file_manifest, llm_kwargs, chatbot, history) + + yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 From 2291a67cf8779000f9532a991a8b5e73cf4b274f Mon Sep 17 00:00:00 2001 From: Rid7 Date: Mon, 15 May 
2023 14:27:31 +0800 Subject: [PATCH 04/19] =?UTF-8?q?=E5=AE=9E=E7=8E=B0Claude=E8=81=8A?= =?UTF-8?q?=E5=A4=A9=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llm/bridge_claude.py | 296 ++++++++++++++++++++++++++++ request_llm/requirements_claude.txt | 1 + 2 files changed, 297 insertions(+) create mode 100644 request_llm/bridge_claude.py create mode 100644 request_llm/requirements_claude.txt diff --git a/request_llm/bridge_claude.py b/request_llm/bridge_claude.py new file mode 100644 index 0000000..f2511b0 --- /dev/null +++ b/request_llm/bridge_claude.py @@ -0,0 +1,296 @@ +from .bridge_newbing import preprocess_newbing_out, preprocess_newbing_out_simple +from multiprocessing import Process, Pipe +from toolbox import update_ui, get_conf, trimmed_format_exc +import threading +import importlib +import logging +import time +from toolbox import get_conf +from slack_sdk.errors import SlackApiError +from slack_sdk.web.async_client import AsyncWebClient +import asyncio +import sys +sys.path.append('..') + + +""" +======================================================================== +第一部分:Slack API Client +https://github.com/yokonsan/claude-in-slack-api +======================================================================== +""" +load_message = "正在加载Claude组件,请稍候..." + + +class SlackClient(AsyncWebClient): + """SlackClient类用于与Slack API进行交互,实现消息发送、接收等功能。 + + 属性: + - CHANNEL_ID:str类型,表示频道ID。 + + 方法: + - open_channel():异步方法。通过调用conversations_open方法打开一个频道,并将返回的频道ID保存在属性CHANNEL_ID中。 + - chat(text: str):异步方法。向已打开的频道发送一条文本消息。 + - get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。 + - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。 + + """ + CHANNEL_ID = None + + async def open_channel(self): + response = await self.conversations_open(users=get_conf('CLAUDE_BOT_ID')[0]) + self.CHANNEL_ID = response["channel"]["id"] + + async def chat(self, text): + if not self.CHANNEL_ID: + raise Exception("Channel not found.") + + resp = await self.chat_postMessage(channel=self.CHANNEL_ID, text=text) + self.LAST_TS = resp["ts"] + + async def get_slack_messages(self): + try: + # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题 + resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1) + msg = [msg for msg in resp["messages"] + if msg.get("user") == get_conf('CLAUDE_BOT_ID')[0]] + return msg + except (SlackApiError, KeyError) as e: + raise RuntimeError(f"获取Slack消息失败。") + + async def get_reply(self): + while True: + slack_msgs = await self.get_slack_messages() + if len(slack_msgs) == 0: + await asyncio.sleep(0.5) + continue + + msg = slack_msgs[-1] + if msg["text"].endswith("Typing…_"): + yield False, msg["text"] + else: + yield True, msg["text"] + break + + +""" +======================================================================== +第二部分:子进程Worker(调用主体) +======================================================================== +""" + + +class ClaudeHandle(Process): + def __init__(self): + super().__init__(daemon=True) + self.parent, self.child = Pipe() + self.claude_model = None + self.info = "" + self.success = True + self.local_history = [] + self.check_dependency() + self.start() + self.threadLock = threading.Lock() + + def check_dependency(self): + try: + self.success = False + import slack_sdk + self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。" + self.success = True + except: + self.info = 
"缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_claude.txt`安装Claude的依赖。" + self.success = False + + def ready(self): + return self.claude_model is not None + + async def async_run(self): + await self.claude_model.open_channel() + while True: + # 等待 + kwargs = self.child.recv() + question = kwargs['query'] + history = kwargs['history'] + # system_prompt=kwargs['system_prompt'] + + # 是否重置 + if len(self.local_history) > 0 and len(history) == 0: + await self.claude_model.reset() + self.local_history = [] + + # 开始问问题 + prompt = "" + # Slack API最好不要添加系统提示 + # if system_prompt not in self.local_history: + # self.local_history.append(system_prompt) + # prompt += system_prompt + '\n' + + # 追加历史 + for ab in history: + a, b = ab + if a not in self.local_history: + self.local_history.append(a) + prompt += a + '\n' + # if b not in self.local_history: + # self.local_history.append(b) + # prompt += b + '\n' + + # 问题 + prompt += question + self.local_history.append(question) + print('question:', prompt) + # 提交 + await self.claude_model.chat(prompt) + # 获取回复 + # async for final, response in self.claude_model.get_reply(): + # await self.handle_claude_response(final, response) + async for final, response in self.claude_model.get_reply(): + if not final: + print(response) + self.child.send(str(response)) + else: + # 防止丢失最后一条消息 + slack_msgs = await self.claude_model.get_slack_messages() + last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else "" + if last_msg: + self.child.send(last_msg) + print('-------- receive final ---------') + self.child.send('[Finish]') + + def run(self): + """ + 这个函数运行在子进程 + """ + # 第一次运行,加载参数 + self.success = False + self.local_history = [] + if (self.claude_model is None) or (not self.success): + # 代理设置 + proxies, = get_conf('proxies') + if proxies is None: + self.proxies_https = None + else: + self.proxies_https = proxies['https'] + + try: + SLACK_USER_TOKEN, = get_conf('SLACK_USER_TOKEN') + self.claude_model = SlackClient(token=SLACK_USER_TOKEN, proxy=self.proxies_https) + print('Claude组件初始化成功。') + except: + self.success = False + tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n' + self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}') + self.child.send('[Fail]') + self.child.send('[Finish]') + raise RuntimeError(f"不能加载Claude组件。") + + self.success = True + try: + # 进入任务等待状态 + asyncio.run(self.async_run()) + except Exception: + tb_str = '```\n' + trimmed_format_exc() + '```' + self.child.send(f'[Local Message] Claude失败 {tb_str}.') + self.child.send('[Fail]') + self.child.send('[Finish]') + + def stream_chat(self, **kwargs): + """ + 这个函数运行在主进程 + """ + self.threadLock.acquire() + self.parent.send(kwargs) # 发送请求到子进程 + while True: + res = self.parent.recv() # 等待Claude回复的片段 + if res == '[Finish]': + break # 结束 + elif res == '[Fail]': + self.success = False + break + else: + yield res # Claude回复的片段 + self.threadLock.release() + + +""" +======================================================================== +第三部分:主进程统一调用函数接口 +======================================================================== +""" +global claude_handle +claude_handle = None + + +def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False): + """ + 多线程方法 + 函数的说明请见 request_llm/bridge_all.py + """ + global claude_handle + if (claude_handle is None) or (not claude_handle.success): + claude_handle = ClaudeHandle() + observe_window[0] = load_message + "\n\n" + claude_handle.info + if not 
claude_handle.success: + error = claude_handle.info + claude_handle = None + raise RuntimeError(error) + + # 没有 sys_prompt 接口,因此把prompt加入 history + history_feedin = [] + for i in range(len(history)//2): + history_feedin.append([history[2*i], history[2*i+1]]) + + watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 + response = "" + observe_window[0] = "[Local Message]: 等待Claude响应中 ..." + for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): + observe_window[0] = preprocess_newbing_out_simple(response) + if len(observe_window) >= 2: + if (time.time()-observe_window[1]) > watch_dog_patience: + raise RuntimeError("程序终止。") + return preprocess_newbing_out_simple(response) + + +def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None): + """ + 单线程方法 + 函数的说明请见 request_llm/bridge_all.py + """ + chatbot.append((inputs, "[Local Message]: 等待Claude响应中 ...")) + + global claude_handle + if (claude_handle is None) or (not claude_handle.success): + claude_handle = ClaudeHandle() + chatbot[-1] = (inputs, load_message + "\n\n" + claude_handle.info) + yield from update_ui(chatbot=chatbot, history=[]) + if not claude_handle.success: + claude_handle = None + return + + if additional_fn is not None: + import core_functional + importlib.reload(core_functional) # 热更新prompt + core_functional = core_functional.get_core_functions() + if "PreProcess" in core_functional[additional_fn]: + inputs = core_functional[additional_fn]["PreProcess"]( + inputs) # 获取预处理函数(如果有的话) + inputs = core_functional[additional_fn]["Prefix"] + \ + inputs + core_functional[additional_fn]["Suffix"] + + history_feedin = [] + for i in range(len(history)//2): + history_feedin.append([history[2*i], history[2*i+1]]) + + chatbot[-1] = (inputs, "[Local Message]: 等待Claude响应中 ...") + response = "[Local Message]: 等待Claude响应中 ..." + yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") + for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt): + chatbot[-1] = (inputs, preprocess_newbing_out(response)) + yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。") + if response == "[Local Message]: 等待Claude响应中 ...": + response = "[Local Message]: Claude响应异常,请刷新界面重试 ..." 
+    history.extend([inputs, response])
+    logging.info(f'[raw_input] {inputs}')
+    logging.info(f'[response] {response}')
+    yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
diff --git a/request_llm/requirements_claude.txt b/request_llm/requirements_claude.txt
new file mode 100644
index 0000000..472d58c
--- /dev/null
+++ b/request_llm/requirements_claude.txt
@@ -0,0 +1 @@
+slack-sdk==3.21.3
\ No newline at end of file
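Patch 04 above drives Claude through a Slack workspace rather than a native API: it opens a direct-message channel with the Claude bot, posts the prompt, then polls `conversations_history` until the newest bot message no longer ends with the `Typing…_` marker. Stripped of the worker-process machinery, that round-trip can be sketched as follows — a minimal illustration only, where `SLACK_TOKEN`, `BOT_ID`, and the `ask_claude` helper are placeholders, not part of the patch series:

```python
import asyncio
from slack_sdk.web.async_client import AsyncWebClient

SLACK_TOKEN = "xoxp-..."   # placeholder: your Slack user token
BOT_ID = "U0..."           # placeholder: member id of the Claude bot

async def ask_claude(prompt: str) -> str:
    client = AsyncWebClient(token=SLACK_TOKEN)
    # Open a DM channel with the bot and post the prompt.
    channel = (await client.conversations_open(users=BOT_ID))["channel"]["id"]
    ts = (await client.chat_postMessage(channel=channel, text=prompt))["ts"]
    while True:
        # Poll only messages newer than our prompt, authored by the bot.
        resp = await client.conversations_history(channel=channel, oldest=ts, limit=1)
        replies = [m for m in resp["messages"] if m.get("user") == BOT_ID]
        if replies and not replies[-1]["text"].endswith("Typing…_"):
            return replies[-1]["text"]   # typing marker gone: reply is final
        await asyncio.sleep(0.5)         # same polling interval as SlackClient.get_reply()

print(asyncio.run(ask_claude("Hello, Claude")))
```

The 0.5 s sleep and the `Typing…_` suffix check are taken directly from `SlackClient.get_reply()` in the patch.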
From 6d267947bba707706868c1c6aae1fcaa3d222485 Mon Sep 17 00:00:00 2001
From: Rid7
Date: Mon, 15 May 2023 15:12:50 +0800
Subject: [PATCH 06/19] =?UTF-8?q?=E5=AE=9E=E7=8E=B0Claude=E8=81=8A?=
 =?UTF-8?q?=E5=A4=A9=E5=8A=9F=E8=83=BD=E9=85=8D=E7=BD=AE=E9=A1=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.py                 |  7 ++++++-
 request_llm/bridge_all.py | 12 ++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/config.py b/config.py
index 2617aff..99b72d9 100644
--- a/config.py
+++ b/config.py
@@ -46,7 +46,7 @@ MAX_RETRY = 2
 
 # OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude"]
 
 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
@@ -75,3 +75,8 @@ NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
 NEWBING_COOKIES = """
 your bing cookies here
 """
+
+# slack-claude bot
+# 下面的id怎么填写具体参见https://zhuanlan.zhihu.com/p/627485689
+CLAUDE_BOT_ID = ''
+SLACK_USER_TOKEN = ''
\ No newline at end of file
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 9dbcf79..5560592 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -22,6 +22,9 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
 from .bridge_newbing import predict as newbing_ui
 
+from .bridge_claude import predict_no_ui_long_connection as claude_noui
+from .bridge_claude import predict as claude_ui
+
 # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 # from .bridge_tgui import predict as tgui_ui
 
@@ -130,6 +133,15 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    # claude
+    "claude": {
+        "fn_with_ui": claude_ui,
+        "fn_without_ui": claude_noui,
+        "endpoint": None,
+        "max_token": 4096,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
 }

From d795dc1a81251e13c2bae19a491bcdc507829f4c Mon Sep 17 00:00:00 2001
From: Rid7
Date: Mon, 15 May 2023 15:47:05 +0800
Subject: [PATCH 07/19] =?UTF-8?q?=E5=8F=96=E6=B6=88=E9=87=8D=E7=BD=AE?=
 =?UTF-8?q?=E6=97=B6=E8=B0=83=E7=94=A8claude=5Fmodel=E7=9A=84reset?=
 =?UTF-8?q?=E6=96=B9=E6=B3=95?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 request_llm/bridge_claude.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/request_llm/bridge_claude.py b/request_llm/bridge_claude.py
index f2511b0..4e12bc9 100644
--- a/request_llm/bridge_claude.py
+++ b/request_llm/bridge_claude.py
@@ -116,7 +116,7 @@ class ClaudeHandle(Process):
 
             # 是否重置
             if len(self.local_history) > 0 and len(history) == 0:
-                await self.claude_model.reset()
+                # await self.claude_model.reset()
                 self.local_history = []
 
             # 开始问问题

From c43e22bc4198e358caa12cdf09a06444b85588a5 Mon Sep 17 00:00:00 2001
From: qingxu fu
<505030475@qq.com> Date: Fri, 19 May 2023 10:46:12 +0800 Subject: [PATCH 08/19] change claude model name to stack-claude --- config.py | 12 ++++---- request_llm/README.md | 25 ++++++++++++++++ request_llm/bridge_all.py | 29 ++++++++++--------- request_llm/bridge_newbing.py | 2 +- ...bridge_claude.py => bridge_stackclaude.py} | 12 ++++---- 5 files changed, 52 insertions(+), 28 deletions(-) rename request_llm/{bridge_claude.py => bridge_stackclaude.py} (97%) diff --git a/config.py b/config.py index 99b72d9..baaa410 100644 --- a/config.py +++ b/config.py @@ -44,9 +44,10 @@ WEB_PORT = -1 # 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 MAX_RETRY = 2 -# OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d) +# 模型选择是 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓ -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude"] +AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"] +# P.S. 其他可用的模型还包括 ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] # 本地LLM模型如ChatGLM的执行方式 CPU/GPU LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda" @@ -76,7 +77,6 @@ NEWBING_COOKIES = """ your bing cookies here """ -# slack-claude bot -# 下面的id怎么填写具体参见https://zhuanlan.zhihu.com/p/627485689 -CLAUDE_BOT_ID = '' -SLACK_USER_TOKEN = '' \ No newline at end of file +# Slack Claude bot, 使用教程详情见 request_llm/README.md +SLACK_CLAUDE_BOT_ID = '' +SLACK_CLAUDE_USER_TOKEN = '' diff --git a/request_llm/README.md b/request_llm/README.md index 4a912d1..545bc1f 100644 --- a/request_llm/README.md +++ b/request_llm/README.md @@ -13,6 +13,31 @@ LLM_MODEL = "chatglm" `python main.py` ``` +## Claude-Stack + +- 请参考此教程获取 https://zhuanlan.zhihu.com/p/627485689 + - 1、SLACK_CLAUDE_BOT_ID + - 2、SLACK_CLAUDE_USER_TOKEN + +- 把token加入config.py + +## Newbing + +- 使用cookie editor获取cookie(json) +- 把cookie(json)加入config.py (NEWBING_COOKIES) + +## Moss +- 使用docker-compose + +## RWKV +- 使用docker-compose + +## LLAMA +- 使用docker-compose + +## 盘古 +- 使用docker-compose + --- ## Text-Generation-UI (TGUI,调试中,暂不可用) diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py index 5560592..0c46812 100644 --- a/request_llm/bridge_all.py +++ b/request_llm/bridge_all.py @@ -22,9 +22,6 @@ from .bridge_chatglm import predict as chatglm_ui from .bridge_newbing import predict_no_ui_long_connection as newbing_noui from .bridge_newbing import predict as newbing_ui -from .bridge_claude import predict_no_ui_long_connection as claude_noui -from .bridge_claude import predict as claude_ui - # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui # from .bridge_tgui import predict as tgui_ui @@ -133,15 +130,7 @@ model_info = { "tokenizer": tokenizer_gpt35, "token_cnt": get_token_num_gpt35, }, - # claude - "claude": { - "fn_with_ui": claude_ui, - "fn_without_ui": claude_noui, - "endpoint": None, - "max_token": 4096, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, + } @@ -198,8 +187,20 @@ if "moss" in AVAIL_LLM_MODELS: "token_cnt": get_token_num_gpt35, }, }) - - +if "stack-claude" in AVAIL_LLM_MODELS: + from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui + from .bridge_stackclaude import predict as claude_ui + # claude + model_info.update({ + "stack-claude": { + "fn_with_ui": claude_ui, + "fn_without_ui": claude_noui, + "endpoint": None, + "max_token": 8192, + "tokenizer": tokenizer_gpt35, + "token_cnt": get_token_num_gpt35, + } + }) def LLM_CATCH_EXCEPTION(f): diff --git 
a/request_llm/bridge_newbing.py b/request_llm/bridge_newbing.py index dca7485..2136f01 100644 --- a/request_llm/bridge_newbing.py +++ b/request_llm/bridge_newbing.py @@ -153,7 +153,7 @@ class NewBingHandle(Process): # 进入任务等待状态 asyncio.run(self.async_run()) except Exception: - tb_str = '```\n' + trimmed_format_exc() + '```' + tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n' self.child.send(f'[Local Message] Newbing失败 {tb_str}.') self.child.send('[Fail]') self.child.send('[Finish]') diff --git a/request_llm/bridge_claude.py b/request_llm/bridge_stackclaude.py similarity index 97% rename from request_llm/bridge_claude.py rename to request_llm/bridge_stackclaude.py index 4e12bc9..65ea881 100644 --- a/request_llm/bridge_claude.py +++ b/request_llm/bridge_stackclaude.py @@ -9,8 +9,6 @@ from toolbox import get_conf from slack_sdk.errors import SlackApiError from slack_sdk.web.async_client import AsyncWebClient import asyncio -import sys -sys.path.append('..') """ @@ -38,7 +36,7 @@ class SlackClient(AsyncWebClient): CHANNEL_ID = None async def open_channel(self): - response = await self.conversations_open(users=get_conf('CLAUDE_BOT_ID')[0]) + response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0]) self.CHANNEL_ID = response["channel"]["id"] async def chat(self, text): @@ -53,7 +51,7 @@ class SlackClient(AsyncWebClient): # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题 resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1) msg = [msg for msg in resp["messages"] - if msg.get("user") == get_conf('CLAUDE_BOT_ID')[0]] + if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]] return msg except (SlackApiError, KeyError) as e: raise RuntimeError(f"获取Slack消息失败。") @@ -174,8 +172,8 @@ class ClaudeHandle(Process): self.proxies_https = proxies['https'] try: - SLACK_USER_TOKEN, = get_conf('SLACK_USER_TOKEN') - self.claude_model = SlackClient(token=SLACK_USER_TOKEN, proxy=self.proxies_https) + SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN') + self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https) print('Claude组件初始化成功。') except: self.success = False @@ -190,7 +188,7 @@ class ClaudeHandle(Process): # 进入任务等待状态 asyncio.run(self.async_run()) except Exception: - tb_str = '```\n' + trimmed_format_exc() + '```' + tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n' self.child.send(f'[Local Message] Claude失败 {tb_str}.') self.child.send('[Fail]') self.child.send('[Finish]') From 77a2d62ef64e0fb4d664916cb7df989136e70107 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 19 May 2023 10:55:50 +0800 Subject: [PATCH 09/19] =?UTF-8?q?=E6=8D=95=E8=8E=B7=E7=BC=BA=E5=B0=91?= =?UTF-8?q?=E4=BE=9D=E8=B5=96=E6=97=B6=E7=9A=84=E5=BC=82=E5=B8=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llm/bridge_stackclaude.py | 118 +++++++++--------- ...laude.txt => requirements_slackclaude.txt} | 0 2 files changed, 60 insertions(+), 58 deletions(-) rename request_llm/{requirements_claude.txt => requirements_slackclaude.txt} (100%) diff --git a/request_llm/bridge_stackclaude.py b/request_llm/bridge_stackclaude.py index 65ea881..cb836de 100644 --- a/request_llm/bridge_stackclaude.py +++ b/request_llm/bridge_stackclaude.py @@ -6,70 +6,71 @@ import importlib import logging import time from toolbox import get_conf -from slack_sdk.errors import SlackApiError -from slack_sdk.web.async_client import AsyncWebClient import asyncio - - -""" 
-======================================================================== -第一部分:Slack API Client -https://github.com/yokonsan/claude-in-slack-api -======================================================================== -""" load_message = "正在加载Claude组件,请稍候..." - -class SlackClient(AsyncWebClient): - """SlackClient类用于与Slack API进行交互,实现消息发送、接收等功能。 - - 属性: - - CHANNEL_ID:str类型,表示频道ID。 - - 方法: - - open_channel():异步方法。通过调用conversations_open方法打开一个频道,并将返回的频道ID保存在属性CHANNEL_ID中。 - - chat(text: str):异步方法。向已打开的频道发送一条文本消息。 - - get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。 - - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。 - +try: + """ + ======================================================================== + 第一部分:Slack API Client + https://github.com/yokonsan/claude-in-slack-api + ======================================================================== """ - CHANNEL_ID = None - async def open_channel(self): - response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0]) - self.CHANNEL_ID = response["channel"]["id"] + from slack_sdk.errors import SlackApiError + from slack_sdk.web.async_client import AsyncWebClient - async def chat(self, text): - if not self.CHANNEL_ID: - raise Exception("Channel not found.") + class SlackClient(AsyncWebClient): + """SlackClient类用于与Slack API进行交互,实现消息发送、接收等功能。 - resp = await self.chat_postMessage(channel=self.CHANNEL_ID, text=text) - self.LAST_TS = resp["ts"] + 属性: + - CHANNEL_ID:str类型,表示频道ID。 - async def get_slack_messages(self): - try: - # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题 - resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1) - msg = [msg for msg in resp["messages"] - if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]] - return msg - except (SlackApiError, KeyError) as e: - raise RuntimeError(f"获取Slack消息失败。") - - async def get_reply(self): - while True: - slack_msgs = await self.get_slack_messages() - if len(slack_msgs) == 0: - await asyncio.sleep(0.5) - continue - - msg = slack_msgs[-1] - if msg["text"].endswith("Typing…_"): - yield False, msg["text"] - else: - yield True, msg["text"] - break + 方法: + - open_channel():异步方法。通过调用conversations_open方法打开一个频道,并将返回的频道ID保存在属性CHANNEL_ID中。 + - chat(text: str):异步方法。向已打开的频道发送一条文本消息。 + - get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。 + - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。 + """ + CHANNEL_ID = None + + async def open_channel(self): + response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0]) + self.CHANNEL_ID = response["channel"]["id"] + + async def chat(self, text): + if not self.CHANNEL_ID: + raise Exception("Channel not found.") + + resp = await self.chat_postMessage(channel=self.CHANNEL_ID, text=text) + self.LAST_TS = resp["ts"] + + async def get_slack_messages(self): + try: + # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题 + resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1) + msg = [msg for msg in resp["messages"] + if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]] + return msg + except (SlackApiError, KeyError) as e: + raise RuntimeError(f"获取Slack消息失败。") + + async def get_reply(self): + while True: + slack_msgs = await self.get_slack_messages() + if len(slack_msgs) == 0: + await asyncio.sleep(0.5) + continue + + msg = slack_msgs[-1] + if msg["text"].endswith("Typing…_"): + yield False, msg["text"] + else: + yield True, msg["text"] + break +except: + pass """ 
======================================================================== @@ -87,8 +88,9 @@ class ClaudeHandle(Process): self.success = True self.local_history = [] self.check_dependency() - self.start() - self.threadLock = threading.Lock() + if self.success: + self.start() + self.threadLock = threading.Lock() def check_dependency(self): try: @@ -97,7 +99,7 @@ class ClaudeHandle(Process): self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_claude.txt`安装Claude的依赖。" + self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖。" self.success = False def ready(self): diff --git a/request_llm/requirements_claude.txt b/request_llm/requirements_slackclaude.txt similarity index 100% rename from request_llm/requirements_claude.txt rename to request_llm/requirements_slackclaude.txt From b0c2e2d92b4dec32c8fd2f24671b8fb73aa5f7e4 Mon Sep 17 00:00:00 2001 From: qingxu fu <505030475@qq.com> Date: Fri, 19 May 2023 10:58:22 +0800 Subject: [PATCH 10/19] =?UTF-8?q?=E4=BF=AE=E8=AE=A2=E6=8F=90=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- request_llm/bridge_stackclaude.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/request_llm/bridge_stackclaude.py b/request_llm/bridge_stackclaude.py index cb836de..f9f3e84 100644 --- a/request_llm/bridge_stackclaude.py +++ b/request_llm/bridge_stackclaude.py @@ -99,7 +99,7 @@ class ClaudeHandle(Process): self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。" self.success = True except: - self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖。" + self.info = "缺少的依赖,如果要使用Claude,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_slackclaude.txt`安装Claude的依赖,然后重启程序。" self.success = False def ready(self): From e2d75f1b62f7279d849596afaaa6a1f25cf2af4b Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Fri, 19 May 2023 11:09:30 +0800 Subject: [PATCH 11/19] remove yml --- .github/workflows/master_gptacademic.yml | 63 ------------------------ 1 file changed, 63 deletions(-) delete mode 100644 .github/workflows/master_gptacademic.yml diff --git a/.github/workflows/master_gptacademic.yml b/.github/workflows/master_gptacademic.yml deleted file mode 100644 index e4189c8..0000000 --- a/.github/workflows/master_gptacademic.yml +++ /dev/null @@ -1,63 +0,0 @@ -# Docs for the Azure Web Apps Deploy action: https://github.com/Azure/webapps-deploy -# More GitHub Actions for Azure: https://github.com/Azure/actions -# More info on Python, GitHub Actions, and Azure App Service: https://aka.ms/python-webapps-actions - -name: Build and deploy Python app to Azure Web App - GPTacademic - -on: - push: - branches: - - master - workflow_dispatch: - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Set up Python version - uses: actions/setup-python@v1 - with: - python-version: '3.9' - - - name: Create and start virtual environment - run: | - python -m venv venv - source venv/bin/activate - - - name: Install dependencies - run: pip install -r requirements.txt - - # Optional: Add step to run tests here (PyTest, Django test suites, etc.) 
- - - name: Upload artifact for deployment jobs - uses: actions/upload-artifact@v2 - with: - name: python-app - path: | - . - !venv/ - - deploy: - runs-on: ubuntu-latest - needs: build - environment: - name: 'Production' - url: ${{ steps.deploy-to-webapp.outputs.webapp-url }} - - steps: - - name: Download artifact from build job - uses: actions/download-artifact@v2 - with: - name: python-app - path: . - - - name: 'Deploy to Azure Web App' - uses: azure/webapps-deploy@v2 - id: deploy-to-webapp - with: - app-name: 'GPTacademic' - slot-name: 'Production' - publish-profile: ${{ secrets.AZUREAPPSERVICE_PUBLISHPROFILE_8917F3C29B9D4A63975B1945E8C5833E }} From 5159a1e7a1f5a54afa344ce17027a6457379bbf4 Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Fri, 19 May 2023 11:14:44 +0800 Subject: [PATCH 12/19] =?UTF-8?q?core=20function=20=E9=9A=90=E8=97=8F?= =?UTF-8?q?=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core_functional.py | 1 + main.py | 2 ++ request_llm/moss | 1 + 3 files changed, 4 insertions(+) create mode 160000 request_llm/moss diff --git a/core_functional.py b/core_functional.py index a71140f..e126b57 100644 --- a/core_functional.py +++ b/core_functional.py @@ -73,5 +73,6 @@ def get_core_functions(): r"Note that, reference styles maybe more than one kind, you should transform each item correctly." + r"Items need to be transformed:", "Suffix": r"", + "Visible": False, } } diff --git a/main.py b/main.py index 4de8015..d9888f8 100644 --- a/main.py +++ b/main.py @@ -74,6 +74,7 @@ def main(): with gr.Accordion("基础功能区", open=True) as area_basic_fn: with gr.Row(): for k in functional: + if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue variant = functional[k]["Color"] if "Color" in functional[k] else "secondary" functional[k]["Button"] = gr.Button(k, variant=variant) with gr.Accordion("函数插件区", open=True) as area_crazy_fn: @@ -144,6 +145,7 @@ def main(): clearBtn2.click(lambda: ("",""), None, [txt, txt2]) # 基础功能区的回调函数注册 for k in functional: + if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo) cancel_handles.append(click_handle) # 文件上传区,接收文件后与chatbot的互动 diff --git a/request_llm/moss b/request_llm/moss new file mode 160000 index 0000000..4d905bc --- /dev/null +++ b/request_llm/moss @@ -0,0 +1 @@ +Subproject commit 4d905bcead53739d4395b145cae2be308b1df795 From 254fac0045d44a820daa565873f42cedf40b5326 Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Fri, 19 May 2023 11:16:53 +0800 Subject: [PATCH 13/19] move moss folder to gitignore --- .gitignore | 3 ++- request_llm/moss | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 160000 request_llm/moss diff --git a/.gitignore b/.gitignore index 0dd68f8..06ed13d 100644 --- a/.gitignore +++ b/.gitignore @@ -146,4 +146,5 @@ debug* private* crazy_functions/test_project/pdf_and_word crazy_functions/test_samples -request_llm/jittorllms \ No newline at end of file +request_llm/jittorllms +request_llm/moss \ No newline at end of file diff --git a/request_llm/moss b/request_llm/moss deleted file mode 160000 index 4d905bc..0000000 --- a/request_llm/moss +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 4d905bcead53739d4395b145cae2be308b1df795 From d8540d42a6b1e1d63ac284a0c181505a207a7c4f Mon Sep 17 00:00:00 2001 From: binary-husky 
<505030475@qq.com> Date: Fri, 19 May 2023 11:22:25 +0800 Subject: [PATCH 14/19] move dep --- crazy_functions/crazy_utils.py | 37 ---------------------------------- crazy_functions/总结音视频.py | 37 +++++++++++++++++++++++++++++++++- 2 files changed, 36 insertions(+), 38 deletions(-) diff --git a/crazy_functions/crazy_utils.py b/crazy_functions/crazy_utils.py index 3570ca9..e54136c 100644 --- a/crazy_functions/crazy_utils.py +++ b/crazy_functions/crazy_utils.py @@ -606,40 +606,3 @@ def get_files_from_everything(txt, type): # type='.md' success = False return success, file_manifest, project_folder - - -def split_audio_file(filename, split_duration=1000): - """ - 根据给定的切割时长将音频文件切割成多个片段。 - - Args: - filename (str): 需要被切割的音频文件名。 - split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。 - - Returns: - filelist (list): 一个包含所有切割音频片段文件路径的列表。 - - """ - from moviepy.editor import AudioFileClip - import os - os.makedirs('gpt_log/mp3/cut/', exist_ok=True) # 创建存储切割音频的文件夹 - - # 读取音频文件 - audio = AudioFileClip(filename) - - # 计算文件总时长和切割点 - total_duration = audio.duration - split_points = list(range(0, int(total_duration), split_duration)) - split_points.append(int(total_duration)) - filelist = [] - - # 切割音频文件 - for i in range(len(split_points) - 1): - start_time = split_points[i] - end_time = split_points[i + 1] - split_audio = audio.subclip(start_time, end_time) - split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") - filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") - - audio.close() - return filelist \ No newline at end of file diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index e391061..5e4f884 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -1,6 +1,41 @@ from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, split_audio_file +from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive +def split_audio_file(filename, split_duration=1000): + """ + 根据给定的切割时长将音频文件切割成多个片段。 + + Args: + filename (str): 需要被切割的音频文件名。 + split_duration (int, optional): 每个切割音频片段的时长(以秒为单位)。默认值为1000。 + + Returns: + filelist (list): 一个包含所有切割音频片段文件路径的列表。 + + """ + from moviepy.editor import AudioFileClip + import os + os.makedirs('gpt_log/mp3/cut/', exist_ok=True) # 创建存储切割音频的文件夹 + + # 读取音频文件 + audio = AudioFileClip(filename) + + # 计算文件总时长和切割点 + total_duration = audio.duration + split_points = list(range(0, int(total_duration), split_duration)) + split_points.append(int(total_duration)) + filelist = [] + + # 切割音频文件 + for i in range(len(split_points) - 1): + start_time = split_points[i] + end_time = split_points[i + 1] + split_audio = audio.subclip(start_time, end_time) + split_audio.write_audiofile(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") + filelist.append(f"gpt_log/mp3/cut/{filename[0]}_{i}.mp3") + + audio.close() + return filelist def AnalyAudio(file_manifest, llm_kwargs, chatbot, history): import os, requests From c46a8d27e698d95e741e29abee3f9b03c498c68a Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Fri, 19 May 2023 12:23:01 +0800 Subject: [PATCH 15/19] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E9=BB=98=E8=AE=A4=E5=80=BCbug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functions/图片生成.py | 1 + crazy_functions/解析JupyterNotebook.py | 1 + crazy_functions/询问多个大语言模型.py | 1 + 3 files changed, 3 insertions(+) diff --git a/crazy_functions/图片生成.py 
b/crazy_functions/图片生成.py index ecb75cd..5bf8bc4 100644 --- a/crazy_functions/图片生成.py +++ b/crazy_functions/图片生成.py @@ -55,6 +55,7 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro history = [] # 清空历史,以免输入溢出 chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-xxxx或者api2d-xxxx。如果中文效果不理想, 尝试Prompt。正在处理中 .....")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") resolution = plugin_kwargs.get("advanced_arg", '256x256') image_url, image_path = gen_image(llm_kwargs, prompt, resolution) chatbot.append([prompt, diff --git a/crazy_functions/解析JupyterNotebook.py b/crazy_functions/解析JupyterNotebook.py index 95a3d69..b4bcd56 100644 --- a/crazy_functions/解析JupyterNotebook.py +++ b/crazy_functions/解析JupyterNotebook.py @@ -67,6 +67,7 @@ def parseNotebook(filename, enable_markdown=1): def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") enable_markdown = plugin_kwargs.get("advanced_arg", "1") try: enable_markdown = int(enable_markdown) diff --git a/crazy_functions/询问多个大语言模型.py b/crazy_functions/询问多个大语言模型.py index 2939d04..ec9fd4a 100644 --- a/crazy_functions/询问多个大语言模型.py +++ b/crazy_functions/询问多个大语言模型.py @@ -45,6 +45,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, chatbot.append((txt, "正在同时咨询ChatGPT和ChatGLM……")) yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 + if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 llm_kwargs['llm_model'] = plugin_kwargs.get("advanced_arg", 'chatglm&gpt-3.5-turbo') # 'chatglm&gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( From 7d8338ce70388fcbe5677b7ea6ba20cbc2421f82 Mon Sep 17 00:00:00 2001 From: binary-husky <505030475@qq.com> Date: Fri, 19 May 2023 12:24:04 +0800 Subject: [PATCH 16/19] =?UTF-8?q?=E5=85=81=E8=AE=B8=E9=9F=B3=E9=A2=91?= =?UTF-8?q?=E8=BD=AC=E6=96=87=E5=AD=97=E6=97=B6=E7=9A=84=E9=AB=98=E7=BA=A7?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E6=8C=87=E4=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crazy_functional.py | 2 +- crazy_functions/总结音视频.py | 29 ++++++++++++++++++++--------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/crazy_functional.py b/crazy_functional.py index f6b7253..462000e 100644 --- a/crazy_functional.py +++ b/crazy_functional.py @@ -252,7 +252,7 @@ def get_crazy_functions(): "Color": "stop", "AsButton": False, "AdvancedArgs": True, - "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3, 此处无需输入参数", + "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。", "Function": HotReload(总结音视频) } }) diff --git a/crazy_functions/总结音视频.py b/crazy_functions/总结音视频.py index 5e4f884..62f05d3 100644 --- a/crazy_functions/总结音视频.py +++ b/crazy_functions/总结音视频.py @@ -1,4 +1,4 @@ -from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file +from 
toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file
+from toolbox import CatchException, report_execption, select_api_key, update_ui, write_results_to_file, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 
 def split_audio_file(filename, split_duration=1000):
@@ -37,7 +37,7 @@ def split_audio_file(filename, split_duration=1000):
     audio.close()
     return filelist
 
-def AnalyAudio(file_manifest, llm_kwargs, chatbot, history):
+def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
     import os, requests
     from moviepy.editor import AudioFileClip
     from request_llm.bridge_all import model_info
@@ -72,11 +72,20 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
                 }
                 data = {
                     "model": "whisper-1",
+                    "prompt": parse_prompt,
                     'response_format': "text"
                 }
-                response = requests.post(url, headers=headers, files=files, data=data).text
-                i_say = f'请对下面的文章片段做概述,文章内容是 ```{response}```'
+
+                chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
+                yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+                proxies, = get_conf('proxies')
+                response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text
+
+                chatbot.append(["音频解析结果", response])
+                history.extend(["音频解析结果", response])
+                yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+                i_say = f'请对下面的音频片段做概述,音频内容是 ```{response}```'
                 i_say_show_user = f'第{index + 1}段音频的第{j + 1} / {len(voice)}片段。'
                 gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                     inputs=i_say,
@@ -84,17 +93,17 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
                     llm_kwargs=llm_kwargs,
                     chatbot=chatbot,
                     history=[],
-                    sys_prompt="总结文章。"
+                    sys_prompt=f"总结音频。音频文件名{fp}"
                 )
 
                 chatbot[-1] = (i_say_show_user, gpt_say)
                 history.extend([i_say_show_user, gpt_say])
                 audio_history.extend([i_say_show_user, gpt_say])
 
-        # 已经对该文章的所有片段总结完毕,如果文章被切分了,
+        # 已经对该文章的所有片段总结完毕,如果文章被切分了
         result = "".join(audio_history)
         if len(audio_history) > 1:
-            i_say = f"根据以上的对话,使用中文总结文章{result}的主要内容。"
+            i_say = f"根据以上的对话,使用中文总结音频“{result}”的主要内容。"
             i_say_show_user = f'第{index + 1}段音频的主要内容:'
             gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                 inputs=i_say,
@@ -127,7 +136,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     # 基本信息:功能、贡献者
     chatbot.append([
         "函数插件功能?",
-        "总结音视频内容,函数插件贡献者: dalvqw"])
+        "总结音视频内容,函数插件贡献者: dalvqw & BinaryHusky"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
     try:
@@ -168,6 +177,8 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         return
 
     # 开始正式执行任务
-    yield from AnalyAudio(file_manifest, llm_kwargs, chatbot, history)
+    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
+    parse_prompt = plugin_kwargs.get("advanced_arg", '将音频解析为简体中文')
+    yield from AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history)
 
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面

From c32c585384a156e90991ec5cf9dd441516ab9d23 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Fri, 19 May 2023 12:25:58 +0800
Subject: [PATCH 17/19] =?UTF-8?q?=E9=9F=B3=E9=A2=91=E8=BD=AC=E6=96=87?=
 =?UTF-8?q?=E5=AD=97+=E6=80=BB=E7=BB=93?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/README.md b/README.md
index b2cddba..3e16f0b 100644
--- a/README.md
+++ b/README.md
@@ -267,6 +267,12 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
+
+9. OpenAI音频解析与总结
+(demo screenshots omitted)
+
 
 ## 版本:
 - version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)

From 212ca0c0b9448831ec08d9a6c41e33568fcdc3ea Mon Sep 17 00:00:00 2001
From: binary-husky <505030475@qq.com>
Date: Fri, 19 May 2023 12:51:43 +0800
Subject: [PATCH 18/19] 3.35

---
 version | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/version b/version
index e833fda..81729fe 100644
--- a/version
+++ b/version
@@ -1,5 +1,5 @@
 {
-    "version": 3.34,
+    "version": 3.35,
     "show_feature": true,
-    "new_feature": "修复新版gradio(3.28.3)的暗色主题适配 <-> 提供复旦MOSS模型适配(启用需额外依赖) <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D WAIFU装饰 <-> 完善对话历史的保存/载入/删除 <-> ChatGLM加线程锁提高并发稳定性 <-> 支持NewBing <-> Markdown翻译功能支持直接输入Readme文件网址 <-> 保存对话功能 <-> 解读任意语言代码+同时询问任意的LLM组合 <-> 添加联网(Google)回答问题插件"
+    "new_feature": "添加了OpenAI图片生成插件 <-> 添加了OpenAI音频转文本总结插件 <-> 通过Slack添加对Claude的支持 <-> 提供复旦MOSS模型适配(启用需额外依赖) <-> 提供docker-compose方案兼容LLAMA盘古RWKV等模型的后端 <-> 新增Live2D装饰 <-> 完善对话历史的保存/载入/删除 <-> 保存对话功能"
 }

From d2fa4c80eb1ba3cd088a0249f2b04197a591bbf7 Mon Sep 17 00:00:00 2001
From: binary-husky <96192199+binary-husky@users.noreply.github.com>
Date: Fri, 19 May 2023 13:00:38 +0800
Subject: [PATCH 19/19] Update config.py

---
 config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config.py b/config.py
index baaa410..6e26539 100644
--- a/config.py
+++ b/config.py
@@ -77,6 +77,6 @@ NEWBING_COOKIES = """
 your bing cookies here
 """
 
-# Slack Claude bot, 使用教程详情见 request_llm/README.md
+# 如果需要使用Slack Claude,使用教程详情见 request_llm/README.md
 SLACK_CLAUDE_BOT_ID = ''
 SLACK_CLAUDE_USER_TOKEN = ''
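A note on the concurrency design that patches 04–10 settle on: the Slack client lives in a daemon subprocess (`ClaudeHandle`), and `stream_chat` in the main process reads reply fragments off a `multiprocessing.Pipe` until it sees the `[Finish]` sentinel, treating `[Fail]` as an error marker. The protocol is easiest to see in isolation; in this minimal sketch an echo worker stands in for the Slack/Claude call, and all names are illustrative rather than part of the series:

```python
from multiprocessing import Pipe, Process

def worker(child):
    while True:
        kwargs = child.recv()                     # block until the main process sends a request
        try:
            for word in kwargs['query'].split():  # stand-in for Claude's streamed reply
                child.send(word)                  # forward each partial fragment
        except Exception:
            child.send('[Fail]')                  # error sentinel: caller marks the handle unusable
        child.send('[Finish]')                    # end-of-stream sentinel: caller stops reading

if __name__ == '__main__':
    parent, child = Pipe()
    Process(target=worker, args=(child,), daemon=True).start()
    parent.send({'query': 'streaming over a pipe'})
    while (res := parent.recv()) != '[Finish]':
        if res == '[Fail]':
            break
        print(res)                                # each fragment would refresh the chatbot UI
```

Because the worker never exits on its own, making it a daemon process (as the patches do) is what lets the program terminate cleanly.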
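Finally, the one-line guard added in patch 15 (and reused for the audio plugin in patch 16) exists because `dict.get(key, default)` only falls back to the default when the key is absent: when the advanced-args textbox is left blank, `plugin_kwargs` arrives with an empty string, which would otherwise override defaults such as the `'256x256'` image resolution. A quick demonstration of the failure mode and the fix:

```python
plugin_kwargs = {"advanced_arg": ""}                 # what the UI sends for an empty textbox
print(plugin_kwargs.get("advanced_arg", "256x256"))  # -> "" (default silently ignored)

# The guard from patch 15: drop the empty entry so .get() can apply its default.
if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""):
    plugin_kwargs.pop("advanced_arg")

print(plugin_kwargs.get("advanced_arg", "256x256"))  # -> "256x256" (default applies again)
```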