diff --git a/.github/workflows/build-with-chatglm.yml b/.github/workflows/build-with-chatglm.yml
index f968bb9..f729abb 100644
--- a/.github/workflows/build-with-chatglm.yml
+++ b/.github/workflows/build-with-chatglm.yml
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image for ChatGLM support
+name: build-with-chatglm
 on:
   push:
diff --git a/.github/workflows/build-with-latex.yml b/.github/workflows/build-with-latex.yml
index fb16d2c..173d482 100644
--- a/.github/workflows/build-with-latex.yml
+++ b/.github/workflows/build-with-latex.yml
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image for Latex support
+name: build-with-latex
 on:
   push:
diff --git a/.github/workflows/build-without-local-llms.yml b/.github/workflows/build-without-local-llms.yml
index b0aed7f..7bebd06 100644
--- a/.github/workflows/build-without-local-llms.yml
+++ b/.github/workflows/build-without-local-llms.yml
@@ -1,5 +1,5 @@
 # https://docs.github.com/en/actions/publishing-packages/publishing-docker-images#publishing-images-to-github-packages
-name: Create and publish a Docker image
+name: build-without-local-llms
 on:
   push:
diff --git a/.gitignore b/.gitignore
index 18d3fb8..55c4db1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -150,3 +150,4 @@ request_llm/jittorllms
 multi-language
 request_llm/moss
 media
+flagged
diff --git a/README.md b/README.md
index fd6f4bf..34160de 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ To translate this project to arbitary language with GPT, read and run [`multi_la
-【可选步骤】如果需要支持清华ChatGLM/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
+【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
 ```sh
-# 【可选步骤I】支持清华ChatGLM。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+# 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
 python -m pip install -r request_llm/requirements_chatglm.txt

 # 【可选步骤II】支持复旦MOSS
@@ -144,6 +145,8 @@ python main.py
 ### 安装方法II:使用Docker

 1. 仅ChatGPT(推荐大多数人选择,等价于docker-compose方案1)
+[![](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
+[![](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)

 ``` sh
 git clone https://github.com/binary-husky/gpt_academic.git # 下载项目
@@ -158,7 +161,8 @@ docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
 ```
 P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用docker-compose获取Latex功能(修改docker-compose.yml,保留方案4并删除其他方案)。

-2. ChatGPT + ChatGLM + MOSS(需要熟悉Docker)
+2. ChatGPT + ChatGLM2 + MOSS(需要熟悉Docker)
+[![](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)

 ``` sh
 # 修改docker-compose.yml,保留方案2并删除其他方案。修改docker-compose.yml中方案2的配置,参考其中注释即可
@@ -284,6 +288,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 ### II:版本:
 - version 3.5(Todo): 使用自然语言调用本项目的所有函数插件(高优先级)
+- version 3.46: 支持完全脱手操作的实时语音对话
 - version 3.45: 支持自定义ChatGLM2微调模型
 - version 3.44: 正式支持Azure,优化界面易用性
 - version 3.4: +arxiv论文翻译、latex论文批改功能
@@ -306,13 +311,18 @@ gpt_academic开发者QQ群-2:610599535
 - 某些浏览器翻译插件干扰此软件前端的运行
 - 官方Gradio目前有很多兼容性Bug,请务必使用`requirement.txt`安装Gradio

-### III:参考与学习
+### III:主题
+可以通过修改`THEME`选项(config.py)变更主题
+1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
+
+
+### IV:参考与学习
+
 ```
 代码中参考了很多其他优秀项目中的设计,顺序不分先后:

-# 清华ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
+# 清华ChatGLM2-6B:
+https://github.com/THUDM/ChatGLM2-6B

 # 清华JittorLLMs:
 https://github.com/Jittor/JittorLLMs
diff --git a/config.py b/config.py
index 8e624e6..9c1a000 100644
--- a/config.py
+++ b/config.py
@@ -89,6 +89,8 @@ CONCURRENT_COUNT = 100
 # 是否在提交时自动清空输入框
 AUTO_CLEAR_TXT = False

+# 色彩主题,可选 ["Default", "Chuanhu-Small-and-Beautiful"]
+THEME = "Default"

 # 加一个live2d装饰
 ADD_WAIFU = False
@@ -123,3 +125,9 @@ NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
 NEWBING_COOKIES = """
 put your new bing cookies here
 """
+
+
+# 阿里云实时语音识别 配置难度较高 仅建议高手用户使用 参考 https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md
+ENABLE_AUDIO = False
+ALIYUN_TOKEN = ""   # 例如 f37f30e0f9934c34a992f6f64f7eba4f
+ALIYUN_APPKEY = ""  # 例如 RoPlZrM88DnAFkZK
diff --git a/crazy_functional.py b/crazy_functional.py
index ff4d81f..d3b2953 100644
--- a/crazy_functional.py
+++ b/crazy_functional.py
@@ -392,7 +392,7 @@ def get_crazy_functions():
         })
         from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
         function_plugins.update({
-            "Arixv翻译(输入arxivID)[需Latex]": {
+            "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
                 "Color": "stop",
                 "AsButton": False,
                 "AdvancedArgs": True,
@@ -403,7 +403,7 @@ def get_crazy_functions():
             }
         })
         function_plugins.update({
-            "本地论文翻译(上传Latex压缩包)[需Latex]": {
+            "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
                 "Color": "stop",
                 "AsButton": False,
                 "AdvancedArgs": True,
@@ -416,6 +416,22 @@ def get_crazy_functions():
     except:
         print('Load function plugin failed')

+    try:
+        from toolbox import get_conf
+        ENABLE_AUDIO, = get_conf('ENABLE_AUDIO')
+        if ENABLE_AUDIO:
+            from crazy_functions.语音助手 import 语音助手
+            function_plugins.update({
+                "实时音频采集": {
+                    "Color": "stop",
+                    "AsButton": True,
+                    "Function": HotReload(语音助手)
+                }
+            })
+    except:
+        print('Load function plugin failed')
+
     # try:
     #     from crazy_functions.虚空终端 import 终端
     #     function_plugins.update({
diff --git a/crazy_functions/live_audio/aliyunASR.py b/crazy_functions/live_audio/aliyunASR.py
new file mode 100644
index 0000000..5cb1176
--- /dev/null
+++ b/crazy_functions/live_audio/aliyunASR.py
@@ -0,0 +1,93 @@
+import time, threading, json
+
+
+class AliyunASR():
+
+    def test_on_sentence_begin(self, message, *args):
+        # print("test_on_sentence_begin:{}".format(message))
+        pass
+
+    def test_on_sentence_end(self, message, *args):
+        # print("test_on_sentence_end:{}".format(message))
+        message = json.loads(message)
+        self.parsed_sentence = message['payload']['result']
+        self.event_on_entence_end.set()
+        print(self.parsed_sentence)
+
+    def test_on_start(self, message, *args):
+        # print("test_on_start:{}".format(message))
+        pass
+
+    def test_on_error(self, message, *args):
+        # print("on_error args=>{}".format(args))
+        pass
+
+    def test_on_close(self, *args):
+        self.aliyun_service_ok = False
+        pass
+
+    def test_on_result_chg(self, message, *args):
+        # print("test_on_chg:{}".format(message))
+        message = json.loads(message)
+        self.parsed_text = message['payload']['result']
+        self.event_on_result_chg.set()
+
+    def test_on_completed(self, message, *args):
+        # print("on_completed:args=>{} message=>{}".format(args, message))
+        pass
+
+
+    def audio_convertion_thread(self, uuid):
+        # 在一个异步线程中采集音频
+        import nls  # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+        import tempfile
+        from scipy import io
+        from toolbox import get_conf
+        from .audio_io import change_sample_rate
+        from .audio_io import RealtimeAudioDistribution
+        NEW_SAMPLERATE = 16000
+        rad = RealtimeAudioDistribution()
+        rad.clean_up()
+        temp_folder = tempfile.gettempdir()
+        TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
+        self.aliyun_service_ok = True
+        URL = "wss://nls-gateway.aliyuncs.com/ws/v1"
+        sr = nls.NlsSpeechTranscriber(
+            url=URL,
+            token=TOKEN,
+            appkey=APPKEY,
+            on_sentence_begin=self.test_on_sentence_begin,
+            on_sentence_end=self.test_on_sentence_end,
+            on_start=self.test_on_start,
+            on_result_changed=self.test_on_result_chg,
+            on_completed=self.test_on_completed,
+            on_error=self.test_on_error,
+            on_close=self.test_on_close,
+            callback_args=[uuid.hex]
+        )
+
+        r = sr.start(aformat="pcm",
+                     enable_intermediate_result=True,
+                     enable_punctuation_prediction=True,
+                     enable_inverse_text_normalization=True)
+
+        while not self.stop:
+            # time.sleep(self.capture_interval)
+            audio = rad.read(uuid.hex)
+            if audio is not None:
+                # 重采样为16k并写出临时音频文件(scipy以wav格式写出,此处沿用.pcm后缀)
+                temp_file = f'{temp_folder}/{uuid.hex}.pcm'
+                dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE)  # 48000 --> 16000
+                io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
+                # read pcm binary
+                with open(temp_file, "rb") as f: data = f.read()
+                # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
+                slices = zip(*(iter(data),) * 640)  # 640个字节为一组
+                for i in slices: sr.send_audio(bytes(i))
+            else:
+                time.sleep(0.1)
+
+        if not self.aliyun_service_ok:
+            self.stop = True
+            self.stop_msg = 'Aliyun音频服务异常,请检查ALIYUN_TOKEN和ALIYUN_APPKEY是否过期。'
+        r = sr.stop()
diff --git a/crazy_functions/live_audio/audio_io.py b/crazy_functions/live_audio/audio_io.py
new file mode 100644
index 0000000..3ff83a6
--- /dev/null
+++ b/crazy_functions/live_audio/audio_io.py
@@ -0,0 +1,51 @@
+import numpy as np
+from scipy import interpolate
+
+
+def Singleton(cls):
+    _instance = {}
+
+    def _singleton(*args, **kargs):
+        if cls not in _instance:
+            _instance[cls] = cls(*args, **kargs)
+        return _instance[cls]
+
+    return _singleton
+
+
+@Singleton
+class RealtimeAudioDistribution():
+    def __init__(self) -> None:
+        self.data = {}
+        self.max_len = 1024 * 1024
+        self.rate = 48000  # 只读,每秒采样数量
+
+    def clean_up(self):
+        self.data = {}
+
+    def feed(self, uuid, audio):
+        self.rate, audio_ = audio
+        # print('feed', len(audio_), audio_[-25:])
+        if uuid not in self.data:
+            self.data[uuid] = audio_
+        else:
+            new_arr = np.concatenate((self.data[uuid], audio_))
+            if len(new_arr) > self.max_len: new_arr = new_arr[-self.max_len:]
+            self.data[uuid] = new_arr
+
+    def read(self, uuid):
+        if uuid in self.data:
+            res = self.data.pop(uuid)
+            print('\r read-', len(res), '-', max(res), end='', flush=True)
+        else:
+            res = None
+        return res
+
+
+def change_sample_rate(audio, old_sr, new_sr):
+    duration = audio.shape[0] / old_sr
+
+    time_old = np.linspace(0, duration, audio.shape[0])
+    time_new = np.linspace(0, duration, int(audio.shape[0] * new_sr / old_sr))
+
+    interpolator = interpolate.interp1d(time_old, audio.T)
+    new_audio = interpolator(time_new).T
+    return new_audio.astype(np.int16)
\ No newline at end of file
diff --git a/crazy_functions/对话历史存档.py b/crazy_functions/对话历史存档.py
index fed0f8f..003cbf5 100644
--- a/crazy_functions/对话历史存档.py
+++ b/crazy_functions/对话历史存档.py
@@ -12,7 +12,7 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
         file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
     os.makedirs('./gpt_log/', exist_ok=True)
     with open(f'./gpt_log/{file_name}', 'w', encoding='utf8') as f:
-        from theme import advanced_css
+        from theme.theme import advanced_css
         f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
diff --git a/crazy_functions/语音助手.py b/crazy_functions/语音助手.py
new file mode 100644
--- /dev/null
+++ b/crazy_functions/语音助手.py
+from toolbox import update_ui, get_conf, markdown_convertion
+from toolbox import CatchException
+from crazy_functions.crazy_utils import input_clipping
+from request_llm.bridge_all import predict_no_ui_long_connection
+from crazy_functions.live_audio.aliyunASR import AliyunASR
+import threading, time
+import numpy as np
+
+
+class WatchDog():
+    # 看门狗:若超过timeout秒未调用feed(),则在后台线程中触发bark_fn
+    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
+        self.last_feed = None
+        self.timeout = timeout
+        self.bark_fn = bark_fn
+        self.interval = interval
+        self.msg = msg
+        self.kill_dog = False
+
+    def watch(self):
+        while not self.kill_dog:
+            if time.time() - self.last_feed > self.timeout:
+                if len(self.msg) > 0: print(self.msg)
+                self.bark_fn()
+                break
+            time.sleep(self.interval)
+
+    def begin_watch(self):
+        self.last_feed = time.time()
+        th = threading.Thread(target=self.watch)
+        th.daemon = True
+        th.start()
+
+    def feed(self):
+        self.last_feed = time.time()
+
+
+def chatbot2history(chatbot):
+    # 把chatbot中的问答内容压平为history,跳过占位符,并剥离markdown渲染产生的div标签
+    history = []
+    for c in chatbot:
+        for q in c:
+            if q not in ["[请讲话]", "[正在等您说完问题]", "[等待GPT响应]"]:
+                history.append(q.strip('<div class="markdown-body">').strip('</div>'))
+    return history
+
+
+class AsyncGptTask():
+    # 管理并发的GPT请求子线程,把各线程的流式结果写回chatbot对应的行
+    def __init__(self) -> None:
+        self.observe_future = []
+        self.observe_future_chatbot_index = []
+
+    def gpt_thread_worker(self, i_say, llm_kwargs, history, sys_prompt, observe_window, index):
+        try:
+            MAX_TOKEN_ALLO = 2560
+            i_say, history = input_clipping(i_say, history, max_token_limit=MAX_TOKEN_ALLO)
+            gpt_say_partial = predict_no_ui_long_connection(inputs=i_say, llm_kwargs=llm_kwargs, history=history, sys_prompt=sys_prompt,
+                                                            observe_window=observe_window[index], console_slience=True)
+        except ConnectionAbortedError as token_exceed_err:
+            print('至少一个线程任务Token溢出而失败', token_exceed_err)
+        except Exception as e:
+            print('至少一个线程任务意外失败', e)
+
+    def add_async_gpt_task(self, i_say, chatbot_index, llm_kwargs, history, system_prompt):
+        self.observe_future.append([""])
+        self.observe_future_chatbot_index.append(chatbot_index)
+        cur_index = len(self.observe_future) - 1
+        th_new = threading.Thread(target=self.gpt_thread_worker, args=(i_say, llm_kwargs, history, system_prompt, self.observe_future, cur_index))
+        th_new.daemon = True
+        th_new.start()
+
+    def update_chatbot(self, chatbot):
+        for of, ofci in zip(self.observe_future, self.observe_future_chatbot_index):
+            try:
+                chatbot[ofci] = list(chatbot[ofci])
+                chatbot[ofci][1] = markdown_convertion(of[0])
+            except:
+                self.observe_future = []
+                self.observe_future_chatbot_index = []
+        return chatbot
+
+
+class InterviewAssistant(AliyunASR):
+    def __init__(self):
+        self.capture_interval = 0.5  # second
+        self.stop = False
+        self.parsed_text = ""
+        self.parsed_sentence = ""
+        self.buffered_sentence = ""
+        self.event_on_result_chg = threading.Event()
+        self.event_on_entence_end = threading.Event()
+        self.event_on_commit_question = threading.Event()
+
+    def __del__(self):
+        self.stop = True
+        self.stop_msg = ""
+        self.commit_wd.kill_dog = True
+        self.plugin_wd.kill_dog = True
+
+    def init(self, chatbot):
+        # 初始化音频采集线程
+        self.captured_audio = np.array([])
+        self.keep_latest_n_second = 10
+        self.commit_after_pause_n_second = 1.5
+        self.ready_audio_fragment = None
+        self.stop = False
+        self.plugin_wd = WatchDog(timeout=5, bark_fn=self.__del__, msg="程序终止")
+        self.aut = threading.Thread(target=self.audio_convertion_thread, args=(chatbot._cookies['uuid'],))
+        self.aut.daemon = True
+        self.aut.start()
+        # th2 = threading.Thread(target=self.audio2txt_thread, args=(chatbot._cookies['uuid'],))
+        # th2.daemon = True
+        # th2.start()
+
+    def no_audio_for_a_while(self):
+        if len(self.buffered_sentence) < 7:  # 如果一句话小于7个字,暂不提交
+            self.commit_wd.begin_watch()
+        else:
+            self.event_on_commit_question.set()
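+
+    # 说明:begin()是本插件的主体(生成器函数)。主循环以约0.25秒的节奏轮询:
+    # 先把各GPT子线程的流式回答写回chatbot,再依次响应三类事件:
+    # 识别结果变化(实时刷新当前句)、整句结束(累积到buffered_sentence)、
+    # 静默超时(把累积的问题提交给新的GPT子线程,并开始倾听下一个问题)。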
+    def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
+        # main plugin function
+        self.init(chatbot)
+        chatbot.append(["[请讲话]", "[正在等您说完问题]"])
+        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+        self.plugin_wd.begin_watch()
+        self.agt = AsyncGptTask()
+        self.commit_wd = WatchDog(timeout=self.commit_after_pause_n_second, bark_fn=self.no_audio_for_a_while, interval=0.2)
+        self.commit_wd.begin_watch()
+
+        while not self.stop:
+            self.event_on_result_chg.wait(timeout=0.25)  # run once every 0.25 second
+            chatbot = self.agt.update_chatbot(chatbot)   # 将子线程的gpt结果写入chatbot
+            history = chatbot2history(chatbot)
+            yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+            self.plugin_wd.feed()
+
+            if self.event_on_result_chg.is_set():
+                # update audio decode result
+                self.event_on_result_chg.clear()
+                chatbot[-1] = list(chatbot[-1])
+                chatbot[-1][0] = self.buffered_sentence + self.parsed_text
+                history = chatbot2history(chatbot)
+                yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+                self.commit_wd.feed()
+
+            if self.event_on_entence_end.is_set():
+                # called when a sentence has ended
+                self.event_on_entence_end.clear()
+                self.parsed_text = self.parsed_sentence
+                self.buffered_sentence += self.parsed_sentence
+
+            if self.event_on_commit_question.is_set():
+                # called when a question should be committed
+                self.event_on_commit_question.clear()
+                if len(self.buffered_sentence) == 0: raise RuntimeError
+
+                self.commit_wd.begin_watch()
+                chatbot[-1] = list(chatbot[-1])
+                chatbot[-1] = [self.buffered_sentence, "[等待GPT响应]"]
+                yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+                # add gpt task 创建子线程请求gpt,避免线程阻塞
+                history = chatbot2history(chatbot)
+                self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
+
+                self.buffered_sentence = ""
+                chatbot.append(["[请讲话]", "[正在等您说完问题]"])
+                yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+
+        if len(self.stop_msg) != 0:
+            raise RuntimeError(self.stop_msg)
+
+
+
+@CatchException
+def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
+    chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
+    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+
+    # 尝试导入依赖,如果缺少依赖,则给出安装建议
+    try:
+        import nls
+        from scipy import io
+    except:
+        chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
+        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+        return
+
+    TOKEN, APPKEY = get_conf('ALIYUN_TOKEN', 'ALIYUN_APPKEY')
+    if TOKEN == "" or APPKEY == "":
+        chatbot.append(["缺少配置", "没有阿里云语音识别APPKEY和TOKEN, 详情见https://help.aliyun.com/document_detail/450255.html"])
+        yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+        return
+
+    yield from update_ui(chatbot=chatbot, history=history)  # 刷新界面
+    ia = InterviewAssistant()
+    yield from ia.begin(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
diff --git a/docs/gradio-3.32.2-py3-none-any.whl b/docs/gradio-3.32.2-py3-none-any.whl
index ce87604..796ad70 100644
Binary files a/docs/gradio-3.32.2-py3-none-any.whl and b/docs/gradio-3.32.2-py3-none-any.whl differ
diff --git a/docs/use_audio.md b/docs/use_audio.md
new file mode 100644
index 0000000..64a507e
--- /dev/null
+++ b/docs/use_audio.md
@@ -0,0 +1,52 @@
+# 使用音频交互功能
+
+
+## 1. 安装额外依赖
+```
+pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+```
+
+如果因为特色网络问题导致上述命令无法执行:
+1. git clone alibabacloud-nls-python-sdk这个项目(或者直接前往Github对应网址下载压缩包),命令行输入:`git clone https://github.com/aliyun/alibabacloud-nls-python-sdk.git`
+2. 进入alibabacloud-nls-python-sdk目录,命令行输入:`python setup.py install`
+
+
+## 2. 配置音频功能开关 和 阿里云APPKEY(config.py/config_private.py/环境变量)
+
+- 注册阿里云账号
+- 开通 智能语音交互 (有免费白嫖时长)
+- 获取token和appkey
+- 未来将逐步用其他更廉价的云服务取代阿里云
+
+```
+ENABLE_AUDIO = True
+ALIYUN_TOKEN = "554a50fcd0bb476c8d07bb630e94d20c" # 此token已经失效
+ALIYUN_APPKEY = "RoPlZrM88DnAFkZK" # 此appkey已经失效
+```
+
+参考 https://help.aliyun.com/document_detail/450255.html
+先注册阿里云开发者账号;登录之后开通 智能语音交互 功能,可以免费获得一个token;然后在 全部项目 中创建一个项目,可以获得一个appkey。
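+
+配置完成后,可以用下面的脚本自检token与appkey是否可用(示意代码,并非本项目文件;`NlsSpeechTranscriber`的用法取自本项目的crazy_functions/live_audio/aliyunASR.py,脚本中的回调函数与打印内容为假设的示例):
+
+```python
+# 最小连通性自检(示意):鉴权失败时on_error回调会收到服务端错误信息
+import time
+import nls  # pip install git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+
+TOKEN = "你的ALIYUN_TOKEN"    # 假设:请替换为真实token
+APPKEY = "你的ALIYUN_APPKEY"  # 假设:请替换为真实appkey
+
+def on_start(message, *args): print("连接成功,token与appkey有效")
+def on_error(message, *args): print("鉴权或服务异常:", message)
+
+sr = nls.NlsSpeechTranscriber(
+    url="wss://nls-gateway.aliyuncs.com/ws/v1",
+    token=TOKEN, appkey=APPKEY,
+    on_start=on_start, on_error=on_error)
+sr.start(aformat="pcm")  # 建立websocket连接即可验证鉴权
+time.sleep(3)            # 等待回调返回结果
+sr.stop()
+```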
+
+## 3. 启动
+
+启动gpt-academic:`python main.py`
+
+## 4. 点击record from microphone,授权音频采集
+
+I 如果需要监听自己说话(不监听电脑音频),直接在浏览器中选择对应的麦克风即可
+
+II 如果需要监听电脑音频(不监听自己说话),需要安装`VB-Audio VoiceMeeter`,打开声音控制面板(sound control panel)
+- 1 `[把电脑的所有外放声音用VoiceMeeter截留]` 在输出区(playback)选项卡,把VoiceMeeter Input虚拟设备设为默认播放设备(set as default)。
+- 2 `[把截留的声音释放到gpt-academic]` 打开gpt-academic主界面,授权音频采集后,在浏览器地址栏或者类似的地方会出现一个麦克风图标,打开后,按照浏览器的提示,选择VoiceMeeter虚拟麦克风。然后刷新页面,重新授权音频采集。
+- 3 `[把截留的声音同时释放到耳机或音响]` 完成第一步之后,您应处于听不到电脑声音的状态。为了在截获音频的同时,避免影响正常使用,请完成这最后一步配置。在声音控制面板(sound control panel)输入区(recording)选项卡,把VoiceMeeter Output虚拟设备设为默认设备(set as default),并双击进入VoiceMeeter Output虚拟设备的设置。
+    - 3-1 进入VoiceMeeter Output虚拟设备子菜单,打开listen选项卡。
+    - 3-2 勾选Listen to this device。
+    - 3-3 在playback through this device下拉菜单中选择你的正常耳机或音响。
+
+III 两种音频监听模式切换时,需要刷新页面才有效。
+
+## 5. 点击函数插件区“实时音频采集” 或者其他音频交互功能
+
+
+
diff --git a/main.py b/main.py
index f875858..a39736d 100644
--- a/main.py
+++ b/main.py
@@ -8,18 +8,19 @@ def main():
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = \
         get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
-
+    ENABLE_AUDIO, AUTO_CLEAR_TXT = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT')
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     if not AUTHENTICATION: AUTHENTICATION = None

     from check_proxy import get_current_version
+    from theme.theme import adjust_theme, advanced_css, theme_declaration
     initial_prompt = "Serve me as a writing and programming assistant."
-    title_html = f"