version 3.75 (#1702)
* Update version to 3.74
* Add support for the Yi Model API (#1635): support 01.AI (Yi) models, remove Newbing, update config (Co-authored-by: binary-husky <qingxu.fu@outlook.com>)
* Refactor function signatures in bridge files
* Fix Qwen API change
* Rename and refactor functions
* Rename and move some cookie functions
* Add the Haiku model, with new endpoint configuration notes (#1626); sync the notes with the latest endpoints (Co-authored-by: binary-husky <qingxu.fu@outlook.com>)
* Add file access control for the private_upload directory (#1596): minor fastapi adjustment; add logging to save conversation records; username retrieval still to be fixed; support a second web path; allow accessing the default user dir (Co-authored-by: binary-husky <qingxu.fu@outlook.com>)
* Remove yaml deps
* Fix favicon
* Fix an absolute-path auth problem
* Add a forgotten return statement
* Add `dashscope` to deps
* Fix GHSA-v9q9-xj86-953p
* Patch unauthorized access via overlapping usernames (#1681): add Cohere model API access; mark cohere as can_multi_thread; attempt to fix blocked user access; fix a fastapi bug; change the Cohere API endpoint
* Explain version
* Fix the illegal temperature problem in com_zhipuglm.py (#1687): users hit an invalid-argument error for the temperature parameter in the zhipuai UI
* Allow storing the LM model dropdown
* Add a button to reverse the previous reset
* Remove extra fns
* Add support for the glm-4v model (#1700)
* Change the quantized loading of ChatGLM3 (#1688) (Co-authored-by: zym9804 <ren990603@gmail.com>)
* Save chat, stage 1
* Handle the null-cookie situation
* Activate speech when the copy button is clicked; fill in missing parts; move everything to JS; finish the first stage
* Add Edge TTS; assorted bug fixes; remove a console log; add an audio switch
* Update the TTS readme; remove the temp file when done; disable auto audio follow; avoid play-queue updates after "shut up"
* feat: minify common.js
* Improve TTS functionality
* Determine whether the cached model is in choices
* Add support for Ollama (#1740)
* Print an error when doc2x is not successful; add an icon; adjust the URL for the doc2x key version; prepare merge

Co-authored-by: Menghuan1918 <menghuan2003@outlook.com>
Co-authored-by: Skyzayre <120616113+Skyzayre@users.noreply.github.com>
Co-authored-by: XIao <46100050+Kilig947@users.noreply.github.com>
Co-authored-by: Yuki <903728862@qq.com>
Co-authored-by: zyren123 <91042213+zyren123@users.noreply.github.com>
Co-authored-by: zym9804 <ren990603@gmail.com>
parent: bd5280df1b
commit: 5fcd02506c
.gitignore (vendored, 1 change)
@@ -153,3 +153,4 @@ media
 flagged
 request_llms/ChatGLM-6b-onnx-u8s8
 .pre-commit-config.yaml
+themes/common.js.min.*.js
config.py (16 changes)
@@ -34,7 +34,7 @@ else:
 LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
                     "gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
                     "gemini-pro", "chatglm3"
                     ]
 # --- --- --- ---
@@ -50,9 +50,9 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
 # "yi-34b-chat-0205", "yi-34b-chat-200k"
 # ]
 # --- --- --- ---
-# 此外,为了更灵活地接入one-api多模型管理界面,您还可以在接入one-api时,
-# 使用"one-api-*"前缀直接使用非标准方式接入的模型,例如
-# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)"]
+# 此外,您还可以在接入one-api/vllm/ollama时,
+# 使用"one-api-*","vllm-*","ollama-*"前缀直接使用非标准方式接入的模型,例如
+# AVAIL_LLM_MODELS = ["one-api-claude-3-sonnet-20240229(max_token=100000)", "ollama-phi3(max_token=4096)"]
 # --- --- --- ---
@@ -60,7 +60,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p

 # 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
 # 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
-# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
+# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions", "http://localhost:11434/api/chat": "在这里填写您ollama的URL"}
 API_URL_REDIRECT = {}
@@ -195,6 +195,12 @@ ALIYUN_ACCESSKEY="" # (无需填写)
 ALIYUN_SECRET="" # (无需填写)

+# GPT-SOVITS 文本转语音服务的运行地址(将语言模型的生成文本朗读出来)
+TTS_TYPE = "DISABLE" # LOCAL / LOCAL_SOVITS_API / DISABLE
+GPT_SOVITS_URL = ""
+EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"

 # 接入讯飞星火大模型 https://console.xfyun.cn/services/iat
 XFYUN_APPID = "00000000"
 XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
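For readers skimming the config hunks above, the new prefix convention could be combined in a `config_private.py` along these lines. This is a sketch: the model names and the remote Ollama host are illustrative placeholders, not part of this commit.

```python
# Sketch of a config_private.py using the prefix convention described above.
# "one-api-", "vllm-" and "ollama-" select the bridge; "(max_token=N)" is an
# optional per-model cap. Model names here are illustrative placeholders.
AVAIL_LLM_MODELS = [
    "gpt-4",
    "one-api-claude-3-sonnet-20240229(max_token=100000)",  # via one-api
    "vllm-qwen-7b-chat(max_token=8192)",                   # via a vLLM server
    "ollama-phi3(max_token=4096)",                         # via a local Ollama daemon
]

# If Ollama runs on another machine, redirect its default endpoint:
API_URL_REDIRECT = {"http://localhost:11434/api/chat": "http://192.168.0.5:11434/api/chat"}
```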
@@ -74,8 +74,10 @@ def 解析PDF_DOC2X_单文件(fp, project_folder, llm_kwargs, plugin_kwargs, cha
     import requests, json, os
     markdown_dir = get_log_folder(plugin_name="pdf_ocr")
     doc2x_api_key = DOC2X_API_KEY
-    # url = "https://api.doc2x.noedgeai.com/api/v1/pdf"
-    url = "https://api.doc2x.noedgeai.com/api/platform/pdf"
+    if doc2x_api_key.startswith('sk-'):
+        url = "https://api.doc2x.noedgeai.com/api/v1/pdf"
+    else:
+        url = "https://api.doc2x.noedgeai.com/api/platform/pdf"
     chatbot.append((None, "加载PDF文件,发送至DOC2X解析..."))
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
docs/use_tts.md (new file, 58 lines)

# 使用TTS文字转语音

## 1. 使用EDGE-TTS(简单)

将本项目配置项修改如下即可

```
TTS_TYPE = "EDGE_TTS"
EDGE_TTS_VOICE = "zh-CN-XiaoxiaoNeural"
```

## 2. 使用SoVITS(需要有显卡)

使用以下docker-compose.yml文件,先启动SoVITS服务API

1. 创建以下文件夹结构
```shell
.
├── docker-compose.yml
└── reference
    ├── clone_target_txt.txt
    └── clone_target_wave.mp3
```
2. 其中`docker-compose.yml`为
```yaml
version: '3.8'
services:
  gpt-sovits:
    image: fuqingxu/sovits_gptac_trim:latest
    container_name: sovits_gptac_container
    working_dir: /workspace/gpt_sovits_demo
    environment:
      - is_half=False
      - is_share=False
    volumes:
      - ./reference:/reference
    ports:
      - "19880:9880" # 19880 为 sovits api 的暴露端口,记住它
    shm_size: 16G
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    command: bash -c "python3 api.py"
```
3. 其中`clone_target_wave.mp3`为需要克隆的角色音频,`clone_target_txt.txt`为该音频对应的文字文本( https://wiki.biligame.com/ys/%E8%A7%92%E8%89%B2%E8%AF%AD%E9%9F%B3 )
4. 运行`docker-compose up`
5. 将本项目配置项修改如下即可
(19880 为 sovits api 的暴露端口,与docker-compose.yml中的端口对应)
```
TTS_TYPE = "LOCAL_SOVITS_API"
GPT_SOVITS_URL = "http://127.0.0.1:19880"
```
6. 启动本项目
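Before step 6, it can help to confirm the SoVITS container from step 4 is answering. A hedged sketch follows; the JSON fields are assumptions, and the authoritative request schema is the api.py inside the sovits_gptac_trim image.

```python
# Hypothetical reachability check for the SoVITS API started in step 4.
# The request fields are assumptions; consult api.py in the container image.
import requests

resp = requests.post(
    "http://127.0.0.1:19880",  # the port mapped in docker-compose.yml
    json={"text": "测试语音合成", "text_language": "zh"},
    timeout=60,
)
print(resp.status_code, len(resp.content), "bytes of audio")
```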
main.py (34 changes)
@@ -1,4 +1,4 @@
-import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
+import os, json; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染

 help_menu_description = \
 """Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
@@ -14,7 +14,7 @@ help_menu_description = \
 </br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"""

 def enable_log(PATH_LOGGING):
-    import logging, uuid
+    import logging
     admin_log_path = os.path.join(PATH_LOGGING, "admin")
     os.makedirs(admin_log_path, exist_ok=True)
     log_dir = os.path.join(admin_log_path, "chat_secrets.log")
@@ -29,13 +29,14 @@ def main():
     if gr.__version__ not in ['3.32.9']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
     from request_llms.bridge_all import predict
-    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
+    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
     ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
     NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
-    DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU')
+    DARK_MODE, INIT_SYS_PROMPT, ADD_WAIFU, TTS_TYPE = get_conf('DARK_MODE', 'INIT_SYS_PROMPT', 'ADD_WAIFU', 'TTS_TYPE')
+    if LLM_MODEL not in AVAIL_LLM_MODELS: AVAIL_LLM_MODELS += [LLM_MODEL]

     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -79,15 +80,18 @@ def main():
     cancel_handles = []
     customize_btns = {}
     predefined_btns = {}
+    from shared_utils.cookie_manager import make_cookie_cache, make_history_cache
     with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as app_block:
         gr.HTML(title_html)
-        secret_css, web_cookie_cache = gr.Textbox(visible=False), gr.Textbox(visible=False)
-        cookies = gr.State(load_chat_cookies())
+        secret_css = gr.Textbox(visible=False, elem_id="secret_css")
+        cookies, web_cookie_cache = make_cookie_cache() # 定义 后端state(cookies)、前端(web_cookie_cache)两兄弟
         with gr_L1():
             with gr_L2(scale=2, elem_id="gpt-chat"):
                 chatbot = gr.Chatbot(label=f"当前模型:{LLM_MODEL}", elem_id="gpt-chatbot")
                 if LAYOUT == "TOP-DOWN": chatbot.style(height=CHATBOT_HEIGHT)
-                history = gr.State([])
+                history, history_cache, history_cache_update = make_history_cache() # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
             with gr_L2(scale=1, elem_id="gpt-panel"):
                 with gr.Accordion("输入区", open=True, elem_id="input-panel") as area_input_primary:
                     with gr.Row():
@@ -155,7 +159,7 @@ def main():
                     file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")

                 with gr.Tab("更换模型", elem_id="interact-panel"):
-                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
+                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, elem_id="elem_model_sel", label="更换LLM模型/请求源").style(container=False)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature", elem_id="elem_temperature")
                     max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
@@ -164,6 +168,8 @@ def main():
                         _js="""(temperature)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_temperature_cookie", temperature)""")
                     system_prompt.change(None, inputs=[system_prompt], outputs=None,
                         _js="""(system_prompt)=>gpt_academic_gradio_saveload("save", "elem_prompt", "js_system_prompt_cookie", system_prompt)""")
+                    md_dropdown.change(None, inputs=[md_dropdown], outputs=None,
+                        _js="""(md_dropdown)=>gpt_academic_gradio_saveload("save", "elem_model_sel", "js_md_dropdown_cookie", md_dropdown)""")

                 with gr.Tab("界面外观", elem_id="interact-panel"):
                     theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
@@ -247,8 +253,10 @@ def main():
         cancel_handles.append(submitBtn2.click(**predict_args))
         resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset)   # 先在前端快速清除chatbot&status
         resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset)  # 先在前端快速清除chatbot&status
-        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])   # 再在后端清除history
-        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])  # 再在后端清除history
+        reset_server_side_args = (lambda history: ([], [], "已重置", json.dumps(history)),
+                                  [history], [chatbot, history, status, history_cache])
+        resetBtn.click(*reset_server_side_args)   # 再在后端清除history,把history转存history_cache备用
+        resetBtn2.click(*reset_server_side_args)  # 再在后端清除history,把history转存history_cache备用
         clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
         clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
         if AUTO_CLEAR_TXT:
@@ -271,7 +279,7 @@ def main():
         for k in plugins:
             if not plugins[k].get("AsButton", True): continue
             click_handle = plugins[k]["Button"].click(ArgsGeneralWrapper(plugins[k]["Function"]), [*input_combo], output_combo)
-            click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
+            click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [plugins[k]["Button"]], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
             cancel_handles.append(click_handle)
         # 函数插件-下拉菜单与随变按钮的互动
         def on_dropdown_changed(k):
@@ -309,7 +317,7 @@ def main():
             if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
             yield from ArgsGeneralWrapper(plugins[k]["Function"])(request, *args, **kwargs)
         click_handle = switchy_bt.click(route,[switchy_bt, *input_combo], output_combo)
-        click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot])
+        click_handle.then(on_report_generated, [cookies, file_upload, chatbot], [cookies, file_upload, chatbot]).then(None, [switchy_bt], None, _js=r"(fn)=>on_plugin_exe_complete(fn)")
         cancel_handles.append(click_handle)
         # 终止按钮的回调函数注册
         stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
@@ -342,7 +350,7 @@ def main():
         app_block.load(load_web_cookie_cache, inputs = [web_cookie_cache, cookies],
             outputs = [web_cookie_cache, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)

-        app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}")""")    # 配置暗色主题或亮色主题
+        app_block.load(None, inputs=[], outputs=None, _js=f"""()=>GptAcademicJavaScriptInit("{DARK_MODE}","{INIT_SYS_PROMPT}","{ADD_WAIFU}","{LAYOUT}","{TTS_TYPE}")""")    # 配置暗色主题或亮色主题

     # gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
     def run_delayed_tasks():
request_llms/bridge_all.py
@@ -67,7 +67,8 @@ newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub"
 gemini_endpoint = "https://generativelanguage.googleapis.com/v1beta/models"
 claude_endpoint = "https://api.anthropic.com/v1/messages"
 yimodel_endpoint = "https://api.lingyiwanwu.com/v1/chat/completions"
-cohere_endpoint = 'https://api.cohere.ai/v1/chat'
+cohere_endpoint = "https://api.cohere.ai/v1/chat"
+ollama_endpoint = "http://localhost:11434/api/chat"

 if not AZURE_ENDPOINT.endswith('/'): AZURE_ENDPOINT += '/'
 azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/completions?api-version=2023-05-15'
@@ -87,6 +88,7 @@ if gemini_endpoint in API_URL_REDIRECT: gemini_endpoint = API_URL_REDIRECT[gemin
 if claude_endpoint in API_URL_REDIRECT: claude_endpoint = API_URL_REDIRECT[claude_endpoint]
 if yimodel_endpoint in API_URL_REDIRECT: yimodel_endpoint = API_URL_REDIRECT[yimodel_endpoint]
 if cohere_endpoint in API_URL_REDIRECT: cohere_endpoint = API_URL_REDIRECT[cohere_endpoint]
+if ollama_endpoint in API_URL_REDIRECT: ollama_endpoint = API_URL_REDIRECT[ollama_endpoint]

 # 获取tokenizer
 tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo")
@@ -266,6 +268,14 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
+    "glm-4v": {
+        "fn_with_ui": zhipu_ui,
+        "fn_without_ui": zhipu_noui,
+        "endpoint": None,
+        "max_token": 1000,
+        "tokenizer": tokenizer_gpt35,
+        "token_cnt": get_token_num_gpt35,
+    },
     "glm-3-turbo": {
         "fn_with_ui": zhipu_ui,
         "fn_without_ui": zhipu_noui,
@@ -827,7 +837,32 @@ for model in [m for m in AVAIL_LLM_MODELS if m.startswith("vllm-")]:
             "token_cnt": get_token_num_gpt35,
         },
     })
+# -=-=-=-=-=-=- ollama 对齐支持 -=-=-=-=-=-=-
+for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
+    from .bridge_ollama import predict_no_ui_long_connection as ollama_noui
+    from .bridge_ollama import predict as ollama_ui
+    break
+for model in [m for m in AVAIL_LLM_MODELS if m.startswith("ollama-")]:
+    # 为了更灵活地接入ollama多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["ollama-phi3(max_token=6666)"]
+    # 其中
+    #   "ollama-"          是前缀(必要)
+    #   "phi3"             是模型名(必要)
+    #   "(max_token=6666)" 是配置(非必要)
+    try:
+        _, max_token_tmp = read_one_api_model_name(model)
+    except:
+        print(f"ollama模型 {model} 的 max_token 配置不是整数,请检查配置文件。")
+        continue
+    model_info.update({
+        model: {
+            "fn_with_ui": ollama_ui,
+            "fn_without_ui": ollama_noui,
+            "endpoint": ollama_endpoint,
+            "max_token": max_token_tmp,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })

 # -=-=-=-=-=-=- azure模型对齐支持 -=-=-=-=-=-=-
 AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY") # <-- 用于定义和切换多个azure模型 -->
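As a gloss on the naming convention documented in the ollama registration loop above, `read_one_api_model_name` is expected to split a tag like "ollama-phi3(max_token=6666)" into a name and a token cap. A minimal sketch of that parsing, written as an illustration rather than the project's actual implementation (the default cap is an assumption):

```python
# Illustrative re-implementation of the "name(max_token=N)" convention used by
# read_one_api_model_name above; not the project's actual code.
import re

def parse_prefixed_model(model: str, default_max_token: int = 4096):
    m = re.match(r"^(?P<name>[^(]+?)(?:\(max_token=(?P<mt>\d+)\))?$", model)
    if m is None:
        raise ValueError(f"unparseable model tag: {model}")
    mt = int(m.group("mt")) if m.group("mt") else default_max_token
    return m.group("name"), mt

print(parse_prefixed_model("ollama-phi3(max_token=6666)"))  # ('ollama-phi3', 6666)
```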
request_llms/bridge_chatglm3.py
@@ -6,7 +6,6 @@ from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
-

 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 Local Model
 # ------------------------------------------------------------------------------------------------------------------------
@@ -23,20 +22,45 @@ class GetGLM3Handle(LocalLLMHandle):
         import os, glob
         import os
         import platform
-        LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE')
-
-        if LOCAL_MODEL_QUANT == "INT4": # INT4
-            _model_name_ = "THUDM/chatglm3-6b-int4"
-        elif LOCAL_MODEL_QUANT == "INT8": # INT8
-            _model_name_ = "THUDM/chatglm3-6b-int8"
-        else:
-            _model_name_ = "THUDM/chatglm3-6b" # FP16
-        with ProxyNetworkActivate('Download_LLM'):
-            chatglm_tokenizer = AutoTokenizer.from_pretrained(_model_name_, trust_remote_code=True)
-        if device=='cpu':
-            chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cpu').float()
-        else:
-            chatglm_model = AutoModel.from_pretrained(_model_name_, trust_remote_code=True, device='cuda')
-        chatglm_model = chatglm_model.eval()
+        LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
+        _model_name_ = "THUDM/chatglm3-6b"
+        # if LOCAL_MODEL_QUANT == "INT4": # INT4
+        #     _model_name_ = "THUDM/chatglm3-6b-int4"
+        # elif LOCAL_MODEL_QUANT == "INT8": # INT8
+        #     _model_name_ = "THUDM/chatglm3-6b-int8"
+        # else:
+        #     _model_name_ = "THUDM/chatglm3-6b" # FP16
+        with ProxyNetworkActivate("Download_LLM"):
+            chatglm_tokenizer = AutoTokenizer.from_pretrained(
+                _model_name_, trust_remote_code=True
+            )
+        if device == "cpu":
+            chatglm_model = AutoModel.from_pretrained(
+                _model_name_,
+                trust_remote_code=True,
+                device="cpu",
+            ).float()
+        elif LOCAL_MODEL_QUANT == "INT4": # INT4
+            chatglm_model = AutoModel.from_pretrained(
+                pretrained_model_name_or_path=_model_name_,
+                trust_remote_code=True,
+                device="cuda",
+                load_in_4bit=True,
+            )
+        elif LOCAL_MODEL_QUANT == "INT8": # INT8
+            chatglm_model = AutoModel.from_pretrained(
+                pretrained_model_name_or_path=_model_name_,
+                trust_remote_code=True,
+                device="cuda",
+                load_in_8bit=True,
+            )
+        else:
+            chatglm_model = AutoModel.from_pretrained(
+                pretrained_model_name_or_path=_model_name_,
+                trust_remote_code=True,
+                device="cuda",
+            )
+        chatglm_model = chatglm_model.eval()

         self._model = chatglm_model
@@ -46,32 +70,36 @@ class GetGLM3Handle(LocalLLMHandle):
     def llm_stream_generator(self, **kwargs):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ 子进程执行
         def adaptor(kwargs):
-            query = kwargs['query']
-            max_length = kwargs['max_length']
-            top_p = kwargs['top_p']
-            temperature = kwargs['temperature']
-            history = kwargs['history']
+            query = kwargs["query"]
+            max_length = kwargs["max_length"]
+            top_p = kwargs["top_p"]
+            temperature = kwargs["temperature"]
+            history = kwargs["history"]
             return query, max_length, top_p, temperature, history

         query, max_length, top_p, temperature, history = adaptor(kwargs)

-        for response, history in self._model.stream_chat(self._tokenizer,
-                                                         query,
-                                                         history,
-                                                         max_length=max_length,
-                                                         top_p=top_p,
-                                                         temperature=temperature,
-                                                         ):
+        for response, history in self._model.stream_chat(
+            self._tokenizer,
+            query,
+            history,
+            max_length=max_length,
+            top_p=top_p,
+            temperature=temperature,
+        ):
             yield response

     def try_to_import_special_deps(self, **kwargs):
         # import something that will raise error if the user does not install requirement_*.txt
         # 🏃‍♂️🏃‍♂️🏃‍♂️ 主进程执行
         import importlib
         # importlib.import_module('modelscope')


 # ------------------------------------------------------------------------------------------------------------------------
 # 🔌💻 GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetGLM3Handle, model_name, history_format='chatglm3')
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
+    GetGLM3Handle, model_name, history_format="chatglm3"
+)
request_llms/bridge_ollama.py (new file, 272 lines)

# 借鉴自同目录下的bridge_chatgpt.py

"""
该文件中主要包含三个函数

不具备多线程能力的函数:
1. predict: 正常对话时使用,具备完备的交互功能,不可多线程

具备多线程调用能力的函数
2. predict_no_ui_long_connection:支持多线程
"""

import json
import time
import gradio as gr
import logging
import traceback
import requests
import importlib
import random

# config_private.py放自己的秘密如API和代理网址
# 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
from toolbox import get_conf, update_ui, trimmed_format_exc, is_the_upload_folder, read_one_api_model_name
proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf(
    "proxies", "TIMEOUT_SECONDS", "MAX_RETRY"
)

timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
                  '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'

def get_full_error(chunk, stream_response):
    """
    获取完整的从Openai返回的报错
    """
    while True:
        try:
            chunk += next(stream_response)
        except:
            break
    return chunk

def decode_chunk(chunk):
    # 提前读取一些信息(用于判断异常)
    chunk_decoded = chunk.decode()
    chunkjson = None
    is_last_chunk = False
    try:
        chunkjson = json.loads(chunk_decoded)
        is_last_chunk = chunkjson.get("done", False)
    except:
        pass
    return chunk_decoded, chunkjson, is_last_chunk

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
    """
    发送至chatGPT,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。
    inputs:
        是本次问询的输入
    sys_prompt:
        系统静默prompt
    llm_kwargs:
        chatGPT的内部调优参数
    history:
        是之前的对话列表
    observe_window = None:
        用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗
    """
    watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
    if inputs == "": inputs = "空空如也的输入栏"
    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt=sys_prompt, stream=True)
    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=False
            from .bridge_all import model_info
            endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS); break
        except requests.exceptions.ReadTimeout as e:
            retry += 1
            traceback.print_exc()
            if retry > MAX_RETRY: raise TimeoutError
            if MAX_RETRY!=0: print(f'请求超时,正在重试 ({retry}/{MAX_RETRY}) ……')

    stream_response = response.iter_lines()
    result = ''
    while True:
        try: chunk = next(stream_response)
        except StopIteration:
            break
        except requests.exceptions.ConnectionError:
            chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。
        chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)
        if chunk:
            try:
                if is_last_chunk:
                    # 判定为数据流的结束,gpt_replying_buffer也写完了
                    logging.info(f'[response] {result}')
                    break
                result += chunkjson['message']["content"]
                if not console_slience: print(chunkjson['message']["content"], end='')
                if observe_window is not None:
                    # 观测窗,把已经获取的数据显示出去
                    if len(observe_window) >= 1:
                        observe_window[0] += chunkjson['message']["content"]
                    # 看门狗,如果超过期限没有喂狗,则终止
                    if len(observe_window) >= 2:
                        if (time.time()-observe_window[1]) > watch_dog_patience:
                            raise RuntimeError("用户取消了程序。")
            except Exception as e:
                chunk = get_full_error(chunk, stream_response)
                chunk_decoded = chunk.decode()
                error_msg = chunk_decoded
                print(error_msg)
                raise RuntimeError("Json解析不合常规")
    return result


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
    """
    发送至chatGPT,流式获取输出。
    用于基础的对话功能。
    inputs 是本次问询的输入
    top_p, temperature是chatGPT的内部调优参数
    history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误)
    chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容
    additional_fn代表点击的哪个按钮,按钮见functional.py
    """
    if inputs == "": inputs = "空空如也的输入栏"
    user_input = inputs
    if additional_fn is not None:
        from core_functional import handle_core_functionality
        inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)

    raw_input = inputs
    logging.info(f'[raw_input] {raw_input}')
    chatbot.append((inputs, ""))
    yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面

    # check mis-behavior
    if is_the_upload_folder(user_input):
        chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
        yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
        time.sleep(2)

    headers, payload = generate_payload(inputs, llm_kwargs, history, system_prompt, stream)

    from .bridge_all import model_info
    endpoint = model_info[llm_kwargs['llm_model']]['endpoint']

    history.append(inputs); history.append("")

    retry = 0
    while True:
        try:
            # make a POST request to the API endpoint, stream=True
            response = requests.post(endpoint, headers=headers, proxies=proxies,
                                     json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
        except:
            retry += 1
            chatbot[-1] = ((chatbot[-1][0], timeout_bot_msg))
            retry_msg = f",正在重试 ({retry}/{MAX_RETRY}) ……" if MAX_RETRY > 0 else ""
            yield from update_ui(chatbot=chatbot, history=history, msg="请求超时"+retry_msg) # 刷新界面
            if retry > MAX_RETRY: raise TimeoutError

    gpt_replying_buffer = ""

    if stream:
        stream_response = response.iter_lines()
        while True:
            try:
                chunk = next(stream_response)
            except StopIteration:
                break
            except requests.exceptions.ConnectionError:
                chunk = next(stream_response) # 失败了,重试一次?再失败就没办法了。

            # 提前读取一些信息 (用于判断异常)
            chunk_decoded, chunkjson, is_last_chunk = decode_chunk(chunk)

            if chunk:
                try:
                    if is_last_chunk:
                        # 判定为数据流的结束,gpt_replying_buffer也写完了
                        logging.info(f'[response] {gpt_replying_buffer}')
                        break
                    # 处理数据流的主体
                    try:
                        status_text = f"finish_reason: {chunkjson['error'].get('message', 'null')}"
                    except:
                        status_text = "finish_reason: null"
                    gpt_replying_buffer = gpt_replying_buffer + chunkjson['message']["content"]
                    # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
                    history[-1] = gpt_replying_buffer
                    chatbot[-1] = (history[-2], history[-1])
                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
                except Exception as e:
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
                    chunk = get_full_error(chunk, stream_response)
                    chunk_decoded = chunk.decode()
                    error_msg = chunk_decoded
                    chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg)
                    yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
                    print(error_msg)
                    return

def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg):
    from .bridge_all import model_info
    if "bad_request" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] 已经超过了模型的最大上下文或是模型格式错误,请尝试削减单次输入的文本量。")
    elif "authentication_error" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. 请确保API key有效。")
    elif "not_found" in error_msg:
        chatbot[-1] = (chatbot[-1][0], f"[Local Message] {llm_kwargs['llm_model']} 无效,请确保使用小写的模型名称。")
    elif "rate_limit" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] 遇到了控制请求速率限制,请一分钟后重试。")
    elif "system_busy" in error_msg:
        chatbot[-1] = (chatbot[-1][0], "[Local Message] 系统繁忙,请一分钟后重试。")
    else:
        from toolbox import regular_txt_to_markdown
        tb_str = '```\n' + trimmed_format_exc() + '```'
        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
    return chatbot, history

def generate_payload(inputs, llm_kwargs, history, system_prompt, stream):
    """
    整合所有信息,选择LLM模型,生成http请求,为发送请求做准备
    """

    headers = {
        "Content-Type": "application/json",
    }

    conversation_cnt = len(history) // 2

    messages = [{"role": "system", "content": system_prompt}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "user"
            what_i_have_asked["content"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "assistant"
            what_gpt_answer["content"] = history[index+1]
            if what_i_have_asked["content"] != "":
                if what_gpt_answer["content"] == "": continue
                if what_gpt_answer["content"] == timeout_bot_msg: continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['content'] = what_gpt_answer['content']

    what_i_ask_now = {}
    what_i_ask_now["role"] = "user"
    what_i_ask_now["content"] = inputs
    messages.append(what_i_ask_now)
    model = llm_kwargs['llm_model']
    if llm_kwargs['llm_model'].startswith('ollama-'):
        model = llm_kwargs['llm_model'][len('ollama-'):]
        model, _ = read_one_api_model_name(model)
    options = {"temperature": llm_kwargs['temperature']}
    payload = {
        "model": model,
        "messages": messages,
        "options": options,
    }
    try:
        print(f" {llm_kwargs['llm_model']} : {conversation_cnt} : {inputs[:100]} ..........")
    except:
        print('输入中可能存在乱码。')
    return headers,payload
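For reference, `decode_chunk` above assumes Ollama's /api/chat streaming shape, where each line is a standalone JSON object and the final one carries `done: true`. A hedged illustration of what the parser expects (field values are examples, and the extra "model" field is an assumption about the wire format):

```python
# Example NDJSON lines as decode_chunk expects them from /api/chat streaming;
# values are illustrative. Each line parses independently via json.loads.
import json

stream_lines = [
    b'{"model":"phi3","message":{"role":"assistant","content":"Hel"},"done":false}',
    b'{"model":"phi3","message":{"role":"assistant","content":"lo"},"done":false}',
    b'{"model":"phi3","message":{"role":"assistant","content":""},"done":true}',
]

buffer = ""
for raw in stream_lines:
    obj = json.loads(raw.decode())
    if obj.get("done", False):
        break                      # mirrors is_last_chunk in decode_chunk
    buffer += obj["message"]["content"]
print(buffer)                      # -> "Hello"
```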
@@ -75,6 +75,10 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         llm_kwargs["llm_model"] = zhipuai_default_model

     if llm_kwargs["llm_model"] in ["glm-4v"]:
+        if (len(inputs) + sum(len(temp) for temp in history) + 1047) > 2000:
+            chatbot.append((inputs, "上下文长度超过glm-4v上限2000tokens,注意图片大约占用1,047个tokens"))
+            yield from update_ui(chatbot=chatbot, history=history)
+            return
         have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
         if not have_recent_file:
             chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
@@ -36,8 +36,14 @@ class ZhipuChatInit:
         what_i_have_asked = {"role": "user", "content": []}
         what_i_have_asked['content'].append({"type": 'text', "text": user_input})
         if encode_img:
+            if len(encode_img) > 1:
+                logging.warning("glm-4v只支持一张图片,将只取第一张图片进行处理")
+                print("glm-4v只支持一张图片,将只取第一张图片进行处理")
             img_d = {"type": "image_url",
-                     "image_url": {'url': encode_img}}
+                     "image_url": {
+                         "url": encode_img[0]['data']
+                     }
+                     }
             what_i_have_asked['content'].append(img_d)
         return what_i_have_asked
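To make the fix above concrete, the user turn that ZhipuChatInit now assembles for glm-4v looks roughly like this (a sketch; the text and the base64 payload are placeholders):

```python
# Shape of the glm-4v user message after the fix: one text part plus at most
# one image part, taken from the first uploaded image. Values are placeholders.
what_i_have_asked = {
    "role": "user",
    "content": [
        {"type": "text", "text": "请描述这张图片"},
        {"type": "image_url", "image_url": {"url": "<base64 image data>"}},
    ],
}
```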
requirements.txt
@@ -24,6 +24,7 @@ Markdown
 pygments
 pymupdf
 openai
+rjsmin
 arxiv
 numpy
 rich

shared_utils/cookie_manager.py
@@ -1,4 +1,6 @@
+import json
 from typing import Callable

 def load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)->Callable:
     def load_web_cookie_cache(persistent_cookie_, cookies_):
         import gradio as gr
@@ -22,7 +24,6 @@ def load_web_cookie_cache__fn_builder(customize_btns, cookies, predefined_btns)-
         return ret
     return load_web_cookie_cache

-
 def assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_cache)->Callable:
     def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
         import gradio as gr
@@ -59,3 +60,29 @@ def assign_btn__fn_builder(customize_btns, predefined_btns, cookies, web_cookie_
         return ret
     return assign_btn

+# cookies, web_cookie_cache = make_cookie_cache()
+def make_cookie_cache():
+    # 定义 后端state(cookies)、前端(web_cookie_cache)两兄弟
+    import gradio as gr
+    from toolbox import load_chat_cookies
+    # 定义cookies的后端state
+    cookies = gr.State(load_chat_cookies())
+    # 定义cookies的一个孪生的前端存储区(隐藏)
+    web_cookie_cache = gr.Textbox(visible=False, elem_id="web_cookie_cache")
+    return cookies, web_cookie_cache
+
+# history, history_cache, history_cache_update = make_history_cache()
+def make_history_cache():
+    # 定义 后端state(history)、前端(history_cache)、后端setter(history_cache_update)三兄弟
+    import gradio as gr
+    # 定义history的后端state
+    history = gr.State([])
+    # 定义history的一个孪生的前端存储区(隐藏)
+    history_cache = gr.Textbox(visible=False, elem_id="history_cache")
+    # 定义history_cache->history的更新方法(隐藏)。在触发这个按钮时,会先执行js代码更新history_cache,然后再执行python代码更新history
+    def process_history_cache(history_cache):
+        return json.loads(history_cache)
+    # 另一种更简单的setter方法
+    history_cache_update = gr.Button("", elem_id="elem_update_history", visible=False).click(
+        process_history_cache, inputs=[history_cache], outputs=[history])
+    return history, history_cache, history_cache_update
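These helpers close the loop with the reset buttons wired up in main.py above: on reset, the server stashes `json.dumps(history)` into the hidden history_cache textbox, and the hidden elem_update_history button later parses it back into the `gr.State`. A schematic of the round trip, stripped of gradio for illustration:

```python
# Round trip of the "reverse previous reset" feature, without the gradio
# components: reset stores the old history as JSON; restore parses it back.
import json

history = ["user turn", "assistant turn"]

# what the reset handler does (see reset_server_side_args in main.py):
history_cache = json.dumps(history)   # stashed into the hidden textbox
history = []                          # chat is cleared

# what clicking the hidden elem_update_history button does:
history = json.loads(history_cache)   # previous conversation restored
assert history == ["user turn", "assistant turn"]
```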
@@ -137,6 +137,47 @@ def start_app(app_block, CONCURRENT_COUNT, AUTHENTICATION, PORT, SSL_KEYFILE, SS
             return "越权访问!"
         return await endpoint(path_or_url, request)

+    TTS_TYPE = get_conf("TTS_TYPE")
+    if TTS_TYPE != "DISABLE":
+        # audio generation functionality
+        import httpx
+        from fastapi import FastAPI, Request, HTTPException
+        from starlette.responses import Response
+        async def forward_request(request: Request, method: str) -> Response:
+            async with httpx.AsyncClient() as client:
+                try:
+                    # Forward the request to the target service
+                    if TTS_TYPE == "EDGE_TTS":
+                        import tempfile
+                        import edge_tts
+                        import wave
+                        import uuid
+                        from pydub import AudioSegment
+                        json = await request.json()
+                        voice = get_conf("EDGE_TTS_VOICE")
+                        tts = edge_tts.Communicate(text=json['text'], voice=voice)
+                        temp_folder = tempfile.gettempdir()
+                        temp_file_name = str(uuid.uuid4().hex)
+                        temp_file = os.path.join(temp_folder, f'{temp_file_name}.mp3')
+                        await tts.save(temp_file)
+                        mp3_audio = AudioSegment.from_file(temp_file, format="mp3")
+                        mp3_audio.export(temp_file, format="wav")
+                        with open(temp_file, 'rb') as wav_file: t = wav_file.read()
+                        os.remove(temp_file)
+                        return Response(content=t)
+                    if TTS_TYPE == "LOCAL_SOVITS_API":
+                        # Forward the request to the target service
+                        TARGET_URL = get_conf("GPT_SOVITS_URL")
+                        body = await request.body()
+                        resp = await client.post(TARGET_URL, content=body, timeout=60)
+                        # Return the response from the target service
+                        return Response(content=resp.content, status_code=resp.status_code, headers=dict(resp.headers))
+                except httpx.RequestError as e:
+                    raise HTTPException(status_code=400, detail=f"Request to the target service failed: {str(e)}")
+        @gradio_app.post("/vits")
+        async def forward_post_request(request: Request):
+            return await forward_request(request, "POST")

     # --- --- app_lifespan --- ---
     from contextlib import asynccontextmanager
     @asynccontextmanager
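Once the app is running with TTS_TYPE set to anything other than "DISABLE", the new /vits route registered above can be exercised directly. A minimal sketch, assuming the app listens on port 7860 (the port depends on WEB_PORT and is an assumption here):

```python
# Smoke test for the /vits forwarding endpoint registered above. The port is
# an assumption (WEB_PORT); the "text" field matches what the handler reads.
import requests

resp = requests.post(
    "http://127.0.0.1:7860/vits",
    json={"text": "你好,这是一段语音合成测试。"},
    timeout=120,
)
resp.raise_for_status()
with open("tts_test.wav", "wb") as f:
    f.write(resp.content)  # the EDGE_TTS path returns WAV bytes
```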
@@ -38,6 +38,7 @@
     left: calc(100% + 3px);
     top: 0;
     display: flex;
+    flex-direction: column;
     justify-content: space-between;
 }
 /* .message-btn-row-leading, .message-btn-row-trailing {
604
themes/common.js
604
themes/common.js
@ -7,6 +7,9 @@ function push_data_to_gradio_component(DAT, ELEM_ID, TYPE) {
|
|||||||
if (TYPE == "str") {
|
if (TYPE == "str") {
|
||||||
// convert dat to string: do nothing
|
// convert dat to string: do nothing
|
||||||
}
|
}
|
||||||
|
else if (TYPE == "obj") {
|
||||||
|
// convert dat to string: do nothing
|
||||||
|
}
|
||||||
else if (TYPE == "no_conversion") {
|
else if (TYPE == "no_conversion") {
|
||||||
// no nothing
|
// no nothing
|
||||||
}
|
}
|
||||||
@ -254,11 +257,22 @@ function cancel_loading_status() {
|
|||||||
// 第 2 部分: 复制按钮
|
// 第 2 部分: 复制按钮
|
||||||
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
||||||
|
|
||||||
function addCopyButton(botElement) {
|
|
||||||
|
var allow_auto_read_continously = true;
|
||||||
|
var allow_auto_read_tts_flag = false;
|
||||||
|
function addCopyButton(botElement, index, is_last_in_arr) {
|
||||||
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
// https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
|
||||||
// Copy bot button
|
// Copy bot button
|
||||||
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
|
const copiedIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
|
||||||
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
const copyIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
||||||
|
// const audioIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
||||||
|
const audioIcon = '<span><svg t="1713628577799" fill="currentColor" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4587" width=".9em" height=".9em"><path d="M113.7664 540.4672c0-219.9552 178.2784-398.2336 398.2336-398.2336S910.2336 320.512 910.2336 540.4672v284.4672c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V597.2992c0-31.4368 25.4976-56.9344 56.9344-56.9344h56.9344c0-188.5184-152.7808-341.2992-341.2992-341.2992S170.7008 351.9488 170.7008 540.4672h56.9344c31.4368 0 56.9344 25.4976 56.9344 56.9344v227.5328c0 31.4368-25.4976 56.9344-56.9344 56.9344h-56.9344c-31.4368 0-56.9344-25.4976-56.9344-56.9344V540.4672z" p-id="4588"></path></svg></span>';
|
||||||
|
// const cancelAudioIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect><path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path></svg></span>';
|
||||||
|
|
||||||
|
// 此功能没准备好
|
||||||
|
if (allow_auto_read_continously && is_last_in_arr && allow_auto_read_tts_flag) {
|
||||||
|
process_latest_text_output(botElement.innerText, index);
|
||||||
|
}
|
||||||
|
|
||||||
const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
|
const messageBtnColumnElement = botElement.querySelector('.message-btn-row');
|
||||||
if (messageBtnColumnElement) {
|
if (messageBtnColumnElement) {
|
||||||
@ -273,6 +287,7 @@ function addCopyButton(botElement) {
|
|||||||
copyButton.addEventListener('click', async () => {
|
copyButton.addEventListener('click', async () => {
|
||||||
const textToCopy = botElement.innerText;
|
const textToCopy = botElement.innerText;
|
||||||
try {
|
try {
|
||||||
|
// push_text_to_audio(textToCopy).catch(console.error);
|
||||||
if ("clipboard" in navigator) {
|
if ("clipboard" in navigator) {
|
||||||
await navigator.clipboard.writeText(textToCopy);
|
await navigator.clipboard.writeText(textToCopy);
|
||||||
copyButton.innerHTML = copiedIcon;
|
copyButton.innerHTML = copiedIcon;
|
||||||
@@ -299,9 +314,35 @@ function addCopyButton(botElement) {
         console.error("Copy failed: ", error);
     }
 });

+if (enable_tts){
+    var audioButton = document.createElement('button');
+    audioButton.classList.add('audio-toggle-btn');
+    audioButton.innerHTML = audioIcon;
+    audioButton.addEventListener('click', async () => {
+        if (audioPlayer.isPlaying) {
+            allow_auto_read_tts_flag = false;
+            toast_push('自动朗读已禁用。', 3000);
+            audioPlayer.stop();
+            setCookie("js_auto_read_cookie", "False", 365);
+        } else {
+            allow_auto_read_tts_flag = true;
+            toast_push('正在合成语音 & 自动朗读已开启 (再次点击此按钮可禁用自动朗读)。', 3000);
+            // toast_push('正在合成语音', 3000);
+            const readText = botElement.innerText;
+            push_text_to_audio(readText);
+            setCookie("js_auto_read_cookie", "True", 365);
+        }
+    });
+}

 var messageBtnColumn = document.createElement('div');
 messageBtnColumn.classList.add('message-btn-row');
 messageBtnColumn.appendChild(copyButton);
+if (enable_tts){
+    messageBtnColumn.appendChild(audioButton);
+}
 botElement.appendChild(messageBtnColumn);
}
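A minimal sketch of the cookie round trip behind this toggle (assuming the setCookie/getCookie helpers that common.js defines elsewhere): the preference is stored as a literal "True"/"False" string, and GptAcademicJavaScriptInit further down parses it back into the boolean flag.

// Sketch: how the "True"/"False" string cookie maps to the boolean flag.
function persistAutoRead(enabled) {
    setCookie("js_auto_read_cookie", enabled ? "True" : "False", 365);
}
function loadAutoRead() {
    // a missing cookie is falsy, so auto-read stays disabled by default
    return getCookie("js_auto_read_cookie") == "True";
}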
@@ -337,7 +378,15 @@ function chatbotContentChanged(attempt = 1, force = false) {
 // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
 for (var i = 0; i < attempt; i++) {
     setTimeout(() => {
-        gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
+        const messages = gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot');
+        messages.forEach((message, index, arr) => {
+            // check if the current message is the last in the array
+            const is_last_in_arr = index === arr.length - 1;
+            // pass both the message element and the is_last_in_arr boolean to addCopyButton
+            addCopyButton(message, index, is_last_in_arr);
+        });
+        // gradioApp().querySelectorAll('#gpt-chatbot .message-wrap .message.bot').forEach(addCopyButton);
     }, i === 0 ? 0 : 200);
 }
 // we have moved mermaid-related code to gradio-fix repository: binary-husky/gradio-fix@32150d0
@@ -621,16 +670,16 @@ function monitoring_input_box() {
 if (elem_input_main) {
     if (elem_input_main.querySelector("textarea")) {
-        register_func_paste(elem_input_main.querySelector("textarea"))
+        register_func_paste(elem_input_main.querySelector("textarea"));
     }
 }
 if (elem_input_float) {
     if (elem_input_float.querySelector("textarea")) {
-        register_func_paste(elem_input_float.querySelector("textarea"))
+        register_func_paste(elem_input_float.querySelector("textarea"));
     }
 }
 if (elem_chatbot) {
-    register_func_drag(elem_chatbot)
+    register_func_drag(elem_chatbot);
 }
}
@@ -737,7 +786,7 @@ function minor_ui_adjustment() {
 }
 setInterval(function () {
-    auto_hide_toolbar()
+    auto_hide_toolbar();
 }, 200); // run once every 200 ms
}
@@ -857,8 +906,8 @@ function gpt_academic_gradio_saveload(
     }
 }

+enable_tts = false;
+
-async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
+async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout, tts) {
 // Part 1: layout initialization
 audio_fn_init();
 minor_ui_adjustment();
@@ -873,7 +922,6 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
 // Part 2: read cookies and initialize the UI
 let searchString = "";
 let bool_value = "";

 // darkmode: dark mode toggle
 if (getCookie("js_darkmode_cookie")) {
     dark = getCookie("js_darkmode_cookie")
@@ -889,11 +937,39 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
     }
 }

+// auto read-aloud (TTS)
+if (tts != "DISABLE"){
+    enable_tts = true;
+    if (getCookie("js_auto_read_cookie")) {
+        auto_read_tts = getCookie("js_auto_read_cookie")
+        auto_read_tts = auto_read_tts == "True";
+        if (auto_read_tts) {
+            allow_auto_read_tts_flag = true;
+        }
+    }
+}

 // SysPrompt: silent system prompt
 gpt_academic_gradio_saveload("load", "elem_prompt", "js_system_prompt_cookie", null, "str");

 // Temperature: LLM temperature parameter
 gpt_academic_gradio_saveload("load", "elem_temperature", "js_temperature_cookie", null, "float");
+// md_dropdown: LLM model selection
+if (getCookie("js_md_dropdown_cookie")) {
+    const cached_model = getCookie("js_md_dropdown_cookie");
+    var model_sel = await get_gradio_component("elem_model_sel");
+    // determine whether the cached model is in the choices
+    if (model_sel.props.choices.includes(cached_model)){
+        // change the dropdown
+        gpt_academic_gradio_saveload("load", "elem_model_sel", "js_md_dropdown_cookie", null, "str");
+        // update the chatbot label accordingly
+        push_data_to_gradio_component({
+            label: '当前模型:' + getCookie("js_md_dropdown_cookie"),
+            __type__: 'update'
+        }, "gpt-chatbot", "obj")
+    }
+}

 // clearButton: auto-clear button
 if (getCookie("js_clearbtn_show_cookie")) {
@@ -953,4 +1029,510 @@ async function GptAcademicJavaScriptInit(dark, prompt, live2d, layout) {
         }
     }
 }

+function reset_conversation(a, b) {
+    console.log("js_code_reset");
+    a = btoa(unescape(encodeURIComponent(JSON.stringify(a))));
+    setCookie("js_previous_chat_cookie", a, 1);
+    gen_restore_btn();
+    return [[], [], "已重置"];
+}
+
+// clear -> cache history into history_cache -> user clicks restore -> restore_previous_chat() -> trigger elem_update_history -> read history_cache
+function restore_previous_chat() {
+    console.log("restore_previous_chat");
+    let chat = getCookie("js_previous_chat_cookie");
+    chat = JSON.parse(decodeURIComponent(escape(atob(chat))));
+    push_data_to_gradio_component(chat, "gpt-chatbot", "obj");
+    document.querySelector("#elem_update_history").click(); // in order to call set_history_gr_state, and send history state to server
+}

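The btoa(unescape(encodeURIComponent(...))) chain above is the usual trick for base64-encoding a Unicode string, since btoa alone only handles Latin-1; restore_previous_chat applies the exact inverse. A quick sketch of the round trip:

// Sketch: UTF-8-safe base64 round trip, matching the pair of functions above.
const history = [["你好", "Hello!"]];
const encoded = btoa(unescape(encodeURIComponent(JSON.stringify(history))));
const decoded = JSON.parse(decodeURIComponent(escape(atob(encoded))));
console.assert(decoded[0][0] === "你好"); // non-ASCII text survives intact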
+function gen_restore_btn() {
+    // create the button element
+    const button = document.createElement('div');
+    // const recvIcon = '<span><svg stroke="currentColor" fill="none" stroke-width="2" viewBox="0 0 24 24" stroke-linecap="round" stroke-linejoin="round" height=".8em" width=".8em" xmlns="http://www.w3.org/2000/svg"><polyline points="20 6 9 17 4 12"></polyline></svg></span>';
+    const rec_svg = '<svg t="1714361184567" style="transform:translate(1px, 2.5px)" class="icon" viewBox="0 0 1024 1024" version="1.1" xmlns="http://www.w3.org/2000/svg" p-id="4389" width="35" height="35"><path d="M320 512h384v64H320zM320 384h384v64H320zM320 640h192v64H320z" p-id="4390" fill="#ffffff"></path><path d="M863.7 544c-1.9 44-11.4 86.8-28.5 127.2-18.5 43.8-45.1 83.2-78.9 117-33.8 33.8-73.2 60.4-117 78.9C593.9 886.3 545.7 896 496 896s-97.9-9.7-143.2-28.9c-43.8-18.5-83.2-45.1-117-78.9-33.8-33.8-60.4-73.2-78.9-117C137.7 625.9 128 577.7 128 528s9.7-97.9 28.9-143.2c18.5-43.8 45.1-83.2 78.9-117s73.2-60.4 117-78.9C398.1 169.7 446.3 160 496 160s97.9 9.7 143.2 28.9c23.5 9.9 45.8 22.2 66.5 36.7l-119.7 20 9.9 59.4 161.6-27 59.4-9.9-9.9-59.4-27-161.5-59.4 9.9 19 114.2C670.3 123.8 586.4 96 496 96 257.4 96 64 289.4 64 528s193.4 432 432 432c233.2 0 423.3-184.8 431.7-416h-64z" p-id="4391" fill="#ffffff"></path></svg>';
+    const recvIcon = '<span>' + rec_svg + '</span>';
+
+    // set the button's style and attributes
+    button.id = 'floatingButton';
+    button.className = 'glow';
+    button.style.textAlign = 'center';
+    button.style.position = 'fixed';
+    button.style.bottom = '10px';
+    button.style.left = '10px';
+    button.style.width = '50px';
+    button.style.height = '50px';
+    button.style.borderRadius = '50%';
+    button.style.backgroundColor = '#007bff';
+    button.style.color = 'white';
+    button.style.display = 'flex';
+    button.style.alignItems = 'center';
+    button.style.justifyContent = 'center';
+    button.style.cursor = 'pointer';
+    button.style.transition = 'all 0.3s ease';
+    button.style.boxShadow = '0 0 10px rgba(0,0,0,0.2)';
+
+    button.innerHTML = recvIcon;
+
+    // keyframes for the glow animation
+    const styleSheet = document.createElement('style');
+    styleSheet.id = 'floatingButtonStyle';
+    styleSheet.innerText = `
+        @keyframes glow {
+            from {
+                box-shadow: 0 0 10px rgba(0,0,0,0.2);
+            }
+            to {
+                box-shadow: 0 0 13px rgba(0,0,0,0.5);
+            }
+        }
+        #floatingButton.glow {
+            animation: glow 1s infinite alternate;
+        }
+        #floatingButton:hover {
+            transform: scale(1.2);
+            box-shadow: 0 0 20px rgba(0,0,0,0.4);
+        }
+        #floatingButton.disappearing {
+            animation: shrinkAndDisappear 0.5s forwards;
+        }
+    `;
+
+    // only add the style sheet when it does not exist yet (the id checked must match the one assigned above)
+    if (!document.getElementById('floatingButtonStyle')) {
+        document.head.appendChild(styleSheet);
+    }
+
+    // mouseover / mouseout listeners: show the label on hover, restore the icon on leave
+    button.addEventListener('mouseover', function () {
+        this.textContent = "还原\n对话";
+    });
+    button.addEventListener('mouseout', function () {
+        this.innerHTML = recvIcon;
+    });
+
+    // click listener
+    button.addEventListener('click', function () {
+        restore_previous_chat();
+        // add a class to trigger the shrink-and-disappear animation, then remove the button
+        this.classList.add('disappearing');
+        document.body.removeChild(this);
+    });
+
+    // only add the button to the page when it does not exist yet
+    if (!document.getElementById('floatingButton')) {
+        document.body.appendChild(button);
+    }
+}

+async function on_plugin_exe_complete(fn_name) {
+    console.log(fn_name);
+    if (fn_name === "保存当前的对话") {
+        // get the chat archive file path, if one is present
+        let chatbot = await get_data_from_gradio_component('gpt-chatbot');
+        let may_have_chat_profile_info = chatbot[chatbot.length - 1][1];
+
+        function get_href(htmlString) {
+            const parser = new DOMParser();
+            const doc = parser.parseFromString(htmlString, 'text/html');
+            const anchor = doc.querySelector('a');
+            if (anchor) {
+                return anchor.getAttribute('href');
+            } else {
+                return null;
+            }
+        }
+
+        let href = get_href(may_have_chat_profile_info);
+        if (href) {
+            const cleanedHref = href.replace('file=', ''); // e.g. /home/fuqingxu/chatgpt_academic/gpt_log/default_user/chat_history/GPT-Academic对话存档2024-04-12-00-35-06.html
+            console.log(cleanedHref);
+        }
+    }
+}
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// Part 8: TTS speech synthesis functions
+// -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

+class AudioPlayer {
+    constructor() {
+        this.audioCtx = new (window.AudioContext || window.webkitAudioContext)();
+        this.queue = [];
+        this.isPlaying = false;
+        this.currentSource = null; // keep a handle to the source that is currently playing
+    }
+
+    // convert a Base64-encoded string into an ArrayBuffer
+    base64ToArrayBuffer(base64) {
+        const binaryString = window.atob(base64);
+        const len = binaryString.length;
+        const bytes = new Uint8Array(len);
+        for (let i = 0; i < len; i++) {
+            bytes[i] = binaryString.charCodeAt(i);
+        }
+        return bytes.buffer;
+    }
+
+    // check the playback queue and start the next clip if idle
+    checkQueue() {
+        if (!this.isPlaying && this.queue.length > 0) {
+            this.isPlaying = true;
+            const nextAudio = this.queue.shift();
+            this.play_wave(nextAudio);
+        }
+    }
+
+    // append a clip to the playback queue
+    enqueueAudio(audio_buf_wave) {
+        if (allow_auto_read_tts_flag) {
+            this.queue.push(audio_buf_wave);
+            this.checkQueue();
+        }
+    }
+
+    // play one clip
+    async play_wave(encodedAudio) {
+        // const audioData = this.base64ToArrayBuffer(encodedAudio);
+        const audioData = encodedAudio;
+        try {
+            const buffer = await this.audioCtx.decodeAudioData(audioData);
+            const source = this.audioCtx.createBufferSource();
+            source.buffer = buffer;
+            source.connect(this.audioCtx.destination);
+            source.onended = () => {
+                if (allow_auto_read_tts_flag) {
+                    this.isPlaying = false;
+                    this.currentSource = null; // clear the handle once playback finishes
+                    this.checkQueue();
+                }
+            };
+            this.currentSource = source; // remember the source we are playing
+            source.start();
+        } catch (e) {
+            console.log("Audio error!", e);
+            this.isPlaying = false;
+            this.currentSource = null; // clear the handle on error too
+            this.checkQueue();
+        }
+    }
+
+    // stop playback immediately and drop anything still queued
+    stop() {
+        if (this.currentSource) {
+            this.queue = []; // empty the queue
+            this.currentSource.stop(); // stop the current source
+            this.currentSource = null; // clear the handle
+            this.isPlaying = false; // update the playback state
+            // closing the AudioContext would prevent any further playback, so we only stop the current source
+            // this.audioCtx.close(); // optional: close the audio context if ever needed
+        }
+    }
+}
+
+const audioPlayer = new AudioPlayer();

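decodeAudioData consumes an ArrayBuffer, so a raw fetch body can be queued directly. A minimal sketch of feeding the player (the URL is a hypothetical placeholder):

// Sketch: fetch one clip and hand it to the playback queue.
async function playClip(url) {
    const resp = await fetch(url);          // url is a placeholder
    const wave = await resp.arrayBuffer();  // binary wave data
    audioPlayer.enqueueAudio(wave);         // no-op unless allow_auto_read_tts_flag is set
}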
+class FIFOLock {
+    constructor() {
+        this.queue = [];
+        this.currentTaskExecuting = false;
+    }
+
+    lock() {
+        let resolveLock;
+        const lock = new Promise(resolve => {
+            resolveLock = resolve;
+        });
+        this.queue.push(resolveLock);
+        if (!this.currentTaskExecuting) {
+            this._dequeueNext();
+        }
+        return lock;
+    }
+
+    _dequeueNext() {
+        if (this.queue.length === 0) {
+            this.currentTaskExecuting = false;
+            return;
+        }
+        this.currentTaskExecuting = true;
+        const resolveLock = this.queue.shift();
+        resolveLock();
+    }
+
+    unlock() {
+        this.currentTaskExecuting = false;
+        this._dequeueNext();
+    }
+}
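FIFOLock serializes async callers in arrival order: each caller awaits lock() and must call unlock() when done, otherwise the queue stalls. A minimal usage sketch:

// Sketch: two tasks enter concurrently but run strictly one after the other.
const fifo = new FIFOLock();
async function serializedTask(name) {
    await fifo.lock();
    try {
        console.log(name, 'running');
        await new Promise(r => setTimeout(r, 100)); // simulated async work
    } finally {
        fifo.unlock(); // always release, or later callers wait forever
    }
}
serializedTask('A');
serializedTask('B'); // 'A running' always logs before 'B running'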
+function delay(ms) {
+    return new Promise(resolve => setTimeout(resolve, ms));
+}
+
+// define the trigger function with delay parameter T in milliseconds
+function trigger(T, fire) {
+    // timer ID of the pending call, if any
+    let timeoutID = null;
+    // the latest arguments seen so far
+    let lastArgs = null;
+
+    return function (...args) {
+        // remember the latest arguments
+        lastArgs = args;
+        // clear the existing timer if the function is called again
+        if (timeoutID !== null) {
+            clearTimeout(timeoutID);
+        }
+        // set a new timer that calls `fire` with the latest arguments after T milliseconds
+        timeoutID = setTimeout(() => {
+            fire(...lastArgs);
+        }, T);
+    };
+}

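trigger() is a trailing-edge debounce: every call restarts a T-millisecond timer, and only the final call's arguments reach fire. For example:

// Sketch: only the last call inside the quiet window fires.
const debouncedLog = trigger(500, msg => console.log('fired:', msg));
debouncedLog('first');
debouncedLog('second');
debouncedLog('third'); // after 500 ms of silence, logs "fired: third" exactly once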
prev_text = "";
|
||||||
|
prev_text_already_pushed = "";
|
||||||
|
prev_chatbot_index = -1;
|
||||||
|
const delay_live_text_update = trigger(3000, on_live_stream_terminate);
|
||||||
|
|
||||||
|
function on_live_stream_terminate(latest_text) {
|
||||||
|
// remove `prev_text_already_pushed` from `latest_text`
|
||||||
|
console.log("on_live_stream_terminate", latest_text)
|
||||||
|
remaining_text = latest_text.slice(prev_text_already_pushed.length);
|
||||||
|
if ((!isEmptyOrWhitespaceOnly(remaining_text)) && remaining_text.length != 0) {
|
||||||
|
prev_text_already_pushed = latest_text;
|
||||||
|
push_text_to_audio(remaining_text);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
+function is_continue_from_prev(text, prev_text) {
+    const abl = 5; // tolerance: the tail of the previous snapshot may have been rewritten
+    if (text.length < prev_text.length - abl) {
+        return false;
+    }
+    if (prev_text.length > 10) {
+        return text.startsWith(prev_text.slice(0, Math.min(prev_text.length - abl, 100)));
+    } else {
+        return text.startsWith(prev_text);
+    }
+}
+
+function isEmptyOrWhitespaceOnly(remaining_text) {
+    // replace \n and 。 with empty strings
+    let textWithoutSpecifiedCharacters = remaining_text.replace(/[\n。]/g, '');
+    // check whether anything non-whitespace remains
+    return textWithoutSpecifiedCharacters.trim().length === 0;
+}

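The abl tolerance makes the prefix check robust against the renderer rewriting the last few characters of the previous snapshot (for example a trailing cursor glyph). Roughly:

// Sketch: prefix check with a 5-character tolerance at the tail.
is_continue_from_prev("Hello world, more", "Hello world"); // true: old text is a prefix
is_continue_from_prev("Hello worl~",       "Hello world"); // true: only the tail differs
is_continue_from_prev("Totally new",       "Hello world"); // false: treated as a new message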
+function process_increased_text(remaining_text) {
+    // console.log('[is continue], remaining_text: ', remaining_text)
+    // if remaining_text starts with \n or 。, move those characters into prev_text_already_pushed
+    while (remaining_text.startsWith('\n') || remaining_text.startsWith('。')) {
+        prev_text_already_pushed = prev_text_already_pushed + remaining_text[0];
+        remaining_text = remaining_text.slice(1);
+    }
+    if (remaining_text.includes('\n') || remaining_text.includes('。')) { // does remaining_text contain a separator?
+        // at least one complete sentence is available
+        const index_of_last_sep = Math.max(remaining_text.lastIndexOf('\n'), remaining_text.lastIndexOf('。'));
+        // break the text at the last separator
+        const tobe_pushed = remaining_text.slice(0, index_of_last_sep + 1);
+        prev_text_already_pushed = prev_text_already_pushed + tobe_pushed;
+        // console.log('[is continue], push: ', tobe_pushed)
+        // console.log('[is continue], update prev_text_already_pushed: ', prev_text_already_pushed)
+        if (!isEmptyOrWhitespaceOnly(tobe_pushed)) {
+            push_text_to_audio(tobe_pushed);
+        }
+    }
+}

+function process_latest_text_output(text, chatbot_index) {
+    if (text.length == 0) {
+        prev_text = text;
+        prev_text_mask = text;
+        // console.log('empty text')
+        return;
+    }
+    if (text == prev_text) {
+        // console.log('[nothing changed]')
+        return;
+    }
+
+    var is_continue = is_continue_from_prev(text, prev_text_already_pushed);
+    if (chatbot_index == prev_chatbot_index && is_continue) {
+        // on_text_continue_grow
+        const remaining_text = text.slice(prev_text_already_pushed.length);
+        process_increased_text(remaining_text);
+        delay_live_text_update(text); // if the text never gets a \n or 。, this timer will finally commit it
+    }
+    else if (chatbot_index == prev_chatbot_index && !is_continue) {
+        console.log('---------------------');
+        console.log('text twisting!');
+        console.log('[text twisting]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+        console.log('---------------------');
+        prev_text_already_pushed = "";
+        delay_live_text_update(text); // if the text never gets a \n or 。, this timer will finally commit it
+    }
+    else {
+        // on_new_message_begin, we have to clear `prev_text_already_pushed`
+        console.log('---------------------');
+        console.log('new message begin!');
+        console.log('[new message begin]', 'text', text, 'prev_text_already_pushed', prev_text_already_pushed);
+        console.log('---------------------');
+        prev_text_already_pushed = "";
+        process_increased_text(text);
+        delay_live_text_update(text); // if the text never gets a \n or 。, this timer will finally commit it
+    }
+    prev_text = text;
+    prev_chatbot_index = chatbot_index;
+}

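Taken together, these functions chop a growing stream into sentence-sized TTS requests: any segment terminated by \n or 。 is pushed immediately, and the 3-second trigger() timer armed above flushes a tail that never receives a separator. A worked trace:

// Sketch: successive snapshots of one streaming reply.
process_latest_text_output("你好。", 0);           // pushes "你好。"
process_latest_text_output("你好。今天", 0);        // no new separator yet, nothing pushed
process_latest_text_output("你好。今天下雨。", 0);   // pushes "今天下雨。"
// if the stream stops mid-sentence, on_live_stream_terminate flushes the tail 3 s later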
+const audio_push_lock = new FIFOLock();
+async function push_text_to_audio(text) {
+    if (!allow_auto_read_tts_flag) {
+        return;
+    }
+    await audio_push_lock.lock();
+    var lines = text.split(/[\n。]/);
+    for (const audio_buf_text of lines) {
+        if (audio_buf_text) {
+            // append 'vits' to the current URL to form the target endpoint
+            const url = `${window.location.href}vits`;
+            // payload to be sent in the POST request
+            const payload = {
+                text: audio_buf_text,
+                text_language: "zh"
+            };
+            // send the segment to the TTS endpoint, tagged with its sequence number
+            post_text(url, payload, send_index);
+            send_index = send_index + 1;
+            console.log(send_index, audio_buf_text);
+            // throttle: wait 3 seconds between requests
+            if (allow_auto_read_tts_flag) {
+                await delay(3000);
+            }
+        }
+    }
+    audio_push_lock.unlock();
+}

+send_index = 0;
+recv_index = 0;
+to_be_processed = [];
+async function UpdatePlayQueue(cnt, audio_buf_wave) {
+    if (cnt != recv_index) {
+        // not this clip's turn yet: cache it until its predecessors have arrived
+        to_be_processed.push([cnt, audio_buf_wave]);
+        console.log('cache', cnt);
+    }
+    else {
+        console.log('processing', cnt);
+        recv_index = recv_index + 1;
+        if (audio_buf_wave) {
+            audioPlayer.enqueueAudio(audio_buf_wave);
+        }
+        // deal with other cached audio that is now in order
+        while (true) {
+            let find_any = false;
+            for (let i = to_be_processed.length - 1; i >= 0; i--) {
+                if (to_be_processed[i][0] == recv_index) {
+                    console.log('processing cached', recv_index);
+                    if (to_be_processed[i][1]) {
+                        audioPlayer.enqueueAudio(to_be_processed[i][1]);
+                    }
+                    to_be_processed.splice(i, 1); // remove the matched entry (Array.pop takes no index)
+                    find_any = true;
+                    recv_index = recv_index + 1;
+                }
+            }
+            if (!find_any) { break; }
+        }
+    }
+}

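UpdatePlayQueue acts as a small reorder buffer: TTS responses may return out of order, but clips are handed to the player strictly in send_index order. For instance (wavA-wavC stand for decoded-ready ArrayBuffers):

// Sketch: out-of-order arrivals, in-order playback (recv_index starts at 0).
UpdatePlayQueue(1, wavB); // cached: it is not clip 1's turn yet
UpdatePlayQueue(0, wavA); // plays wavA, then finds cached clip 1 and plays wavB
UpdatePlayQueue(2, wavC); // plays wavC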
+function post_text(url, payload, cnt) {
+    if (allow_auto_read_tts_flag) {
+        postData(url, payload, cnt)
+            .then(data => {
+                UpdatePlayQueue(cnt, data);
+                return;
+            });
+    } else {
+        UpdatePlayQueue(cnt, null);
+        return;
+    }
+}

+notify_user_error = false;
+// async helper to perform the POST request
+async function postData(url = '', data = {}) {
+    try {
+        // use the Fetch API with await
+        const response = await fetch(url, {
+            method: 'POST', // specify the request method
+            body: JSON.stringify(data), // convert the JavaScript object to a JSON string
+        });
+        // check whether the response is ok (status in the range 200-299)
+        if (!response.ok) {
+            // if not OK, log the status and give up on this clip
+            console.info('There was a problem during audio generation requests:', response.status);
+            // if (!notify_user_error){
+            //     notify_user_error = true;
+            //     alert('There was a problem during audio generation requests:', response.status);
+            // }
+            return null;
+        }
+        // if OK, return the binary audio data as an ArrayBuffer
+        return await response.arrayBuffer();
+    } catch (error) {
+        // log any errors that occur during the fetch operation
+        console.info('There was a problem during audio generation requests:', error);
+        // if (!notify_user_error){
+        //     notify_user_error = true;
+        //     alert('There was a problem during audio generation requests:', error);
+        // }
+        return null;
+    }
+}

@@ -1,10 +1,34 @@
 from toolbox import get_conf
 CODE_HIGHLIGHT, ADD_WAIFU, LAYOUT = get_conf("CODE_HIGHLIGHT", "ADD_WAIFU", "LAYOUT")

+def minimize_js(common_js_path):
+    try:
+        import rjsmin, hashlib, glob, os
+        # clean up old minimized js files matching `common_js_path + '.min.*.js'`
+        for old_min_js in glob.glob(common_js_path + '.min.*.js'):
+            os.remove(old_min_js)
+        # use rjsmin to minimize `common_js_path`
+        c_jsmin = rjsmin.jsmin
+        with open(common_js_path, "r") as f:
+            js_content = f.read()
+        minimized_js_content = c_jsmin(js_content)
+        # compute a sha256 content hash of the minimized js for cache busting
+        sha_hash = hashlib.sha256(minimized_js_content.encode()).hexdigest()[:8]
+        minimized_js_path = common_js_path + '.min.' + sha_hash + '.js'
+        # save the minimized js file
+        with open(minimized_js_path, "w") as f:
+            f.write(minimized_js_content)
+        # return the minimized js file path
+        return minimized_js_path
+    except Exception:
+        # fall back to the unminimized file if rjsmin is unavailable or anything fails
+        return common_js_path

 def get_common_html_javascript_code():
     js = "\n"
+    common_js_path = "themes/common.js"
+    minimized_js_path = minimize_js(common_js_path)
     for jsf in [
-        "file=themes/common.js",
+        f"file={minimized_js_path}",
     ]:
         js += f"""<script src="{jsf}"></script>\n"""

0  themes/sovits_audio.js  (new, empty file)
@@ -111,10 +111,10 @@ js_code_for_persistent_cookie_init = """(web_cookie_cache, cookie) => {
 }
 """

+# see themes/common.js for details
 js_code_reset = """
 (a,b,c)=>{
-    return [[], [], "已重置"];
+    return reset_conversation(a,b);
 }
 """

4  version
@@ -1,5 +1,5 @@
 {
-    "version": 3.74,
+    "version": 3.75,
     "show_feature": true,
-    "new_feature": "增加多用户文件鉴权验证提高安全性 <-> 优化oneapi接入方法 <-> 接入Cohere和月之暗面模型 <-> 简化挂载二级目录的步骤 <-> 支持Mermaid绘图库(让大模型绘制脑图)"
+    "new_feature": "添加TTS语音输出(EdgeTTS和SoVits语音克隆) <-> Doc2x PDF翻译 <-> 添加回溯对话按钮"
 }