diff --git a/README.md b/README.md
index 39b091f..859a4c4 100644
--- a/README.md
+++ b/README.md
@@ -36,14 +36,16 @@ https://github.com/polarwinkel/mdtex2html
 Custom shortcut keys | Supports user-defined shortcut keys
 Proxy server configuration | Supports configurable proxy servers
 Modular design | Supports custom higher-order experimental features
-Self source-code analysis | [Experimental feature] One-click review of this project's own source code
-Project analysis | [Experimental feature] One-click analysis of other Python/C++ projects
-Paper reading | [Experimental feature] One-click interpretation of a full LaTeX paper with summary generation
-Batch comment generation | [Experimental feature] One-click batch generation of function comments
-Chat analysis report generation | [Experimental feature] Automatically generates a summary report after a run
+Self source-code analysis | [Function plugin] One-click review of this project's own source code
+Project analysis | [Function plugin] One-click analysis of other Python/C++ and similar projects
+Paper reading | [Function plugin] One-click interpretation of a full LaTeX paper with summary generation
+arxiv assistant | [Function plugin] Enter a URL to translate the abstract and download the paper in one click
+Batch comment generation | [Function plugin] One-click batch generation of function comments
+Chat analysis report generation | [Function plugin] Automatically generates a summary report after a run
 Formula display | Shows both the TeX source and the rendered form of formulas
 Image display | Can display images in markdown
 Markdown tables in GPT output | Can render the markdown tables produced by GPT
+Local LLM interface | Connects local language models such as galactica via [TGUI](https://github.com/oobabooga/text-generation-webui)
 ... | ...
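
Annotation (not part of the patch): the new "Local LLM interface" row is backed by request_llm/bridge_tgui.py below. A minimal usage sketch for the no-UI bridge, assuming a text-generation-webui instance is already running and reachable as the bridge expects; the prompt and values are illustrative, with top_p and temperature mirroring the defaults in run():

```python
# Illustrative only: drive the TGUI bridge without the Gradio UI.
from request_llm.bridge_tgui import predict_tgui_no_ui

answer = predict_tgui_no_ui(
    inputs="Summarize this function in one sentence: def f(x): return x * x",
    top_p=0.9,        # same nucleus-sampling cutoff as run()'s default params
    temperature=0.5,  # same default temperature as run()
    history=[],       # no prior conversation
    sys_prompt="Serve me as a writing and programming assistant.",
)
print(answer)
```

Note that this patch caps the no-UI path at max_token=20 (see the last hunk below), so replies from this entry point stay short.
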
diff --git a/main.py b/main.py
index 533e590..0016d9c 100644
--- a/main.py
+++ b/main.py
@@ -11,8 +11,9 @@ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT =
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 if not AUTHENTICATION: AUTHENTICATION = None
+title = "ChatGPT 学术优化" if LLM_MODEL.startswith('gpt') else "ChatGPT / LLM 学术优化"
 initial_prompt = "Serve me as a writing and programming assistant."
-title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
+title_html = f"<h1 align=\"center\">{title}</h1>"
 # Query log; Python 3.9+ recommended (the newer the better)
 import logging
@@ -140,5 +141,5 @@ def auto_opentab_delay():
     threading.Thread(target=open, name="open-browser", daemon=True).start()
 auto_opentab_delay()
-demo.title = "ChatGPT 学术优化"
+demo.title = title
 demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
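
Annotation (not part of the patch): the launch line above is the standard Gradio 3.x pattern; queue() bounds concurrent requests and launch() binds the server, with auth=None disabling the login prompt. A standalone sketch with a made-up toy component, not the project's actual UI:

```python
# Minimal Gradio app using the same queue()/launch() pattern (illustrative).
import gradio as gr

with gr.Blocks(title="demo") as demo:
    inp = gr.Textbox(label="input")
    out = gr.Textbox(label="output")
    btn = gr.Button("run")
    btn.click(lambda s: s.upper(), inputs=inp, outputs=out)  # toy handler

# at most 2 requests processed concurrently; no authentication
demo.queue(concurrency_count=2).launch(server_name="0.0.0.0", server_port=7860, auth=None)
```
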
diff --git a/request_llm/bridge_tgui.py b/request_llm/bridge_tgui.py
index 20a6352..d7cbe10 100644
--- a/request_llm/bridge_tgui.py
+++ b/request_llm/bridge_tgui.py
@@ -24,9 +24,9 @@ def random_hash():
     letters = string.ascii_lowercase + string.digits
     return ''.join(random.choice(letters) for i in range(9))
 
-async def run(context):
+async def run(context, max_token=512):
     params = {
-        'max_new_tokens': 512,
+        'max_new_tokens': max_token,
         'do_sample': True,
         'temperature': 0.5,
         'top_p': 0.9,
@@ -116,12 +116,15 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
     prompt = inputs
     tgui_say = ""
-    mutable = [""]
+    mutable = ["", time.time()]  # [partial reply, listener's last heartbeat]
     def run_coorotine(mutable):
         async def get_result(mutable):
             async for response in run(prompt):
                 print(response[len(mutable[0]):])
                 mutable[0] = response
+                if (time.time() - mutable[1]) > 3:  # listener stopped polling
+                    print('exit when no listener')
+                    break
         asyncio.run(get_result(mutable))
     thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
@@ -129,6 +132,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
     while thread_listen.is_alive():
         time.sleep(1)
+        mutable[1] = time.time()  # heartbeat: tell the coroutine we are still here
         # Print intermediate steps
         if tgui_say != mutable[0]:
             tgui_say = mutable[0]
@@ -147,12 +151,17 @@ def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
mutable = ["", time.time()]
def run_coorotine(mutable):
async def get_result(mutable):
- async for response in run(prompt):
+ async for response in run(prompt, max_token=20):
print(response[len(mutable[0]):])
mutable[0] = response
+ if (time.time() - mutable[1]) > 3:
+ print('exit when no listener')
+ break
asyncio.run(get_result(mutable))
thread_listen = threading.Thread(target=run_coorotine, args=(mutable,))
thread_listen.start()
- thread_listen.join()
+ while thread_listen.is_alive():
+ time.sleep(1)
+ mutable[1] = time.time()
tgui_say = mutable[0]
return tgui_say
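
Annotation (not part of the patch): both predict functions now share a heartbeat-watchdog pattern: the polling thread refreshes mutable[1] on every read, and the generator coroutine aborts once that timestamp is more than 3 seconds stale, so generation stops when nobody is listening. A self-contained sketch of the same pattern with generic names (no TGUI dependency):

```python
# Heartbeat watchdog between a producer thread and a polling consumer.
import threading, time

def produce(shared):
    # shared[0]: partial result; shared[1]: consumer's last heartbeat
    for i in range(100):
        time.sleep(0.5)                  # stand-in for generating one token
        shared[0] += f"token{i} "
        if time.time() - shared[1] > 3:  # heartbeat went stale
            print("exit when no listener")
            break

shared = ["", time.time()]
worker = threading.Thread(target=produce, args=(shared,), daemon=True)
worker.start()

for _ in range(5):                       # consumer polls a few times, then walks away
    time.sleep(1)
    shared[1] = time.time()              # heartbeat: still listening
    print("so far:", shared[0])

time.sleep(5)                            # let the stale-heartbeat check fire
print("producer still alive?", worker.is_alive())  # expected: False
```
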