Merge pull request #57 from GaiZhenbiao/master

Adding a bunch of nice-to-have features
commit 515045a8d1 by binary-husky, 2023-03-29 13:37:58 +08:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
3 changed files with 33 additions and 28 deletions

config.py

@@ -6,7 +6,7 @@ API_URL = "https://api.openai.com/v1/chat/completions"
 USE_PROXY = False
 if USE_PROXY:
     # Proxy address. Check your proxy client for the protocol (socks5/http), host (localhost) and port (11284)
     proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", }
     print('网络代理状态:运行。')
 else:
     proxies = None
@@ -25,5 +25,11 @@ MAX_RETRY = 2
 LLM_MODEL = "gpt-3.5-turbo"
 
 # Check whether the user forgot to edit the config
-if API_KEY == "sk-此处填API秘钥":
+if len(API_KEY) != 51:
     assert False, "请在config文件中修改API密钥, 添加海外代理之后再运行"
+
+# Number of threads to use in parallel
+CONCURRENT_COUNT = 100
+
+# Usernames and passwords for login
+AUTHENTICATION = []   # [("username", "password"), ("username2", "password2"), ...]
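Two conventions introduced here are worth spelling out: standard OpenAI keys of that era were exactly 51 characters ("sk-" plus 48 more), which is all the len(API_KEY) != 51 test checks, and an empty AUTHENTICATION list is later mapped to None in main.py so that Gradio skips the login screen. A minimal sketch, using a fake placeholder key:

API_KEY = "sk-" + "x" * 48                     # fake key: 51 characters, the expected shape
assert len(API_KEY) == 51                      # the sanity check config.py now performs

AUTHENTICATION = []                            # empty list means "no login required"
auth = None if AUTHENTICATION == [] else AUTHENTICATION
print(auth)                                    # None, so launch(..., auth=None) disables login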

main.py

@@ -1,14 +1,15 @@
 import os; os.environ['no_proxy'] = '*'  # avoid unexpected pollution from the proxy settings
 import gradio as gr
 from predict import predict
 from toolbox import format_io, find_free_port
 
 # Tip: copy config_private.py for your secrets, such as the API key and proxy address, so they are not accidentally pushed to GitHub
-try: from config_private import proxies, WEB_PORT, LLM_MODEL
-except: from config import proxies, WEB_PORT, LLM_MODEL
+try: from config_private import proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION
+except: from config import proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION
 
 # If WEB_PORT is -1, pick a random free web port
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
+AUTHENTICATION = None if AUTHENTICATION == [] else AUTHENTICATION
 
 initial_prompt = "Serve me as a writing and programming assistant."
 title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
@@ -16,7 +17,7 @@ title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
 
 # Query log; Python 3.9+ recommended (the newer the better)
 import logging
 os.makedirs('gpt_log', exist_ok=True)
 try: logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO, encoding='utf-8')
 except: logging.basicConfig(filename='gpt_log/chat_secrets.log', level=logging.INFO)
 print('所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!')
@@ -50,7 +51,9 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
             with gr.Column(scale=12):
                 txt = gr.Textbox(show_label=False, placeholder="Input question here.").style(container=False)
             with gr.Column(scale=1):
-                submitBtn = gr.Button("提交", variant="primary")
+                with gr.Row():
+                    resetBtn = gr.Button("重置", variant="secondary")
+                    submitBtn = gr.Button("提交", variant="primary")
         with gr.Row():
             from check_proxy import check_proxy
             statusDisplay = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行. \nNetwork: {check_proxy(proxies)}\nModel: {LLM_MODEL}")
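For orientation, the nested gr.Row keeps the new reset button next to the submit button inside the narrow right-hand column. A stripped-down sketch of just this layout, assuming Gradio 3.x widgets:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=12):
            txt = gr.Textbox(show_label=False, placeholder="Input question here.")
        with gr.Column(scale=1):
            with gr.Row():  # groups the two buttons inside the narrow column
                resetBtn = gr.Button("重置", variant="secondary")
                submitBtn = gr.Button("提交", variant="primary")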
@@ -73,31 +76,27 @@ with gr.Blocks(theme=set_theme, analytics_enabled=False) as demo:
     # inputs, top_p, temperature, top_k, repetition_penalty
     with gr.Accordion("arguments", open=False):
         top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
-        temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
+        temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
-    txt.submit(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
-    submitBtn.click(predict, [txt, top_p, temperature, chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
+    predict_args = dict(fn=predict, inputs=[txt, top_p, temperature, chatbot, history, systemPromptTxt], outputs=[chatbot, history, statusDisplay], show_progress=True)
+    empty_txt_args = dict(fn=lambda: "", inputs=[], outputs=[txt])
+    txt.submit(**predict_args)
+    txt.submit(**empty_txt_args)
+    submitBtn.click(**predict_args)
+    submitBtn.click(**empty_txt_args)
+    resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, statusDisplay])
     for k in functional:
         functional[k]["Button"].click(predict,
             [txt, top_p, temperature, chatbot, history, systemPromptTxt, TRUE, gr.State(k)], [chatbot, history, statusDisplay], show_progress=True)
     file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt], [chatbot, txt])
     for k in crazy_functional:
         click_handle = crazy_functional[k]["Button"].click(crazy_functional[k]["Function"],
             [txt, top_p, temperature, chatbot, history, systemPromptTxt, gr.State(PORT)], [chatbot, history, statusDisplay]
         )
         try: click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
         except: pass
-# Delayed helper: do a little preparation, then try to open a browser tab
-def auto_opentab_delay():
-    import threading, webbrowser, time
-    print(f"URL http://localhost:{PORT}")
-    def open():
-        time.sleep(2)
-        webbrowser.open_new_tab(f'http://localhost:{PORT}')
-    t = threading.Thread(target=open)
-    t.daemon = True; t.start()
-auto_opentab_delay()
 demo.title = "ChatGPT 学术优化"
-demo.queue().launch(server_name="0.0.0.0", share=True, server_port=PORT)
+demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, inbrowser=True, auth=AUTHENTICATION)
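The dict(...) refactor above keeps the Enter key and the submit button wired identically: the event arguments are built once and unpacked into each trigger, with a second tiny handler that clears the textbox after submission. A generic sketch of the pattern, with illustrative names not taken from the repo:

import gradio as gr

def shout(text):
    return text.upper()

with gr.Blocks() as demo:
    box = gr.Textbox(label="in")
    out = gr.Textbox(label="out")
    btn = gr.Button("run")
    # Build the event kwargs once, then unpack them into every trigger that must behave the same.
    run_args = dict(fn=shout, inputs=[box], outputs=[out])
    clear_args = dict(fn=lambda: "", inputs=[], outputs=[box])
    box.submit(**run_args); box.submit(**clear_args)   # Enter key
    btn.click(**run_args);  btn.click(**clear_args)    # button press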

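And the new launch line in one self-contained piece, assuming Gradio 3.x, where queue() still accepts concurrency_count; the credentials below are hypothetical:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("demo")

demo.queue(concurrency_count=100).launch(  # max number of requests processed concurrently
    server_name="0.0.0.0",                 # listen on all interfaces
    server_port=7860,                      # fixed port for this sketch
    inbrowser=True,                        # opens a browser tab, replacing auto_opentab_delay()
    auth=[("username", "password")],       # hypothetical credentials; auth=None disables login
)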
toolbox.py

@@ -14,7 +14,7 @@ def predict_no_ui_but_counting_down(i_say, i_say_show_user, chatbot, top_p, temp
     # A list is the simplest mutable structure: slot 0 holds the GPT output, slot 1 passes back any error message
     mutable = [None, '']
     # multi-threading worker
     def mt(i_say, history):
         while True:
             try:
                 mutable[0] = predict_no_ui(inputs=i_say, top_p=top_p, temperature=temperature, history=history)
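The mt worker communicates through a plain list because a thread cannot rebind the caller's local variables, but it can mutate a shared container: slot 0 receives the model's answer and slot 1 carries any error text. A standalone sketch of the same pattern, with the worker body standing in for predict_no_ui:

import threading

mutable = [None, '']                     # slot 0: result, slot 1: error message

def worker():
    try:
        mutable[0] = "model answer"      # stand-in for predict_no_ui(...)
    except Exception as e:
        mutable[1] = str(e)              # surface the failure to the caller

t = threading.Thread(target=worker)
t.daemon = True
t.start()
t.join()
print(mutable)                           # ['model answer', '']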
@@ -124,7 +124,7 @@ def format_io(self, y):
     """
     Parse input and output into HTML: paragraphize the input part of the last item in y, and convert the Markdown and math formulas in its output part to HTML
     """
-    if y is None: return []
+    if y is None or y == []: return []
     i_ask, gpt_reply = y[-1]
     i_ask = text_divide_paragraph(i_ask)  # the input part is rather free-form, so preprocess it a bit
     y[-1] = (
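The widened guard matters because the new reset button in main.py sets the chat history to an empty list, and [] would crash the y[-1] indexing on the next line. A two-line illustration:

y = []                             # what resetBtn now feeds into format_io
if y is None or y == []:
    print("nothing to format")     # without this branch, y[-1] raises IndexError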
@@ -144,7 +144,7 @@ def find_free_port():
     s.bind(('', 0))
     s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
     return s.getsockname()[1]
 
 def extract_archive(file_path, dest_dir):
     import zipfile
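For reference, find_free_port above rests on a standard socket trick: binding to port 0 asks the OS for any free ephemeral port, and getsockname() reports which one was assigned. A self-contained sketch:

import socket

# Binding to port 0 makes the OS assign a currently free ephemeral port.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind(('', 0))
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    print(s.getsockname()[1])      # e.g. 54321; free at the moment of the call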
@@ -165,7 +165,7 @@ def extract_archive(file_path, dest_dir):
         print("Successfully extracted tar archive to {}".format(dest_dir))
     else:
         return
 
 def find_recent_files(directory):
     """
     me: find files that is created with in one minutes under a directory with python, write a function