Merge branch 'suburl' of https://github.com/yuxiaoyuan0406/chatgpt_academic into yuxiaoyuan0406-suburl

This commit is contained in:
Your Name 2023-04-22 16:44:15 +08:00
commit 6f7e8076c7
3 changed files with 21 additions and 4 deletions

View File

@@ -60,3 +60,6 @@ AUTHENTICATION = []
# 重新URL重新定向实现更换API_URL的作用常规情况下不要修改
# 格式 {"https://api.openai.com/v1/chat/completions": "重定向的URL"}
API_URL_REDIRECT = {}
# 如果你需要把网址放在二级地址下(常规情况下,不要修改!!)
CUSTOM_PATH = "/"

11
main.py
View File

@@ -3,10 +3,10 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
def main():
    import gradio as gr
    from request_llm.bridge_all import predict
    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, run_gradio, DummyWith
    # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, CUSTOM_PATH = \
        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'CUSTOM_PATH')
    # 如果WEB_PORT是-1, 则随机选取WEB端口
    PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -186,7 +186,10 @@ def main():
    threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
    auto_opentab_delay()
    demo.queue(concurrency_count=CONCURRENT_COUNT)
    run_gradio(demo, auth=AUTHENTICATION, favicon_path="docs/logo.png", port=PORT, custom_path=CUSTOM_PATH)
if __name__ == "__main__":
    main()

View File

@@ -520,3 +520,14 @@ class DummyWith():
    def __exit__(self, exc_type, exc_value, traceback):
        return
def run_gradio(demo, auth, favicon_path, port, custom_path):
    """Serve a Gradio ``demo`` under a configurable sub-path.

    Wraps the demo in a FastAPI application so the UI can live under a
    second-level URL (``custom_path``), then blocks serving it with uvicorn
    on ``0.0.0.0:port``.

    Args:
        demo: the Gradio Blocks/Interface object to serve.
        auth: credentials forwarded to ``gr.mount_gradio_app``.
        favicon_path: favicon forwarded to ``gr.mount_gradio_app``.
        port: TCP port to listen on (all interfaces).
        custom_path: URL prefix the Gradio app is mounted at.
    """
    # Imports are local so the module loads even when these heavy
    # dependencies are only needed at launch time.
    import uvicorn
    import gradio as gr
    from fastapi import FastAPI

    fastapi_app = FastAPI()

    # Placeholder root endpoint; the real UI is mounted at `custom_path`.
    @fastapi_app.get("/")
    def read_main():
        return {"message": "NULL"}

    fastapi_app = gr.mount_gradio_app(
        fastapi_app,
        demo,
        path=custom_path,
        auth=auth,
        favicon_path=favicon_path,
    )
    uvicorn.run(fastapi_app, host="0.0.0.0", port=port)