diff --git a/config.py b/config.py
index 0ad723b..a78af65 100644
--- a/config.py
+++ b/config.py
@@ -60,3 +60,6 @@ AUTHENTICATION = []
 # 重新URL重新定向,实现更换API_URL的作用(常规情况下,不要修改!!)
 # 格式 {"https://api.openai.com/v1/chat/completions": "重定向的URL"}
 API_URL_REDIRECT = {}
+
+# 如果你需要把网址放在二级地址下(常规情况下,不要修改!!)
+CUSTOM_PATH = "/"
diff --git a/main.py b/main.py
index 833720b..11f92f4 100644
--- a/main.py
+++ b/main.py
@@ -3,10 +3,10 @@ import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
 def main():
     import gradio as gr
     from request_llm.bridge_all import predict
-    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
+    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, run_gradio, DummyWith
     # 建议您复制一个config_private.py放自己的秘密, 如API和代理网址, 避免不小心传github被别人看到
-    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
-        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')
+    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS, CUSTOM_PATH = \
+        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS', 'CUSTOM_PATH')
 
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -186,7 +186,10 @@ def main():
         threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
 
     auto_opentab_delay()
-    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
+    demo.queue(concurrency_count=CONCURRENT_COUNT)
+    run_gradio(demo, auth=AUTHENTICATION, favicon_path="docs/logo.png", port=PORT, custom_path=CUSTOM_PATH)
 
 if __name__ == "__main__":
     main()
+
+
diff
def run_gradio(demo, auth, favicon_path, port, custom_path):
    """Serve a Gradio Blocks app on a FastAPI/uvicorn server under *custom_path*.

    Replaces ``demo.launch(...)`` so the UI can live beneath a sub-path
    (e.g. behind a reverse proxy at ``/chat``).

    Parameters:
        demo:         the Gradio Blocks application to mount.
        auth:         authentication list/tuple forwarded to Gradio
                      (same value previously passed to ``demo.launch``).
        favicon_path: path of the favicon forwarded to Gradio.
        port:         TCP port for the uvicorn server.
        custom_path:  mount point for the UI; "/" serves it at the root.

    Note: this call blocks — ``uvicorn.run`` does not return until the
    server is shut down.
    """
    # Imports are local so merely importing toolbox does not require
    # uvicorn/fastapi to be installed.
    import uvicorn
    import gradio as gr
    from fastapi import FastAPI

    app = FastAPI()

    if custom_path != "/":
        # Placeholder response at the server root, but ONLY when the UI is
        # mounted under a sub-path. FastAPI matches explicit routes before
        # mounted sub-applications, so registering this route when
        # custom_path == "/" (the shipped default) would shadow the Gradio
        # UI and serve this JSON instead.
        @app.get("/")
        def read_main():
            return {"message": "NULL"}

    app = gr.mount_gradio_app(app, demo, path=custom_path,
                              auth=auth, favicon_path=favicon_path)
    uvicorn.run(app, host="0.0.0.0", port=port)