stage llm model interface

commit 190b547373 (parent eee4cb361c)
README.md | 12 changes (+7 −5)
@@ -36,14 +36,16 @@ https://github.com/polarwinkel/mdtex2html
 Custom shortcut keys | Supports user-defined shortcut keys
 Proxy server configuration | Supports configuring a proxy server
 Modular design | Supports custom higher-order experimental features
-Self program analysis | [Experimental feature] One-click reading of this project's own source code
-Program analysis | [Experimental feature] One-click analysis of other Python/C++ projects
-Paper reading | [Experimental feature] One-click interpretation of a full LaTeX paper with summary generation
-Batch comment generation | [Experimental feature] One-click batch generation of function comments
-Chat analysis report generation | [Experimental feature] Automatically generates a summary report after running
+Self program analysis | [Function plugin] One-click reading of this project's own source code
+Program analysis | [Function plugin] One-click analysis of other Python/C++ and similar projects
+Paper reading | [Function plugin] One-click interpretation of a full LaTeX paper with summary generation
+arxiv assistant | [Function plugin] Enter a URL to translate the abstract and download the paper in one click
+Batch comment generation | [Function plugin] One-click batch generation of function comments
+Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
 Formula display | Shows both the TeX source and the rendered form of formulas
 Image display | Can display images in markdown
 Markdown tables in GPT output | Renders the markdown tables GPT produces
+Local LLM interface | Connects local language models such as galactica via [TGUI](https://github.com/oobabooga/text-generation-webui)
 …… | ……
 
 </div>
main.py | 5 changes (+3 −2)
@@ -11,8 +11,9 @@ proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT =
 PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
 if not AUTHENTICATION: AUTHENTICATION = None
 
+title = "ChatGPT 学术优化" if LLM_MODEL.startswith('gpt') else "ChatGPT / LLM 学术优化"
 initial_prompt = "Serve me as a writing and programming assistant."
-title_html = """<h1 align="center">ChatGPT 学术优化</h1>"""
+title_html = f"<h1 align=\"center\">{title}</h1>"
 
 # Conversation log; Python 3.9+ recommended (the newer the better)
 import logging
@@ -140,5 +141,5 @@ def auto_opentab_delay():
     threading.Thread(target=open, name="open-browser", daemon=True).start()
 
 auto_opentab_delay()
-demo.title = "ChatGPT 学术优化"
+demo.title = title
 demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=True, server_port=PORT, auth=AUTHENTICATION)
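In effect, the title now doubles as a backend indicator: any LLM_MODEL that does not start with 'gpt' (for example, a local model served through TGUI) switches the banner to the "ChatGPT / LLM" variant, while the default OpenAI configuration keeps the original title.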
@@ -24,9 +24,9 @@ def random_hash():
     letters = string.ascii_lowercase + string.digits
     return ''.join(random.choice(letters) for i in range(9))
 
-async def run(context):
+async def run(context, max_token=512):
     params = {
-        'max_new_tokens': 512,
+        'max_new_tokens': max_token,
         'do_sample': True,
         'temperature': 0.5,
         'top_p': 0.9,
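A hedged sketch of how the now-parameterized coroutine can be consumed. The real run streams from a TGUI server; the stub below is purely illustrative and only mimics its observable shape (yielding the accumulated response so far, capped by max_token) so the example executes stand-alone:

    import asyncio

    async def run(context, max_token=512):
        # Illustrative stub standing in for the TGUI-backed coroutine above:
        # yields the accumulated response text so far, as the callers expect.
        text = ""
        for word in context.split()[:max_token]:
            await asyncio.sleep(0)  # simulate streaming arrival
            text += word + " "
            yield text

    async def demo():
        previous = ""
        async for response in run("a short smoke test of streaming output", max_token=4):
            print(response[len(previous):], end="")  # print only the new delta
            previous = response
        print()

    asyncio.run(demo())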
@@ -116,12 +116,15 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
     prompt = inputs
     tgui_say = ""
 
-    mutable = [""]
+    mutable = ["", time.time()]
     def run_coorotine(mutable):
         async def get_result(mutable):
             async for response in run(prompt):
                 print(response[len(mutable[0]):])
                 mutable[0] = response
+                if (time.time() - mutable[1]) > 3:
+                    print('exit when no listener')
+                    break
         asyncio.run(get_result(mutable))
 
     thread_listen = threading.Thread(target=run_coorotine, args=(mutable,), daemon=True)
@@ -129,6 +132,7 @@ def predict_tgui(inputs, top_p, temperature, chatbot=[], history=[], system_prom
 
     while thread_listen.is_alive():
         time.sleep(1)
+        mutable[1] = time.time()
         # Print intermediate steps
         if tgui_say != mutable[0]:
             tgui_say = mutable[0]
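Together these two hunks form a heartbeat handshake: the listener loop refreshes mutable[1] once per second, and the generator thread aborts if that timestamp goes stale for more than 3 seconds. A minimal, self-contained sketch of the pattern, with a fake stream standing in for the TGUI call (all names here are illustrative, not from the repo):

    import asyncio
    import threading
    import time

    async def fake_stream(n=100):
        text = ""
        for i in range(n):
            await asyncio.sleep(0.1)  # pretend tokens arrive from the model
            text += f"tok{i} "
            yield text

    def worker(mutable):
        async def get_result():
            async for response in fake_stream():
                mutable[0] = response
                if (time.time() - mutable[1]) > 3:  # heartbeat stale: listener is gone
                    print('exit when no listener')
                    break
        asyncio.run(get_result())

    mutable = ["", time.time()]  # [latest text, heartbeat timestamp]
    t = threading.Thread(target=worker, args=(mutable,), daemon=True)
    t.start()
    for _ in range(3):           # listener: consume output and refresh the heartbeat
        time.sleep(1)
        mutable[1] = time.time()
        print(f"so far: {len(mutable[0])} chars")
    # once the listener stops refreshing, the worker exits within ~3 s
    t.join(timeout=10)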
@@ -147,12 +151,17 @@ def predict_tgui_no_ui(inputs, top_p, temperature, history=[], sys_prompt=""):
     mutable = ["", time.time()]
     def run_coorotine(mutable):
         async def get_result(mutable):
-            async for response in run(prompt):
+            async for response in run(prompt, max_token=20):
                 print(response[len(mutable[0]):])
                 mutable[0] = response
+                if (time.time() - mutable[1]) > 3:
+                    print('exit when no listener')
+                    break
         asyncio.run(get_result(mutable))
     thread_listen = threading.Thread(target=run_coorotine, args=(mutable,))
     thread_listen.start()
-    thread_listen.join()
+    while thread_listen.is_alive():
+        time.sleep(1)
+        mutable[1] = time.time()
     tgui_say = mutable[0]
     return tgui_say
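Note why thread_listen.join() had to go: with the watchdog in place, a plain join would never refresh mutable[1], so generation would always self-terminate after 3 seconds. The polling loop keeps the heartbeat alive for as long as the caller is actually waiting, and max_token=20 bounds this no-UI variant to a short completion.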