Fix some details
This commit is contained in:
parent acaf8cdbf4
commit 23c5a77f82
@@ -45,7 +45,7 @@ MAX_RETRY = 2
 
 # OpenAI模型选择是(gpt4现在只对申请成功的人开放)
 LLM_MODEL = "gpt-3.5-turbo" # 可选 "chatglm"
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm", "gpt-4", "api2d-gpt-4", "api2d-gpt-3.5-turbo"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm"]
 
 # 本地LLM模型如ChatGLM的执行方式 CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
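
Reordering AVAIL_LLM_MODELS only changes the order in which models appear in the UI dropdown; the set itself is unchanged. For illustration, a consumer of these constants could sanity-check them as below. The validate_llm_choice helper is hypothetical, and we assume the constants live in a config module, as the surrounding names suggest:

# Hypothetical sanity check, not part of this commit; assumes the constants
# above live in a module named config.
from config import LLM_MODEL, AVAIL_LLM_MODELS

def validate_llm_choice():
    # The default model must be one of the entries offered in the dropdown,
    # otherwise gr.Dropdown(value=LLM_MODEL) would start on a value it cannot show.
    assert LLM_MODEL in AVAIL_LLM_MODELS, (
        f"LLM_MODEL={LLM_MODEL!r} is not listed in AVAIL_LLM_MODELS")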

main.py (4 changed lines)

@@ -95,13 +95,13 @@ def main():
                     with gr.Row():
                         with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
                             file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
-                with gr.Accordion("展开SysPrompt & 交互界面布局 & Github地址", open=(LAYOUT == "TOP-DOWN")):
+                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
                     system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                     max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="MaxLength",)
                     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
-                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="").style(container=False)
+                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
 
                     gr.Markdown(description)
                 with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
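
The two main.py changes above retitle the accordion and give the model dropdown a visible label. For readers unfamiliar with gradio, here is a stripped-down sketch of how such a dropdown can drive model switching; the on_model_change handler and the status Textbox are illustrative, not the repo's actual wiring (gradio 3.x API, matching the .style() call above):

# Illustrative sketch only (gradio 3.x); on_model_change and `status` are
# not part of the repo's actual wiring.
import gradio as gr

AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4"]

with gr.Blocks() as demo:
    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=AVAIL_LLM_MODELS[0],
                              label="更换LLM模型/请求源").style(container=False)
    status = gr.Textbox(label="active model")

    def on_model_change(model):
        # The code that actually issues API requests would read this selection.
        return f"requests will now be sent to {model}"

    md_dropdown.change(on_model_change, inputs=[md_dropdown], outputs=[status])

demo.launch()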
@@ -175,15 +175,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 
             if chunk:
                 try:
-                    if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
+                    chunk_decoded = chunk.decode()
+                    # 前者API2D的
+                    if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
                         # 判定为数据流的结束,gpt_replying_buffer也写完了
                         logging.info(f'[response] {gpt_replying_buffer}')
                         break
                     # 处理数据流的主体
-                    chunkjson = json.loads(chunk.decode()[6:])
+                    chunkjson = json.loads(chunk_decoded[6:])
                     status_text = f"finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                     # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
-                    gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk.decode()[6:])['choices'][0]["delta"]["content"]
+                    gpt_replying_buffer = gpt_replying_buffer + json.loads(chunk_decoded[6:])['choices'][0]["delta"]["content"]
                     history[-1] = gpt_replying_buffer
                     chatbot[-1] = (history[-2], history[-1])
                     yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
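
The rewritten termination check accepts two end-of-stream conventions: a literal 'data: [DONE]' line (which the new comment attributes to API2D) and an empty delta object (the pre-existing check). The [6:] slice strips the 6-character 'data: ' SSE prefix. A self-contained restatement of that logic, with an illustrative function name of our own:

import json

def parse_sse_chunk(chunk: bytes):
    # Illustrative restatement of the logic above, not the repo's function.
    # Returns the text delta carried by one SSE chunk, or None at end of stream.
    chunk_decoded = chunk.decode()
    if 'data: [DONE]' in chunk_decoded:        # end marker (API2D, per the comment above)
        return None
    payload = json.loads(chunk_decoded[6:])    # strip the 6-char 'data: ' prefix
    delta = payload['choices'][0]['delta']
    if len(delta) == 0:                        # empty delta: the other end-of-stream signal
        return None
    return delta.get('content', '')

Decoding the chunk once into chunk_decoded, as the hunk now does, also avoids the repeated chunk.decode() calls in the replaced lines.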
@@ -192,7 +194,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     traceback.print_exc()
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
                     chunk = get_full_error(chunk, stream_response)
-                    error_msg = chunk.decode()
+                    error_msg = chunk_decoded
                     if "reduce the length" in error_msg:
                         chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. 本次输入过长,或历史数据过长. 历史缓存数据现已释放,您可以请再次尝试.")
                         history = []    # 清除历史
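
get_full_error is defined elsewhere in the repo; from this call site it evidently drains the rest of the response stream so the complete error body can be reported rather than only the first chunk. A plausible sketch of such a helper, which is an assumption on our part and not the repo's actual implementation:

def get_full_error(chunk, stream_response):
    # Plausible sketch only; the real helper lives elsewhere in the repo.
    # Concatenate whatever is left in the stream onto the chunk that
    # triggered the error, so the caller can decode the complete message.
    while True:
        try:
            chunk += next(stream_response)
        except StopIteration:
            break
    return chunk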
@@ -205,7 +207,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                     else:
                         from toolbox import regular_txt_to_markdown
                         tb_str = '```\n' + traceback.format_exc() + '```'
-                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk.decode()[4:])}")
+                        chatbot[-1] = (chatbot[-1][0], f"[Local Message] 异常 \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded[4:])}")
                     yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
                     return
 

version (2 changed lines)

@@ -1,5 +1,5 @@
 {
   "version": 3.1,
   "show_feature": true,
-  "new_feature": "添加支持清华ChatGLM和GPT-4 <-> 添加支持API2D(国内不需要代理) <-> 改进架构,支持与多个LLM模型同时对话  <-> 支持多API-KEY负载均衡(并列填写,逗号分割) <-> 添加输入区文本清除按键"
+  "new_feature": "添加支持清华ChatGLM和GPT-4 <-> 改进架构,支持与多个LLM模型同时对话 <-> 添加支持API2D(国内,可支持gpt4)<-> 支持多API-KEY负载均衡(并列填写,逗号分割) <-> 添加输入区文本清除按键"
 }
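
The version manifest edit only rewords the new_feature announcement. For illustration, a minimal sketch of how such a manifest could drive an update notice; the function and the remote-fetch step are hypothetical and not part of this commit:

import json

def check_for_update(local_text: str, remote_text: str):
    # Hypothetical consumer of the manifest above; the repo's real update
    # check may differ. remote_text would come from fetching the same file
    # on the project's default branch.
    local, remote = json.loads(local_text), json.loads(remote_text)
    if remote["version"] > local["version"] and remote.get("show_feature"):
        print(f"New version {remote['version']} available: {remote['new_feature']}")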