correct the misuse of spark image understanding

parent 867ddd355e
commit e3e9921f6b
@@ -139,6 +139,8 @@ def can_multi_process(llm):
     if llm.startswith('gpt-'): return True
     if llm.startswith('api2d-'): return True
     if llm.startswith('azure-'): return True
+    if llm.startswith('spark'): return True
+    if llm.startswith('zhipuai'): return True
     return False
 
 def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
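The two new branches let the Spark and Zhipu models pass the multi-threading whitelist, so batch plugins can fan requests out in parallel for them instead of falling back to serial calls. Below is a minimal sketch of how such a gate is typically consumed; `dispatch`, `request_fn`, and `n_threads` are illustrative names, not part of this commit.

    from concurrent.futures import ThreadPoolExecutor

    def dispatch(llm, prompts, request_fn, n_threads=4):
        # Hypothetical caller: only models whitelisted by can_multi_process (see the
        # hunk above) are queried from a thread pool; everything else stays serial.
        if not can_multi_process(llm):
            return [request_fn(p) for p in prompts]
        with ThreadPoolExecutor(max_workers=n_threads) as pool:
            return list(pool.map(request_fn, prompts))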
@@ -26,7 +26,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
+    for response in sri.generate(inputs, llm_kwargs, history, sys_prompt, use_image_api=False):
         if len(observe_window) >= 1:
             observe_window[0] = response
         if len(observe_window) >= 2:
@@ -52,7 +52,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     # Start receiving the reply
     from .com_sparkapi import SparkRequestInstance
     sri = SparkRequestInstance()
-    for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
+    for response in sri.generate(inputs, llm_kwargs, history, system_prompt, use_image_api=True):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
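Taken together, the two bridge changes fix the misuse named in the commit title: the headless `predict_no_ui_long_connection` path now always passes `use_image_api=False`, while the interactive `predict` path passes `True`, so only a live chat session can be routed to the picture-understanding endpoint. A hedged sketch of that calling convention; the `spark_reply` wrapper and its `interactive` flag are hypothetical, only the `SparkRequestInstance.generate` call mirrors the diff.

    from .com_sparkapi import SparkRequestInstance

    def spark_reply(inputs, llm_kwargs, history, system_prompt, interactive=False):
        # Hypothetical wrapper: plugins and batch jobs call this with interactive=False,
        # the chat UI with interactive=True, mirroring the two call sites in the diff.
        sri = SparkRequestInstance()
        reply = ""
        for reply in sri.generate(inputs, llm_kwargs, history, system_prompt,
                                  use_image_api=interactive):
            pass  # each iteration carries the full reply accumulated so far
        return reply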
@@ -72,12 +72,12 @@ class SparkRequestInstance():
 
         self.result_buf = ""
 
-    def generate(self, inputs, llm_kwargs, history, system_prompt):
+    def generate(self, inputs, llm_kwargs, history, system_prompt, use_image_api=False):
         llm_kwargs = llm_kwargs
         history = history
         system_prompt = system_prompt
         import _thread as thread
-        thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt))
+        thread.start_new_thread(self.create_blocking_request, (inputs, llm_kwargs, history, system_prompt, use_image_api))
         while True:
             self.time_to_yield_event.wait(timeout=1)
             if self.time_to_yield_event.is_set():
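`generate` itself stays a thin generator: it forwards the new `use_image_api` flag to `create_blocking_request` on a background thread and keeps polling `time_to_yield_event` for partial text. A self-contained sketch of that thread-plus-event streaming pattern, with `BackgroundStreamer` as an illustrative stand-in rather than the class from this repository:

    import threading
    import time

    class BackgroundStreamer:
        # Stand-in for the pattern used by SparkRequestInstance.generate: a worker
        # thread fills result_buf while the generator polls an event and yields.
        def __init__(self):
            self.result_buf = ""
            self.time_to_yield_event = threading.Event()
            self.time_to_exit_event = threading.Event()

        def _blocking_request(self, prompt):
            for word in prompt.split():
                time.sleep(0.1)                      # pretend to wait on the websocket
                self.result_buf += word + " "        # a message callback would append here
                self.time_to_yield_event.set()       # wake the consumer
            self.time_to_exit_event.set()

        def generate(self, prompt):
            threading.Thread(target=self._blocking_request, args=(prompt,), daemon=True).start()
            while True:
                self.time_to_yield_event.wait(timeout=1)
                if self.time_to_yield_event.is_set():
                    self.time_to_yield_event.clear()
                    yield self.result_buf            # everything received so far
                if self.time_to_exit_event.is_set():
                    return

Iterating `BackgroundStreamer().generate(...)` yields progressively longer snapshots of the reply, which is how the bridge code above feeds `observe_window` and the chatbot.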
@@ -86,7 +86,7 @@ class SparkRequestInstance():
                 return self.result_buf
 
 
-    def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt):
+    def create_blocking_request(self, inputs, llm_kwargs, history, system_prompt, use_image_api):
         if llm_kwargs['llm_model'] == 'sparkv2':
             gpt_url = self.gpt_url_v2
         elif llm_kwargs['llm_model'] == 'sparkv3':
@@ -94,10 +94,12 @@ class SparkRequestInstance():
         else:
             gpt_url = self.gpt_url
         file_manifest = []
-        if llm_kwargs.get('most_recent_uploaded'):
+        if use_image_api and llm_kwargs.get('most_recent_uploaded'):
             if llm_kwargs['most_recent_uploaded'].get('path'):
                 file_manifest = get_pictures_list(llm_kwargs['most_recent_uploaded']['path'])
-                gpt_url = self.gpt_url_img
+        if len(file_manifest) > 0:
+            print('Using the iFlytek image understanding API')
+            gpt_url = self.gpt_url_img
         wsParam = Ws_Param(self.appid, self.api_key, self.api_secret, gpt_url)
         websocket.enableTrace(False)
         wsUrl = wsParam.create_url()
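The last hunk is the actual bug fix: previously any `most_recent_uploaded` entry flipped `gpt_url` to the image endpoint, so a stale upload could hijack non-chat calls; now the switch requires both `use_image_api=True` and at least one picture actually found on disk. A hypothetical distillation of that decision, with `pick_spark_endpoint`, `gpt_urls`, and `list_pictures` standing in for the instance attributes and `get_pictures_list`:

    def pick_spark_endpoint(llm_model, use_image_api, uploaded_path, gpt_urls, list_pictures):
        # gpt_urls is assumed to be a dict like {'v1': ..., 'v2': ..., 'v3': ..., 'img': ...};
        # list_pictures(path) returns the image files under path (cf. get_pictures_list).
        if llm_model == 'sparkv2':
            url = gpt_urls['v2']
        elif llm_model == 'sparkv3':
            url = gpt_urls['v3']
        else:
            url = gpt_urls['v1']

        pictures = []
        if use_image_api and uploaded_path:   # only the interactive chat path opts in
            pictures = list_pictures(uploaded_path)
        if pictures:                          # switch only when images are really present
            url = gpt_urls['img']
        return url, pictures

If either condition fails, the plain chat endpoint is kept, which is the behaviour the commit title asks for.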