diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 89c9f76..88848a9 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -16,6 +16,9 @@ from toolbox import get_conf, trimmed_format_exc
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
from .bridge_chatgpt import predict as chatgpt_ui
+from .bridge_chatgpt_vision import predict_no_ui_long_connection as chatgpt_vision_noui
+from .bridge_chatgpt_vision import predict as chatgpt_vision_ui
+
from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui
from .bridge_chatglm import predict as chatglm_ui
@@ -162,6 +165,16 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
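+    # GPT-4 with image input: requests are routed to the dedicated vision bridge in bridge_chatgpt_vision.py.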
+ "gpt-4-vision-preview": {
+ "fn_with_ui": chatgpt_vision_ui,
+ "fn_without_ui": chatgpt_vision_noui,
+ "endpoint": openai_endpoint,
+ "max_token": 4096,
+ "tokenizer": tokenizer_gpt4,
+ "token_cnt": get_token_num_gpt4,
+ },
+
+
     # azure openai
     "azure-gpt-3.5":{
         "fn_with_ui": chatgpt_ui,
diff --git a/request_llms/bridge_chatgpt_vision.py b/request_llms/bridge_chatgpt_vision.py
new file mode 100644
index 0000000..112391c
--- /dev/null
+++ b/request_llms/bridge_chatgpt_vision.py
@@ -0,0 +1,329 @@
+"""
+ 该文件中主要包含三个函数
+
+ 不具备多线程能力的函数:
+ 1. predict: 正常对话时使用,具备完备的交互功能,不可多线程
+
+ 具备多线程调用能力的函数
+ 2. predict_no_ui_long_connection:支持多线程
+"""
+
+import json
+import time
+import logging
+import requests
+import base64
+import os
+import glob
+
+from toolbox import get_conf, update_ui, is_any_api_key, select_api_key, what_keys, clip_history, trimmed_format_exc, is_the_upload_folder, update_ui_lastest_msg, get_max_token
+proxies, TIMEOUT_SECONDS, MAX_RETRY, API_ORG, AZURE_CFG_ARRAY = \
+ get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY', 'API_ORG', 'AZURE_CFG_ARRAY')
+
+timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
+                  ' Check that the proxy server is reachable and that the proxy format is correct; it must be [protocol]://[address]:[port], with every part present.'
+
+def have_any_recent_upload_image_files(chatbot):
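+    # Return (True, file_manifest) when images were uploaded within the last five minutes; otherwise (False, None).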
+ _5min = 5 * 60
+ if chatbot is None: return False, None # chatbot is None
+ most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
+ if not most_recent_uploaded: return False, None # most_recent_uploaded is None
+    if time.time() - most_recent_uploaded["time"] < _5min:
+        path = most_recent_uploaded['path']
+ file_manifest = [f for f in glob.glob(f'{path}/**/*.jpg', recursive=True)]
+ file_manifest += [f for f in glob.glob(f'{path}/**/*.jpeg', recursive=True)]
+ file_manifest += [f for f in glob.glob(f'{path}/**/*.png', recursive=True)]
+ if len(file_manifest) == 0: return False, None
+ return True, file_manifest # most_recent_uploaded is new
+ else:
+ return False, None # most_recent_uploaded is too old
+
+def report_invalid_key(key):
+ if get_conf("BLOCK_INVALID_APIKEY"):
+        # Experimental feature: automatically detect and block invalid keys. Do not use.
+ from request_llms.key_manager import ApiKeyManager
+        ApiKeyManager().add_key_to_blacklist(key)
+
+def get_full_error(chunk, stream_response):
+ """
+    Fetch the complete error message returned by OpenAI
+ """
+ while True:
+ try:
+ chunk += next(stream_response)
+ except:
+ break
+ return chunk
+
+def decode_chunk(chunk):
+    # Peek into the chunk ahead of time (used to detect anomalies)
+ chunk_decoded = chunk.decode()
+ chunkjson = None
+ has_choices = False
+ choice_valid = False
+ has_content = False
+ has_role = False
+ try:
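+        # Each SSE chunk has the form 'data: {...}'; skip the 6-character 'data: ' prefix before JSON parsing.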
+ chunkjson = json.loads(chunk_decoded[6:])
+ has_choices = 'choices' in chunkjson
+ if has_choices: choice_valid = (len(chunkjson['choices']) > 0)
+ if has_choices and choice_valid: has_content = "content" in chunkjson['choices'][0]["delta"]
+ if has_choices and choice_valid: has_role = "role" in chunkjson['choices'][0]["delta"]
+ except:
+ pass
+ return chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role
+
+from functools import lru_cache
+@lru_cache(maxsize=32)
+def verify_endpoint(endpoint):
+ """
+    Check whether the endpoint is usable (currently a pass-through; lru_cache memoizes one check per endpoint)
+ """
+ return endpoint
+
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+ raise NotImplementedError
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
+
+ have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
+
+ if is_any_api_key(inputs):
+ chatbot._cookies['api_key'] = inputs
+ chatbot.append(("输入已识别为openai的api_key", what_keys(inputs)))
+ yield from update_ui(chatbot=chatbot, history=history, msg="api_key已导入") # 刷新界面
+ return
+ elif not is_any_api_key(chatbot._cookies['api_key']):
+ chatbot.append((inputs, "缺少api_key。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。"))
+ yield from update_ui(chatbot=chatbot, history=history, msg="缺少api_key") # 刷新界面
+ return
+ if not have_recent_file:
+ chatbot.append((inputs, "没有检测到任何近期上传的图像文件,请上传jpg格式的图片,此外,请注意拓展名需要小写"))
+ yield from update_ui(chatbot=chatbot, history=history, msg="等待图片") # 刷新界面
+ return
+ if os.path.exists(inputs):
+ chatbot.append((inputs, "已经接收到您上传的文件,您不需要再重复强调该文件的路径了,请直接输入您的问题。"))
+ yield from update_ui(chatbot=chatbot, history=history, msg="等待指令") # 刷新界面
+ return
+
+
+ user_input = inputs
+ if additional_fn is not None:
+ from core_functional import handle_core_functionality
+ inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
+
+ raw_input = inputs
+ logging.info(f'[raw_input] {raw_input}')
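+    # Embed each uploaded image into the chat transcript as centered HTML so the front end renders it inline.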
+    def make_media_input(inputs, image_paths):
+        for image_path in image_paths:
+            inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
+        return inputs
+ chatbot.append((make_media_input(inputs, image_paths), ""))
+ yield from update_ui(chatbot=chatbot, history=history, msg="等待响应") # 刷新界面
+
+ # check mis-behavior
+ if is_the_upload_folder(user_input):
+ chatbot[-1] = (inputs, f"[Local Message] 检测到操作错误!当您上传文档之后,需点击“**函数插件区**”按钮进行处理,请勿点击“提交”按钮或者“基础功能区”按钮。")
+ yield from update_ui(chatbot=chatbot, history=history, msg="正常") # 刷新界面
+ time.sleep(2)
+
+ try:
+ headers, payload, api_key = generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths)
+ except RuntimeError as e:
+ chatbot[-1] = (inputs, f"您提供的api-key不满足要求,不包含任何可用于{llm_kwargs['llm_model']}的api-key。您可能选择了错误的模型或请求源。")
+ yield from update_ui(chatbot=chatbot, history=history, msg="api-key不满足要求") # 刷新界面
+ return
+
+    # Verify that the endpoint is valid
+ try:
+ from .bridge_all import model_info
+ endpoint = verify_endpoint(model_info[llm_kwargs['llm_model']]['endpoint'])
+ except:
+ tb_str = '```\n' + trimmed_format_exc() + '```'
+ chatbot[-1] = (inputs, tb_str)
+ yield from update_ui(chatbot=chatbot, history=history, msg="Endpoint不满足要求") # 刷新界面
+ return
+
+ history.append(make_media_input(inputs, image_paths))
+ history.append("")
+
+ retry = 0
+ while True:
+ try:
+ # make a POST request to the API endpoint, stream=True
+ response = requests.post(endpoint, headers=headers, proxies=proxies,
+ json=payload, stream=True, timeout=TIMEOUT_SECONDS);break
+ except:
+ retry += 1
+            chatbot[-1] = (chatbot[-1][0], timeout_bot_msg)
+            retry_msg = f", retrying ({retry}/{MAX_RETRY}) ..." if MAX_RETRY > 0 else ""
+            yield from update_ui(chatbot=chatbot, history=history, msg="request timed out"+retry_msg) # refresh the UI
+ if retry > MAX_RETRY: raise TimeoutError
+
+ gpt_replying_buffer = ""
+
+ is_head_of_the_stream = True
+ if stream:
+ stream_response = response.iter_lines()
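+        # Consume the response line by line; each line is one server-sent event of the completion stream.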
+ while True:
+ try:
+ chunk = next(stream_response)
+ except StopIteration:
+                # Errors like this come from non-official OpenAI endpoints; official OpenAI and API2D never reach this branch
+                chunk_decoded = chunk.decode()
+                error_msg = chunk_decoded
+                # First rule out a third-party one-api bug where no [DONE] packet is ever sent
+                if len(gpt_replying_buffer.strip()) > 0 and len(error_msg) == 0:
+                    yield from update_ui(chatbot=chatbot, history=history, msg="Detected a defective non-official OpenAI endpoint; consider switching to a more stable one.")
+                    break
+                # In all other cases, report the error directly
+                chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
+                yield from update_ui(chatbot=chatbot, history=history, msg="A non-official OpenAI endpoint returned an error: " + chunk.decode()) # refresh the UI
+ return
+
+            # Peek into the chunk ahead of time (used to detect anomalies)
+ chunk_decoded, chunkjson, has_choices, choice_valid, has_content, has_role = decode_chunk(chunk)
+
+ if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded):
+                # The first frame of the stream carries no content
+ is_head_of_the_stream = False; continue
+
+ if chunk:
+ try:
+                    if has_choices and not choice_valid:
+                        # Some broken third-party endpoints produce this error
+                        continue
+                    # The former is API2D's stop condition, the latter OpenAI's
+                    if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
+                        # End of the stream; gpt_replying_buffer is complete
+                        lastmsg = chatbot[-1][-1] + f"<br/><br/>{llm_kwargs['llm_model']} call finished. This model has no multi-turn conversation capability; to ask a follow-up question, please switch to another model."
+ yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
+ logging.info(f'[response] {gpt_replying_buffer}')
+ break
+                    # Process the main body of the stream
+                    status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
+                    # If an exception is raised here, the text is usually too long; see the output of get_full_error for details
+                    if has_content:
+                        # Normal case
+                        gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+                    elif has_role:
+                        # Some third-party endpoints produce this error; tolerate it
+                        continue
+                    else:
+                        # Some broken third-party endpoints produce this error
+                        gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+
+ history[-1] = gpt_replying_buffer
+ chatbot[-1] = (history[-2], history[-1])
+                    yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # refresh the UI
+ except Exception as e:
+ yield from update_ui(chatbot=chatbot, history=history, msg="Json解析不合常规") # 刷新界面
+ chunk = get_full_error(chunk, stream_response)
+ chunk_decoded = chunk.decode()
+ error_msg = chunk_decoded
+ chatbot, history = handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key)
+ yield from update_ui(chatbot=chatbot, history=history, msg="Json异常" + error_msg) # 刷新界面
+ print(error_msg)
+ return
+
+def handle_error(inputs, llm_kwargs, chatbot, history, chunk_decoded, error_msg, api_key=""):
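+    # Map well-known OpenAI / third-party error strings to actionable messages shown in the chat window.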
+ from .bridge_all import model_info
+    openai_website = ' Please log in to OpenAI to view the details: https://platform.openai.com/signup'
+    if "reduce the length" in error_msg:
+        if len(history) >= 2: history[-1] = ""; history[-2] = "" # clear the overflowing turn: history[-2] is this input, history[-1] is this output
+        history = clip_history(inputs=inputs, history=history, tokenizer=model_info[llm_kwargs['llm_model']]['tokenizer'],
+                               max_token_limit=(model_info[llm_kwargs['llm_model']]['max_token'])) # release at least half of the history
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Reduce the length. The current input, or the conversation history, is too long. Part of the cached history has been released; you may try again. (If it fails again, the input itself is most likely too long.)")
+    elif "does not exist" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Model {llm_kwargs['llm_model']} does not exist, or you have not been granted access to it.")
+    elif "Incorrect API key" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Incorrect API key. OpenAI refused service because an incorrect API_KEY was provided." + openai_website); report_invalid_key(api_key)
+    elif "exceeded your current quota" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You exceeded your current quota. OpenAI refused service because the account quota is exhausted." + openai_website); report_invalid_key(api_key)
+    elif "account is not active" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Your account is not active. OpenAI refused service because the account is inactive." + openai_website); report_invalid_key(api_key)
+    elif "associated with a deactivated account" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] You are associated with a deactivated account. OpenAI refused service because the account is deactivated." + openai_website); report_invalid_key(api_key)
+    elif "API key has been deactivated" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] API key has been deactivated. OpenAI refused service because the key is deactivated." + openai_website); report_invalid_key(api_key)
+    elif "bad forward key" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Bad forward key. Insufficient API2D account balance.")
+    elif "Not enough point" in error_msg:
+        chatbot[-1] = (chatbot[-1][0], "[Local Message] Not enough point. Not enough API2D account points.")
+    else:
+        from toolbox import regular_txt_to_markdown
+        tb_str = '```\n' + trimmed_format_exc() + '```'
+        chatbot[-1] = (chatbot[-1][0], f"[Local Message] Exception \n\n{tb_str} \n\n{regular_txt_to_markdown(chunk_decoded)}")
+ return chatbot, history
+
+# Encode an image file as a base64 string
+def encode_image(image_path):
+ with open(image_path, "rb") as image_file:
+ return base64.b64encode(image_file.read()).decode('utf-8')
+
+def generate_payload(inputs, llm_kwargs, history, system_prompt, image_paths):
+ """
+    Assemble all the information, select the LLM model, and build the HTTP request ready to send
+ """
+ if not is_any_api_key(llm_kwargs['api_key']):
+ raise AssertionError("你提供了错误的API_KEY。\n\n1. 临时解决方案:直接在输入区键入api_key,然后回车提交。\n\n2. 长效解决方案:在config.py中配置。")
+
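+    # select_api_key picks one key suited to the requested model from the (possibly comma-separated) api_key string.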
+ api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {api_key}"
+ }
+ if API_ORG.startswith('org-'): headers.update({"OpenAI-Organization": API_ORG})
+ if llm_kwargs['llm_model'].startswith('azure-'):
+ headers.update({"api-key": api_key})
+ if llm_kwargs['llm_model'] in AZURE_CFG_ARRAY.keys():
+ azure_api_key_unshared = AZURE_CFG_ARRAY[llm_kwargs['llm_model']]["AZURE_API_KEY"]
+ headers.update({"api-key": azure_api_key_unshared})
+
+ base64_images = []
+ for image_path in image_paths:
+ base64_images.append(encode_image(image_path))
+
+ messages = []
+ what_i_ask_now = {}
+ what_i_ask_now["role"] = "user"
+ what_i_ask_now["content"] = []
+ what_i_ask_now["content"].append({
+ "type": "text",
+ "text": inputs
+ })
+
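+    # Attach every uploaded image as a base64 data URL, as expected by the OpenAI vision API.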
+ for image_path, base64_image in zip(image_paths, base64_images):
+ what_i_ask_now["content"].append({
+ "type": "image_url",
+ "image_url": {
+ "url": f"data:image/jpeg;base64,{base64_image}"
+ }
+ })
+
+ messages.append(what_i_ask_now)
+ model = llm_kwargs['llm_model']
+ if llm_kwargs['llm_model'].startswith('api2d-'):
+ model = llm_kwargs['llm_model'][len('api2d-'):]
+
+ payload = {
+ "model": model,
+ "messages": messages,
+ "temperature": llm_kwargs['temperature'], # 1.0,
+ "top_p": llm_kwargs['top_p'], # 1.0,
+ "n": 1,
+ "stream": True,
+ "max_tokens": get_max_token(llm_kwargs),
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ }
+ try:
+ print(f" {llm_kwargs['llm_model']} : {inputs[:100]} ..........")
+ except:
+        print('The input may contain garbled characters.')
+ return headers, payload, api_key
+
+
diff --git a/toolbox.py b/toolbox.py
index 30f7176..3f3c68b 100644
--- a/toolbox.py
+++ b/toolbox.py
@@ -279,9 +279,12 @@ def text_divide_paragraph(text):
     if '```' in text:
         # careful input
-        return pre + text + suf
+        return text
+    elif '</div>' in text:
+        # careful input
+        return text
     else:
-        # wtf input
+        # whatever input
         lines = text.split("\n")
         for i, line in enumerate(lines):
             lines[i] = lines[i].replace(" ", "&nbsp;")