Merge branch 'frontier'
commit 4421219c2b

config.py (14 changed lines)
@@ -83,9 +83,9 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 
 # 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
 LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
 AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
                     "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
-# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
+# P.S. 其他可用的模型还包括 ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
 # "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
 
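The comment above states an invariant without enforcing it: LLM_MODEL must be a member of AVAIL_LLM_MODELS. A hypothetical startup check (not part of this commit; the variable names mirror the config) would fail fast on a misconfigured deployment:

```python
# Hypothetical sanity check -- not in this diff, shown for illustration only.
LLM_MODEL = "gpt-3.5-turbo"
AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "gpt-4", "gpt-3.5-random"]

assert LLM_MODEL in AVAIL_LLM_MODELS, \
    f"LLM_MODEL {LLM_MODEL!r} must be listed in AVAIL_LLM_MODELS"
```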
@@ -125,6 +125,11 @@ AUTHENTICATION = []
 CUSTOM_PATH = "/"
 
+# HTTPS 秘钥和证书(不需要修改)
+SSL_KEYFILE = ""
+SSL_CERTFILE = ""
+
 # 极少数情况下,openai的官方KEY需要伴随组织编码(格式如org-xxxxxxxxxxxxxxxxxxxxxxxx)使用
 API_ORG = ""
 
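The two new SSL settings are consumed by `demo.queue(...).launch(...)` in the main.py hunk near the end of this diff, where an empty string is mapped to None so Gradio keeps serving plain HTTP by default. A sketch of that mapping, assuming the same names:

```python
# Sketch of how main.py (later in this diff) interprets the new settings.
SSL_KEYFILE = ""    # e.g. "/etc/ssl/private/server.key"; "" disables HTTPS
SSL_CERTFILE = ""   # e.g. "/etc/ssl/certs/server.crt";   "" disables HTTPS

launch_kwargs = dict(
    ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
    ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
)
```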
@@ -140,7 +145,7 @@ AZURE_API_KEY = "填入azure openai api的密钥" # 建议直接在API_KEY处
 AZURE_ENGINE = "填入你亲手写的部署名" # 读 docs\use_azure.md
 
 
-# 使用Newbing
+# 使用Newbing (不推荐使用,未来将删除)
 NEWBING_STYLE = "creative" # ["creative", "balanced", "precise"]
 NEWBING_COOKIES = """
 put your new bing cookies here
@@ -198,6 +203,9 @@ PATH_LOGGING = "gpt_log"
 WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"]
 
 
+# 自定义按钮的最大数量限制
+NUM_CUSTOM_BASIC_BTN = 4
+
 """
 在线大模型配置关联关系示意图
 │
@@ -91,8 +91,15 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
     import core_functional
     importlib.reload(core_functional)    # 热更新prompt
     core_functional = core_functional.get_core_functions()
-    if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
-    inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
-    if core_functional[additional_fn].get("AutoClearHistory", False):
-        history = []
-    return inputs, history
+    addition = chatbot._cookies['customize_fn_overwrite']
+    if additional_fn in addition:
+        # 自定义功能
+        inputs = addition[additional_fn]["Prefix"] + inputs + addition[additional_fn]["Suffix"]
+        return inputs, history
+    else:
+        # 预制功能
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # 获取预处理函数(如果有的话)
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+        if core_functional[additional_fn].get("AutoClearHistory", False):
+            history = []
+        return inputs, history
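The rewritten handler checks the per-user cookie first: if the clicked button's name appears in `customize_fn_overwrite`, its Prefix/Suffix wrap the input directly and the predefined entry (including its PreProcess and AutoClearHistory options) is bypassed. A condensed, runnable sketch of that dispatch, with hypothetical sample data:

```python
# Condensed sketch of the new dispatch; the two dicts below are hypothetical samples.
def apply_fn(additional_fn, inputs, history, custom, core):
    if additional_fn in custom:              # user-defined button: wrap and return
        entry = custom[additional_fn]
        return entry["Prefix"] + inputs + entry["Suffix"], history
    entry = core[additional_fn]              # predefined button: full pipeline
    if "PreProcess" in entry:
        inputs = entry["PreProcess"](inputs)
    if entry.get("AutoClearHistory", False):
        history = []
    return entry["Prefix"] + inputs + entry["Suffix"], history

core = {"润色": {"Prefix": "Polish this text:\n", "Suffix": ""}}
custom = {"自定义按钮1": {"Prefix": "Summarize:\n", "Suffix": "\n(in English)"}}
print(apply_fn("自定义按钮1", "hello", [], custom, core))
```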
@@ -69,12 +69,15 @@ def request_gpt_model_in_new_thread_with_ui_alive(
     yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
     executor = ThreadPoolExecutor(max_workers=16)
     mutable = ["", time.time(), ""]
+    # 看门狗耐心
+    watch_dog_patience = 5
+    # 请求任务
     def _req_gpt(inputs, history, sys_prompt):
         retry_op = retry_times_at_unknown_error
         exceeded_cnt = 0
         while True:
             # watchdog error
-            if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
+            if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
                 raise RuntimeError("检测到程序终止。")
             try:
                 # 【第一种情况】:顺利完成
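Both request helpers now compare the heartbeat timestamp in `mutable` against a named `watch_dog_patience` instead of the magic number 5. The pattern: the UI generator refreshes `mutable[1]` on every screen update, and the worker thread raises once the heartbeat goes stale, which is how an abandoned session eventually cancels its request. A self-contained sketch of the idea:

```python
import threading, time

# Sketch of the watchdog pattern: the worker aborts when the heartbeat goes stale.
watch_dog_patience = 5
mutable = ["", time.time(), ""]       # [partial_reply, last_heartbeat, status]

def _req_gpt_stub():
    while True:
        if time.time() - mutable[1] > watch_dog_patience:
            raise RuntimeError("检测到程序终止。")   # same error the diff raises
        time.sleep(0.5)                              # ... request work happens here

threading.Thread(target=_req_gpt_stub, daemon=True).start()
for _ in range(4):
    mutable[1] = time.time()          # heartbeat written by the UI loop
    time.sleep(0.25)
```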
@@ -193,6 +196,9 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
     # 跨线程传递
     mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
 
+    # 看门狗耐心
+    watch_dog_patience = 5
+
     # 子线程任务
     def _req_gpt(index, inputs, history, sys_prompt):
         gpt_say = ""
@@ -201,7 +207,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         mutable[index][2] = "执行中"
         while True:
             # watchdog error
-            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
+            if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience:
                 raise RuntimeError("检测到程序终止。")
             try:
                 # 【第一种情况】:顺利完成
@@ -275,7 +281,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         # 在前端打印些好玩的东西
         for thread_index, _ in enumerate(worker_done):
             print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
-                replace('\n', '').replace('```', '...').replace(
+                replace('\n', '').replace('`', '.').replace(
                     ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
             observe_win.append(print_something_really_funny)
         # 在前端打印些好玩的东西
@@ -301,7 +307,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
         gpt_res = f.result()
         chatbot.append([inputs_show_user, gpt_res])
         yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
-        time.sleep(0.3)
+        time.sleep(0.5)
     return gpt_response_collection
 
@@ -342,10 +342,33 @@ def merge_tex_files(project_foler, main_file, mode):
     pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
     match_opt1 = pattern_opt1.search(main_file)
     match_opt2 = pattern_opt2.search(main_file)
+    if (match_opt1 is None) and (match_opt2 is None):
+        # "Cannot find paper abstract section!"
+        main_file = insert_abstract(main_file)
+        match_opt1 = pattern_opt1.search(main_file)
+        match_opt2 = pattern_opt2.search(main_file)
     assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
     return main_file
 
 
+insert_missing_abs_str = r"""
+\begin{abstract}
+The GPT-Academic program cannot find abstract section in this paper.
+\end{abstract}
+"""
+
+def insert_abstract(tex_content):
+    if "\\maketitle" in tex_content:
+        # find the position of "\maketitle"
+        find_index = tex_content.index("\\maketitle")
+        # find the nearest ending line
+        end_line_index = tex_content.find("\n", find_index)
+        # insert "abs_str" on the next line
+        modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
+        return modified_tex
+    else:
+        return tex_content
 """
 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 Post process
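insert_abstract exists because the Latex plugin asserts that an abstract can be located; papers that only use `\maketitle` previously died on that assert. The placeholder is spliced in right after the line containing `\maketitle`. A quick behavioural check, assuming `insert_abstract` and `insert_missing_abs_str` from the hunk above are in scope:

```python
# Quick check of the insertion behaviour (definitions taken from the hunk above).
tex = "\\documentclass{article}\n\\begin{document}\n\\maketitle\nBody text.\n\\end{document}\n"
patched = insert_abstract(tex)
assert "\\begin{abstract}" in patched
assert patched.index("\\maketitle") < patched.index("\\begin{abstract}")
assert insert_abstract("no title here") == "no title here"   # untouched without \maketitle
```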
@@ -1,4 +1,106 @@
-import time, logging, json
+import time, logging, json, sys, struct
+import numpy as np
+from scipy.io.wavfile import WAVE_FORMAT
+
+
+def write_numpy_to_wave(filename, rate, data, add_header=False):
+    """
+    Write a NumPy array as a WAV file.
+    """
+    def _array_tofile(fid, data):
+        # ravel gives a c-contiguous buffer
+        fid.write(data.ravel().view('b').data)
+
+    if hasattr(filename, 'write'):
+        fid = filename
+    else:
+        fid = open(filename, 'wb')
+
+    fs = rate
+
+    try:
+        dkind = data.dtype.kind
+        if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
+                                                 data.dtype.itemsize == 1)):
+            raise ValueError("Unsupported data type '%s'" % data.dtype)
+
+        header_data = b''
+
+        header_data += b'RIFF'
+        header_data += b'\x00\x00\x00\x00'
+        header_data += b'WAVE'
+
+        # fmt chunk
+        header_data += b'fmt '
+        if dkind == 'f':
+            format_tag = WAVE_FORMAT.IEEE_FLOAT
+        else:
+            format_tag = WAVE_FORMAT.PCM
+        if data.ndim == 1:
+            channels = 1
+        else:
+            channels = data.shape[1]
+        bit_depth = data.dtype.itemsize * 8
+        bytes_per_second = fs*(bit_depth // 8)*channels
+        block_align = channels * (bit_depth // 8)
+
+        fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
+                                     bytes_per_second, block_align, bit_depth)
+        if not (dkind == 'i' or dkind == 'u'):
+            # add cbSize field for non-PCM files
+            fmt_chunk_data += b'\x00\x00'
+
+        header_data += struct.pack('<I', len(fmt_chunk_data))
+        header_data += fmt_chunk_data
+
+        # fact chunk (non-PCM files)
+        if not (dkind == 'i' or dkind == 'u'):
+            header_data += b'fact'
+            header_data += struct.pack('<II', 4, data.shape[0])
+
+        # check data size (needs to be immediately before the data chunk)
+        if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
+            raise ValueError("Data exceeds wave file size limit")
+        if add_header:
+            fid.write(header_data)
+            # data chunk
+            fid.write(b'data')
+            fid.write(struct.pack('<I', data.nbytes))
+        if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
+                                           sys.byteorder == 'big'):
+            data = data.byteswap()
+        _array_tofile(fid, data)
+
+        if add_header:
+            # Determine file size and place it in correct
+            # position at start of the file.
+            size = fid.tell()
+            fid.seek(4)
+            fid.write(struct.pack('<I', size-8))
+
+    finally:
+        if not hasattr(filename, 'write'):
+            fid.close()
+        else:
+            fid.seek(0)
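write_numpy_to_wave is a trimmed-down variant of scipy.io.wavfile.write with one extra switch: with the default add_header=False it emits nothing but the raw little-endian sample bytes, which is what the Aliyun streaming endpoint and the WebRTC VAD below consume; with add_header=True it produces a regular RIFF/WAVE file. A hypothetical round-trip illustrating the difference, assuming the function above is in scope:

```python
import numpy as np

# Hypothetical usage of write_numpy_to_wave from the hunk above.
tone = (np.sin(2 * np.pi * 440 * np.arange(16000) / 16000) * 32767).astype(np.int16)

write_numpy_to_wave("tone.pcm", 16000, tone)                   # headerless PCM
with open("tone.pcm", "rb") as f:
    assert len(f.read()) == tone.nbytes                        # samples only

write_numpy_to_wave("tone.wav", 16000, tone, add_header=True)  # RIFF/WAVE file
with open("tone.wav", "rb") as f:
    assert f.read(4) == b'RIFF'                                # chunked header
```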
+
+
+def is_speaker_speaking(vad, data, sample_rate):
+    # Function to detect if the speaker is speaking
+    # The WebRTC VAD only accepts 16-bit mono PCM audio,
+    # sampled at 8000, 16000, 32000 or 48000 Hz.
+    # A frame must be either 10, 20, or 30 ms in duration:
+    frame_duration = 30
+    n_bit_each = int(sample_rate * frame_duration / 1000)*2   # x2 because audio is 16 bit (2 bytes)
+    res_list = []
+    for t in range(len(data)):
+        if t!=0 and t % n_bit_each == 0:
+            res_list.append(vad.is_speech(data[t-n_bit_each:t], sample_rate))
+
+    info = ''.join(['^' if r else '.' for r in res_list])
+    info = info[:10]
+    if any(res_list):
+        return True, info
+    else:
+        return False, info
+
 
 class AliyunASR():
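is_speaker_speaking slices the PCM buffer into 30 ms frames, the only granularities (10/20/30 ms) webrtcvad accepts, and returns both a verdict and a small `^`/`.` activity strip that the UI later renders via visualize_audio. The frame arithmetic and the underlying API:

```python
import webrtcvad

# Frame arithmetic from the hunk above: 30 ms of 16-bit mono PCM at 16 kHz.
sample_rate, frame_duration = 16000, 30
n_bytes_each = int(sample_rate * frame_duration / 1000) * 2   # 480 samples * 2 bytes
assert n_bytes_each == 960

vad = webrtcvad.Vad()
vad.set_mode(1)                        # 0 = most permissive, 3 = most aggressive
silence = b"\x00\x00" * 480            # one 30 ms frame of digital silence
print(vad.is_speech(silence, sample_rate))   # -> False
```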
@@ -66,12 +168,22 @@ class AliyunASR():
                     on_close=self.test_on_close,
                     callback_args=[uuid.hex]
                 )
+        timeout_limit_second = 20
         r = sr.start(aformat="pcm",
+                timeout=timeout_limit_second,
                 enable_intermediate_result=True,
                 enable_punctuation_prediction=True,
                 enable_inverse_text_normalization=True)
 
+        import webrtcvad
+        vad = webrtcvad.Vad()
+        vad.set_mode(1)
+
+        is_previous_frame_transmitted = False   # 上一帧是否有人说话
+        previous_frame_data = None
+        echo_cnt = 0        # 在没有声音之后,继续向服务器发送n次音频数据
+        echo_cnt_max = 4    # 在没有声音之后,继续向服务器发送n次音频数据
+        keep_alive_last_send_time = time.time()
         while not self.stop:
             # time.sleep(self.capture_interval)
             audio = rad.read(uuid.hex)
@@ -79,12 +191,32 @@ class AliyunASR():
                 # convert to pcm file
                 temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
                 dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000
-                io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
+                write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata)
                 # read pcm binary
                 with open(temp_file, "rb") as f: data = f.read()
-                # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
-                slices = zip(*(iter(data),) * 640) # 640个字节为一组
-                for i in slices: sr.send_audio(bytes(i))
+                is_speaking, info = is_speaker_speaking(vad, data, NEW_SAMPLERATE)
+
+                if is_speaking or echo_cnt > 0:
+                    # 如果话筒激活 / 如果处于回声收尾阶段
+                    echo_cnt -= 1
+                    if not is_previous_frame_transmitted:   # 上一帧没有人声,但是我们把上一帧同样加上
+                        if previous_frame_data is not None: data = previous_frame_data + data
+                    if is_speaking:
+                        echo_cnt = echo_cnt_max
+                    slices = zip(*(iter(data),) * 640)   # 640个字节为一组
+                    for i in slices: sr.send_audio(bytes(i))
+                    keep_alive_last_send_time = time.time()
+                    is_previous_frame_transmitted = True
+                else:
+                    is_previous_frame_transmitted = False
+                    echo_cnt = 0
+                    # 保持链接激活,即使没有声音,也根据时间间隔,发送一些音频片段给服务器
+                    if time.time() - keep_alive_last_send_time > timeout_limit_second/2:
+                        slices = zip(*(iter(data),) * 640)   # 640个字节为一组
+                        for i in slices: sr.send_audio(bytes(i))
+                        keep_alive_last_send_time = time.time()
+                        is_previous_frame_transmitted = True
+                self.audio_shape = info
             else:
                 time.sleep(0.1)
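The rewritten send loop adds three behaviours on top of "send everything": frames are streamed only while the VAD reports speech; after speech stops, up to echo_cnt_max extra buffers are flushed (and the last silent buffer is prepended) so word boundaries are not clipped; and if nothing has been sent for timeout_limit_second/2, one buffer goes out anyway to keep the Aliyun session from hitting its 20 s timeout. A condensed model of that gate, under the same names as the diff:

```python
import time

# Condensed model of the gating logic above (my sketch, not the project's code).
echo_cnt_max, timeout_limit_second = 4, 20

def should_send(is_speaking, state, now):
    if is_speaking or state["echo_cnt"] > 0:
        state["echo_cnt"] = echo_cnt_max if is_speaking else state["echo_cnt"] - 1
        state["last_send"] = now
        return True
    state["echo_cnt"] = 0
    if now - state["last_send"] > timeout_limit_second / 2:    # keep-alive path
        state["last_send"] = now
        return True
    return False

state = {"echo_cnt": 0, "last_send": time.time()}
t0 = time.time()
print([should_send(s, state, t0) for s in (True, False, False, False, False, False)])
# -> [True, True, True, True, True, False]: four echo frames trail the speech
```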
@@ -35,7 +35,7 @@ class RealtimeAudioDistribution():
     def read(self, uuid):
         if uuid in self.data:
             res = self.data.pop(uuid)
-            print('\r read-', len(res), '-', max(res), end='', flush=True)
+            # print('\r read-', len(res), '-', max(res), end='', flush=True)
         else:
             res = None
         return res
@@ -6,6 +6,7 @@ import threading, time
 import numpy as np
 from .live_audio.aliyunASR import AliyunASR
 import json
+import re
 
 class WatchDog():
     def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
@@ -38,10 +39,22 @@ def chatbot2history(chatbot):
     history = []
     for c in chatbot:
         for q in c:
-            if q not in ["[请讲话]", "[等待GPT响应]", "[正在等您说完问题]"]:
+            if q in ["[ 请讲话 ]", "[ 等待GPT响应 ]", "[ 正在等您说完问题 ]"]:
+                continue
+            elif q.startswith("[ 正在等您说完问题 ]"):
+                continue
+            else:
                 history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
     return history
 
+def visualize_audio(chatbot, audio_shape):
+    if len(chatbot) == 0: chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
+    chatbot[-1] = list(chatbot[-1])
+    p1 = '「'
+    p2 = '」'
+    chatbot[-1][-1] = re.sub(p1+r'(.*)'+p2, '', chatbot[-1][-1])
+    chatbot[-1][-1] += (p1+f"`{audio_shape}`"+p2)
+
 class AsyncGptTask():
     def __init__(self) -> None:
         self.observe_future = []
@@ -81,8 +94,9 @@ class InterviewAssistant(AliyunASR):
         self.capture_interval = 0.5 # second
         self.stop = False
         self.parsed_text = ""       # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入
-        self.parsed_sentence = ""   # 某段话的整个句子,由 test_on_sentence_end() 写入
+        self.parsed_sentence = ""   # 某段话的整个句子, 由 test_on_sentence_end() 写入
         self.buffered_sentence = "" #
+        self.audio_shape = ""       # 音频的可视化表现, 由 audio_convertion_thread() 写入
         self.event_on_result_chg = threading.Event()
         self.event_on_entence_end = threading.Event()
         self.event_on_commit_question = threading.Event()
@@ -117,7 +131,7 @@ class InterviewAssistant(AliyunASR):
     def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
         # main plugin function
         self.init(chatbot)
-        chatbot.append(["[请讲话]", "[正在等您说完问题]"])
+        chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         self.plugin_wd.begin_watch()
         self.agt = AsyncGptTask()
@@ -157,14 +171,18 @@ class InterviewAssistant(AliyunASR):
 
             self.commit_wd.begin_watch()
             chatbot[-1] = list(chatbot[-1])
-            chatbot[-1] = [self.buffered_sentence, "[等待GPT响应]"]
+            chatbot[-1] = [self.buffered_sentence, "[ 等待GPT响应 ]"]
             yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
             # add gpt task 创建子线程请求gpt,避免线程阻塞
             history = chatbot2history(chatbot)
             self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
 
             self.buffered_sentence = ""
-            chatbot.append(["[请讲话]", "[正在等您说完问题]"])
+            chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+
+        if not self.event_on_result_chg.is_set() and not self.event_on_entence_end.is_set() and not self.event_on_commit_question.is_set():
+            visualize_audio(chatbot, self.audio_shape)
             yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
         if len(self.stop_msg) != 0:
|
|||||||
import nls
|
import nls
|
||||||
from scipy import io
|
from scipy import io
|
||||||
except:
|
except:
|
||||||
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
|
chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
|
||||||
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@@ -14,7 +14,7 @@ RUN python3 -m pip install colorama Markdown pygments pymupdf
 RUN python3 -m pip install python-docx moviepy pdfminer
 RUN python3 -m pip install zh_langchain==0.2.1 pypinyin
 RUN python3 -m pip install rarfile py7zr
-RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
+RUN python3 -m pip install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
 # 下载分支
 WORKDIR /gpt
 RUN git clone --depth=1 https://github.com/binary-husky/gpt_academic.git
@@ -5,6 +5,9 @@
 
 FROM fuqingxu/python311_texlive_ctex:latest
 
+# 删除文档文件以节约空间
+RUN rm -rf /usr/local/texlive/2023/texmf-dist/doc
+
 # 指定路径
 WORKDIR /gpt
 
main.py (105 changed lines)
@@ -1,4 +1,7 @@
 import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
+import pickle
+import codecs
+import base64
 
 def main():
     import gradio as gr
@@ -10,7 +13,7 @@ def main():
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
     ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
-    DARK_MODE, = get_conf('DARK_MODE')
+    DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
 
     # 如果WEB_PORT是-1, 则随机选取WEB端口
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
@@ -68,9 +71,11 @@ def main():
         CHATBOT_HEIGHT /= 2
 
     cancel_handles = []
+    customize_btns = {}
+    predefined_btns = {}
     with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
-        secret_css, dark_mode = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False)
+        secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
         cookies = gr.State(load_chat_cookies())
         with gr_L1():
             with gr_L2(scale=2, elem_id="gpt-chat"):
@@ -94,11 +99,16 @@ def main():
         status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
         with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
             with gr.Row():
+                for k in range(NUM_CUSTOM_BASIC_BTN):
+                    customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮')
+                    customize_btn.style(size="sm")
+                    customize_btns.update({"自定义按钮" + str(k+1): customize_btn})
                 for k in functional:
                     if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                     variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                     functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
                     functional[k]["Button"].style(size="sm")
+                    predefined_btns.update({k: functional[k]["Button"]})
         with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
             with gr.Row():
                 gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
@@ -149,6 +159,8 @@ def main():
             theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
             checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
                                           value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
+            checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
+                                            value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
             dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
             dark_mode_btn.click(None, None, None, _js="""() => {
                 if (document.querySelectorAll('.dark').length) {
@@ -173,6 +185,77 @@ def main():
                 stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
                 clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
 
+        def to_cookie_str(d):
+            # Pickle the dictionary and encode it as a string
+            pickled_dict = pickle.dumps(d)
+            cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
+            return cookie_value
+
+        def from_cookie_str(c):
+            # Decode the base64-encoded string and unpickle it into a dictionary
+            pickled_dict = base64.b64decode(c.encode('utf-8'))
+            return pickle.loads(pickled_dict)
+
+        with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
+            with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
+                with gr.Row() as row:
+                    with gr.Column(scale=10):
+                        AVAIL_BTN = [btn for btn in customize_btns.keys()] + [k for k in functional]
+                        basic_btn_dropdown = gr.Dropdown(AVAIL_BTN, value="自定义按钮1", label="选择一个需要自定义基础功能区按钮").style(container=False)
+                        basic_fn_title = gr.Textbox(show_label=False, placeholder="输入新按钮名称", lines=1).style(container=False)
+                        basic_fn_prefix = gr.Textbox(show_label=False, placeholder="输入新提示前缀", lines=4).style(container=False)
+                        basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
+                    with gr.Column(scale=1, min_width=70):
+                        basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
+                        basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm")
+
+                        def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
+                            ret = {}
+                            customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
+                            customize_fn_overwrite_.update({
+                                basic_btn_dropdown_: {
+                                    "Title": basic_fn_title,
+                                    "Prefix": basic_fn_prefix,
+                                    "Suffix": basic_fn_suffix,
+                                }
+                            })
+                            cookies_.update(customize_fn_overwrite_)
+                            if basic_btn_dropdown_ in customize_btns:
+                                ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                            else:
+                                ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                            ret.update({cookies: cookies_})
+                            try: persistent_cookie_ = from_cookie_str(persistent_cookie_)   # persistent cookie to dict
+                            except: persistent_cookie_ = {}
+                            persistent_cookie_["custom_bnt"] = customize_fn_overwrite_      # dict update new value
+                            persistent_cookie_ = to_cookie_str(persistent_cookie_)          # dict to persistent cookie
+                            ret.update({persistent_cookie: persistent_cookie_})             # write persistent cookie
+                            return ret
+
+                        def reflesh_btn(persistent_cookie_, cookies_):
+                            ret = {}
+                            for k in customize_btns:
+                                ret.update({customize_btns[k]: gr.update(visible=False, value="")})
+
+                            try: persistent_cookie_ = from_cookie_str(persistent_cookie_)   # persistent cookie to dict
+                            except: return ret
+
+                            customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
+                            cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
+                            ret.update({cookies: cookies_})
+
+                            for k, v in persistent_cookie_["custom_bnt"].items():
+                                if v['Title'] == "": continue
+                                if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
+                                else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
+                            return ret
+
+                        basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
+                        h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
+                                                   [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
+                        h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")   # save persistent cookie
+
         # 功能区显示开关与功能区的互动
         def fn_area_visibility(a):
             ret = {}
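to_cookie_str and from_cookie_str shuttle the button definitions through pickle and base64 so the whole dict survives as a browser cookie, which the `h.then(...)` hook then stores client-side via the new setCookie helper. The round-trip, plus the one caveat worth flagging:

```python
import base64, pickle

def to_cookie_str(d):
    return base64.b64encode(pickle.dumps(d)).decode('utf-8')

def from_cookie_str(c):
    return pickle.loads(base64.b64decode(c.encode('utf-8')))

d = {"custom_bnt": {"自定义按钮1": {"Title": "Shorten", "Prefix": "", "Suffix": ""}}}
assert from_cookie_str(to_cookie_str(d)) == d
# Caveat: pickle.loads runs on client-controlled cookie bytes, so a crafted
# cookie can execute arbitrary code; json would be the safer serializer here.
```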
@@ -186,6 +269,14 @@ def main():
             if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
             return ret
         checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
+
+        # 功能区显示开关与功能区的互动
+        def fn_area_visibility_2(a):
+            ret = {}
+            ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
+            return ret
+        checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
+
         # 整理反复出现的控件句柄组合
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
         output_combo = [cookies, chatbot, history, status]
@@ -209,6 +300,9 @@ def main():
             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
             click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
             cancel_handles.append(click_handle)
+        for btn in customize_btns.values():
+            click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
+            cancel_handles.append(click_handle)
         # 文件上传区,接收文件后与chatbot的互动
         file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
         file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
@@ -307,6 +401,10 @@ def main():
             }
         }
     }"""
+    load_cookie_js = """(persistent_cookie) => {
+        return getCookie("persistent_cookie");
+    }"""
+    demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
     demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js)    # 配置暗色主题或亮色主题
     demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
 
@@ -327,6 +425,9 @@ def main():
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
         quiet=True,
         server_name="0.0.0.0",
+        ssl_keyfile=None if SSL_KEYFILE == "" else SSL_KEYFILE,
+        ssl_certfile=None if SSL_CERTFILE == "" else SSL_CERTFILE,
+        ssl_verify=False,
         server_port=PORT,
         favicon_path="docs/logo.png",
         auth=AUTHENTICATION if len(AUTHENTICATION) != 0 else None,
@@ -134,6 +134,15 @@ model_info = {
         "tokenizer": tokenizer_gpt4,
         "token_cnt": get_token_num_gpt4,
     },
+
+    "gpt-3.5-random": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 4096,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+    },
 
     # azure openai
     "azure-gpt-3.5":{
@@ -18,6 +18,7 @@ import logging
 import traceback
 import requests
 import importlib
+import random
 
 # config_private.py放自己的秘密如API和代理网址
 # 读取时首先看是否存在私密的config_private配置文件(不受git管控),如果有,则覆盖原config文件
@@ -39,6 +40,21 @@ def get_full_error(chunk, stream_response):
         break
     return chunk
 
+def decode_chunk(chunk):
+    # 提前读取一些信息 (用于判断异常)
+    chunk_decoded = chunk.decode()
+    chunkjson = None
+    has_choices = False
+    has_content = False
+    has_role = False
+    try:
+        chunkjson = json.loads(chunk_decoded[6:])
+        has_choices = 'choices' in chunkjson
+        if has_choices: has_content = "content" in chunkjson['choices'][0]["delta"]
+        if has_choices: has_role = "role" in chunkjson['choices'][0]["delta"]
+    except:
+        pass
+    return chunk_decoded, chunkjson, has_choices, has_content, has_role
 
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
     """
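decode_chunk parses each SSE chunk exactly once, so the streaming loop below can branch on has_content/has_role without re-running json.loads; the `[6:]` slice skips the 6-byte `data: ` prefix of OpenAI-style server-sent events. What it computes for a typical chunk:

```python
import json

# What decode_chunk extracts from a typical OpenAI-style SSE chunk.
chunk = b'data: {"choices":[{"delta":{"content":"Hi"}}]}'
chunk_decoded = chunk.decode()
chunkjson = json.loads(chunk_decoded[6:])          # skip the "data: " prefix
has_choices = 'choices' in chunkjson
has_content = has_choices and "content" in chunkjson['choices'][0]["delta"]
has_role = has_choices and "role" in chunkjson['choices'][0]["delta"]
print(has_choices, has_content, has_role)          # True True False
```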
@@ -191,7 +207,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
                 yield from update_ui(chatbot=chatbot, history=history, msg="非OpenAI官方接口返回了错误:" + chunk.decode()) # 刷新界面
                 return
 
-        chunk_decoded = chunk.decode()
+        # 提前读取一些信息 (用于判断异常)
+        chunk_decoded, chunkjson, has_choices, has_content, has_role = decode_chunk(chunk)
+
         if is_head_of_the_stream and (r'"object":"error"' not in chunk_decoded) and (r"content" not in chunk_decoded):
             # 数据流的第一帧不携带content
             is_head_of_the_stream = False; continue
@@ -199,15 +217,23 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if chunk:
             try:
                 # 前者是API2D的结束条件,后者是OPENAI的结束条件
-                if ('data: [DONE]' in chunk_decoded) or (len(json.loads(chunk_decoded[6:])['choices'][0]["delta"]) == 0):
+                if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
                     # 判定为数据流的结束,gpt_replying_buffer也写完了
                     logging.info(f'[response] {gpt_replying_buffer}')
                     break
                 # 处理数据流的主体
-                chunkjson = json.loads(chunk_decoded[6:])
                 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
                 # 如果这里抛出异常,一般是文本过长,详情见get_full_error的输出
-                gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+                if has_content:
+                    # 正常情况
+                    gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+                elif has_role:
+                    # 一些第三方接口的出现这样的错误,兼容一下吧
+                    continue
+                else:
+                    # 一些垃圾第三方接口的出现这样的错误
+                    gpt_replying_buffer = gpt_replying_buffer + chunkjson['choices'][0]["delta"]["content"]
+
                 history[-1] = gpt_replying_buffer
                 chatbot[-1] = (history[-2], history[-1])
                 yield from update_ui(chatbot=chatbot, history=history, msg=status_text) # 刷新界面
|
|||||||
what_i_ask_now["role"] = "user"
|
what_i_ask_now["role"] = "user"
|
||||||
what_i_ask_now["content"] = inputs
|
what_i_ask_now["content"] = inputs
|
||||||
messages.append(what_i_ask_now)
|
messages.append(what_i_ask_now)
|
||||||
|
model = llm_kwargs['llm_model'].strip('api2d-')
|
||||||
|
if model == "gpt-3.5-random": # 随机选择, 绕过openai访问频率限制
|
||||||
|
model = random.choice([
|
||||||
|
"gpt-3.5-turbo",
|
||||||
|
"gpt-3.5-turbo-16k",
|
||||||
|
"gpt-3.5-turbo-0613",
|
||||||
|
"gpt-3.5-turbo-16k-0613",
|
||||||
|
"gpt-3.5-turbo-0301",
|
||||||
|
])
|
||||||
|
logging.info("Random select model:" + model)
|
||||||
|
|
||||||
payload = {
|
payload = {
|
||||||
"model": llm_kwargs['llm_model'].strip('api2d-'),
|
"model": model,
|
||||||
"messages": messages,
|
"messages": messages,
|
||||||
"temperature": llm_kwargs['temperature'], # 1.0,
|
"temperature": llm_kwargs['temperature'], # 1.0,
|
||||||
"top_p": llm_kwargs['top_p'], # 1.0,
|
"top_p": llm_kwargs['top_p'], # 1.0,
|
||||||
|
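One subtlety in the hunk above: `str.strip('api2d-')` removes any leading and trailing characters drawn from the set {a, p, i, 2, d, -}, not the literal prefix. It happens to behave correctly for every model name this codebase uses today, but the failure mode is worth knowing:

```python
# str.strip takes a character set, not a prefix.
assert "api2d-gpt-4".strip("api2d-") == "gpt-4"              # works by luck
assert "gpt-3.5-random".strip("api2d-") == "gpt-3.5-random"  # untouched, as needed
assert "api2d-davinci".strip("api2d-") == "vinc"             # surprise!
# str.removeprefix("api2d-") (Python 3.9+) would state the intent exactly.
```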
@@ -9,7 +9,9 @@ validate_path() # 返回项目根路径
 
 if __name__ == "__main__":
     from tests.test_utils import plugin_test
-    plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
+    # plugin_test(plugin='crazy_functions.函数动态生成->函数动态生成', main_input='交换图像的蓝色通道和红色通道', advanced_arg={"file_path_arg": "./build/ants.jpg"})
+
+    plugin_test(plugin='crazy_functions.Latex输出PDF结果->Latex翻译中文并重新编译PDF', main_input="2307.07522")
 
     # plugin_test(plugin='crazy_functions.虚空终端->虚空终端', main_input='修改api-key为sk-jhoejriotherjep')
@@ -10,9 +10,33 @@ function gradioApp() {
     return elem.shadowRoot ? elem.shadowRoot : elem;
 }
 
+function setCookie(name, value, days) {
+    var expires = "";
+
+    if (days) {
+        var date = new Date();
+        date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
+        expires = "; expires=" + date.toUTCString();
+    }
+
+    document.cookie = name + "=" + value + expires + "; path=/";
+}
+
+function getCookie(name) {
+    var decodedCookie = decodeURIComponent(document.cookie);
+    var cookies = decodedCookie.split(';');
+
+    for (var i = 0; i < cookies.length; i++) {
+        var cookie = cookies[i].trim();
+
+        if (cookie.indexOf(name + "=") === 0) {
+            return cookie.substring(name.length + 1, cookie.length);
+        }
+    }
+
+    return null;
+}
+
 function addCopyButton(botElement) {
     // https://github.com/GaiZhenbiao/ChuanhuChatGPT/tree/main/web_assets/javascript
     // Copy bot button
toolbox.py (12 changed lines)
@@ -621,10 +621,20 @@ def on_report_generated(cookies, files, chatbot):
 
 def load_chat_cookies():
     API_KEY, LLM_MODEL, AZURE_API_KEY = get_conf('API_KEY', 'LLM_MODEL', 'AZURE_API_KEY')
+    DARK_MODE, NUM_CUSTOM_BASIC_BTN = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN')
     if is_any_api_key(AZURE_API_KEY):
         if is_any_api_key(API_KEY): API_KEY = API_KEY + ',' + AZURE_API_KEY
         else: API_KEY = AZURE_API_KEY
-    return {'api_key': API_KEY, 'llm_model': LLM_MODEL}
+    customize_fn_overwrite_ = {}
+    for k in range(NUM_CUSTOM_BASIC_BTN):
+        customize_fn_overwrite_.update({
+            "自定义按钮" + str(k+1): {
+                "Title":  r"",
+                "Prefix": r"请在自定义菜单中定义提示词前缀.",
+                "Suffix": r"请在自定义菜单中定义提示词后缀",
+            }
+        })
+    return {'api_key': API_KEY, 'llm_model': LLM_MODEL, 'customize_fn_overwrite': customize_fn_overwrite_}
 
 def is_openai_api_key(key):
     CUSTOM_API_KEY_PATTERN, = get_conf('CUSTOM_API_KEY_PATTERN')
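With this change every fresh session cookie carries one placeholder entry per possible custom button, so the `customize_fn_overwrite` lookup added to handle_core_functionality (first hunk of this diff) never raises a KeyError. The resulting shape, abbreviated:

```python
# Shape returned by the new load_chat_cookies with NUM_CUSTOM_BASIC_BTN = 4
# (api_key abbreviated; placeholder text comes straight from the diff).
cookies = {
    'api_key': 'sk-...',
    'llm_model': 'gpt-3.5-turbo',
    'customize_fn_overwrite': {
        '自定义按钮1': {'Title': '', 'Prefix': '请在自定义菜单中定义提示词前缀.', 'Suffix': '请在自定义菜单中定义提示词后缀'},
        # ... '自定义按钮2' through '自定义按钮4' carry the same placeholders
    },
}
```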