change claude model name to stack-claude

parent: be6b42324d
commit: c43e22bc41
config.py

@@ -44,9 +44,10 @@ WEB_PORT = -1
 # Retry limit if OpenAI does not respond (network lag, proxy failure, invalid KEY)
 MAX_RETRY = 2
 
-# OpenAI model selection (gpt-4 is currently only open to approved applicants; to try gpt-4 you can use api2d)
+# Model selection
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+# P.S. other available models also include ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
 # Execution device (CPU/GPU) for local LLM models such as ChatGLM
 LOCAL_MODEL_DEVICE = "cpu" # options: "cuda"
@@ -76,7 +77,6 @@ NEWBING_COOKIES = """
 your bing cookies here
 """
 
-# slack-claude bot
-# See https://zhuanlan.zhihu.com/p/627485689 for how to fill in the IDs below
-CLAUDE_BOT_ID = ''
-SLACK_USER_TOKEN = ''
+# Slack Claude bot; see request_llm/README.md for a detailed setup guide
+SLACK_CLAUDE_BOT_ID = ''
+SLACK_CLAUDE_USER_TOKEN = ''
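For orientation, a filled-in config.py after this commit could look like the sketch below. The variable names come from the hunks above; the token and ID values are placeholders (the xoxp-style token and the U-prefixed member ID are illustrative assumptions, not real credentials).

```python
# Hypothetical excerpt of a user's config.py after this commit; values are placeholders.
LLM_MODEL = "stack-claude"        # make the Slack Claude bridge the default model
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "newbing", "stack-claude"]  # keep whatever else you use

# Slack Claude bot; see request_llm/README.md for a detailed setup guide
SLACK_CLAUDE_BOT_ID = "U0123ABCDEF"          # member ID of the Claude bot in your workspace
SLACK_CLAUDE_USER_TOKEN = "xoxp-REPLACE-ME"  # your Slack user OAuth token
```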
request_llm/README.md

@@ -13,6 +13,31 @@ LLM_MODEL = "chatglm"
 `python main.py`
 ```
 
+## Claude-Stack
+
+- See this tutorial to obtain the two IDs: https://zhuanlan.zhihu.com/p/627485689
+- 1. SLACK_CLAUDE_BOT_ID
+- 2. SLACK_CLAUDE_USER_TOKEN
+
+- Add the tokens to config.py
+
+## Newbing
+
+- Use a cookie editor to obtain the cookies (JSON)
+- Add the cookies (JSON) to config.py (NEWBING_COOKIES)
+
+## Moss
+- Use docker-compose
+
+## RWKV
+- Use docker-compose
+
+## LLAMA
+- Use docker-compose
+
+## Pangu
+- Use docker-compose
+
 
 ---
 ## Text-Generation-UI (TGUI, under debugging, not yet available)
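Before wiring the two values into config.py, it can help to sanity-check them against the Slack API. Below is a minimal, hypothetical check script using slack_sdk (the same package the bridge imports); the token and bot ID shown are placeholders you replace with your own.

```python
# Hypothetical sanity check for SLACK_CLAUDE_USER_TOKEN / SLACK_CLAUDE_BOT_ID (placeholders below).
import asyncio
from slack_sdk.web.async_client import AsyncWebClient

SLACK_CLAUDE_USER_TOKEN = "xoxp-REPLACE-ME"
SLACK_CLAUDE_BOT_ID = "U0123ABCDEF"

async def main():
    client = AsyncWebClient(token=SLACK_CLAUDE_USER_TOKEN)
    auth = await client.auth_test()                                     # fails fast if the token is invalid
    print("authenticated as:", auth["user"])
    resp = await client.conversations_open(users=SLACK_CLAUDE_BOT_ID)   # open a DM with the bot
    print("DM channel id:", resp["channel"]["id"])

asyncio.run(main())
```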
request_llm/bridge_all.py

@@ -22,9 +22,6 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
 from .bridge_newbing import predict as newbing_ui
 
-from .bridge_claude import predict_no_ui_long_connection as claude_noui
-from .bridge_claude import predict as claude_ui
-
 # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 # from .bridge_tgui import predict as tgui_ui
 
@@ -133,15 +130,7 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    # claude
-    "claude": {
-        "fn_with_ui": claude_ui,
-        "fn_without_ui": claude_noui,
-        "endpoint": None,
-        "max_token": 4096,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
 }
 
 
@@ -198,8 +187,20 @@ if "moss" in AVAIL_LLM_MODELS:
             "token_cnt": get_token_num_gpt35,
         },
     })
+if "stack-claude" in AVAIL_LLM_MODELS:
+    from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
+    from .bridge_stackclaude import predict as claude_ui
+    # claude
+    model_info.update({
+        "stack-claude": {
+            "fn_with_ui": claude_ui,
+            "fn_without_ui": claude_noui,
+            "endpoint": None,
+            "max_token": 8192,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
+    })
 
 
 def LLM_CATCH_EXCEPTION(f):
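Each `model_info` entry bundles the UI and no-UI predict functions with a token budget and tokenizer for one model name. As a rough illustration of how such a registry gets consumed (a sketch, not this file's actual dispatch code; the function name and signature below are invented for the example):

```python
# Illustrative sketch of consuming a registry shaped like model_info (not the repo's dispatcher).
def call_without_ui(model_info: dict, llm_model: str, prompt: str, *args, **kwargs):
    if llm_model not in model_info:
        raise KeyError(f"unknown model: {llm_model}")
    entry = model_info[llm_model]
    # Respect the per-model context budget registered alongside the callables.
    if entry["token_cnt"](prompt) > entry["max_token"]:
        raise ValueError(f"{llm_model}: prompt exceeds max_token={entry['max_token']}")
    # "fn_without_ui" is the no-UI predict function registered for this model.
    return entry["fn_without_ui"](prompt, *args, **kwargs)
```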
request_llm/bridge_newbing.py

@@ -153,7 +153,7 @@ class NewBingHandle(Process):
             # Enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             self.child.send(f'[Local Message] Newbing失败 {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
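The only change in this hunk is padding the traceback fence with newlines. The reason is markdown rendering: a ``` fence only opens or closes a code block when it starts at the beginning of a line, so gluing it directly onto surrounding text can break the block. A tiny sketch of the difference (the strings here are illustrative):

```python
tb = "Traceback (most recent call last): ..."  # placeholder traceback text

old = "```\n" + tb + "```"        # closing fence is glued to the last traceback line
new = "\n```\n" + tb + "\n```\n"  # both fences start on their own lines even after concatenation

print("[Local Message] Newbing失败 " + new)    # the chat UI can render this as a proper fenced block
```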
request_llm/bridge_stackclaude.py

@@ -9,8 +9,6 @@ from toolbox import get_conf
 from slack_sdk.errors import SlackApiError
 from slack_sdk.web.async_client import AsyncWebClient
 import asyncio
-import sys
-sys.path.append('..')
 
 
 """
@@ -38,7 +36,7 @@ class SlackClient(AsyncWebClient):
     CHANNEL_ID = None
 
     async def open_channel(self):
-        response = await self.conversations_open(users=get_conf('CLAUDE_BOT_ID')[0])
+        response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
         self.CHANNEL_ID = response["channel"]["id"]
 
     async def chat(self, text):
@@ -53,7 +51,7 @@ class SlackClient(AsyncWebClient):
             # TODO: history is not supported yet, because when multiple users share one channel their histories leak into each other
             resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
             msg = [msg for msg in resp["messages"]
-                   if msg.get("user") == get_conf('CLAUDE_BOT_ID')[0]]
+                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
             return msg
         except (SlackApiError, KeyError) as e:
             raise RuntimeError(f"获取Slack消息失败。")
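Taken together, `SlackClient` drives Claude through a Slack DM: open a channel to the bot, post the prompt, then poll the channel for the bot's reply. A minimal, hypothetical usage sketch follows (the token is a placeholder, the import path assumes the repo root is on sys.path, and the polling here calls `conversations_history` directly rather than any helper method of the class):

```python
# Hypothetical driver for the SlackClient defined in bridge_stackclaude.py (values are placeholders).
import asyncio
from request_llm.bridge_stackclaude import SlackClient

async def ask_claude(prompt: str) -> list:
    client = SlackClient(token="xoxp-REPLACE-ME")   # proxy=... can be passed as the bridge does
    await client.open_channel()                     # resolves the DM channel with the Claude bot
    await client.chat(prompt)                       # posts the prompt into that channel
    await asyncio.sleep(10)                         # crude wait; the real bridge polls in a loop
    resp = await client.conversations_history(channel=client.CHANNEL_ID, limit=1)
    return [m.get("text", "") for m in resp["messages"]]

print(asyncio.run(ask_claude("Hello, Claude!")))
```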
request_llm/bridge_stackclaude.py

@@ -174,8 +172,8 @@ class ClaudeHandle(Process):
         self.proxies_https = proxies['https']
 
         try:
-            SLACK_USER_TOKEN, = get_conf('SLACK_USER_TOKEN')
-            self.claude_model = SlackClient(token=SLACK_USER_TOKEN, proxy=self.proxies_https)
+            SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN')
+            self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
             print('Claude组件初始化成功。')
         except:
             self.success = False
@@ -190,7 +188,7 @@ class ClaudeHandle(Process):
             # Enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             self.child.send(f'[Local Message] Claude失败 {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
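Both handles report results to the main process over a multiprocessing pipe, using plain strings with '[Fail]' and '[Finish]' as sentinels, as the sends above show. Below is a sketch of what the receiving side of such a protocol can look like; it illustrates the convention visible in these hunks, not the repo's actual reader loop.

```python
# Illustrative reader for a child process that streams strings and terminates with '[Fail]'/'[Finish]'.
def drain_child(parent_conn):
    """Collect chunks until '[Finish]'; report whether a '[Fail]' sentinel was seen."""
    chunks, failed = [], False
    while True:
        msg = parent_conn.recv()      # blocks until the child sends the next string
        if msg == '[Finish]':
            break                     # request finished (successfully or not)
        if msg == '[Fail]':
            failed = True             # remember the failure, keep draining until '[Finish]'
            continue
        chunks.append(msg)            # ordinary content, e.g. '[Local Message] Claude失败 ...'
    return ''.join(chunks), failed
```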