From c43e22bc4198e358caa12cdf09a06444b85588a5 Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Fri, 19 May 2023 10:46:12 +0800
Subject: [PATCH] change claude model name to stack-claude

---
 config.py                                     | 12 ++++----
 request_llm/README.md                         | 25 ++++++++++++++++
 request_llm/bridge_all.py                     | 29 ++++++++++---------
 request_llm/bridge_newbing.py                 |  2 +-
 ...bridge_claude.py => bridge_stackclaude.py} | 12 ++++----
 5 files changed, 52 insertions(+), 28 deletions(-)
 rename request_llm/{bridge_claude.py => bridge_stackclaude.py} (97%)

diff --git a/config.py b/config.py
index 99b72d91..baaa4102 100644
--- a/config.py
+++ b/config.py
@@ -44,9 +44,10 @@ WEB_PORT = -1
 # Retry limit when OpenAI does not respond (network lag, proxy failure, expired KEY)
 MAX_RETRY = 2
 
-# OpenAI model selection (gpt-4 is currently only open to approved applicants; to try gpt-4, consider api2d)
+# Model selection
 LLM_MODEL = "gpt-3.5-turbo" # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "claude"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
+# P.S. other available models also include ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 
 # Execution device for local LLM models such as ChatGLM: CPU/GPU
 LOCAL_MODEL_DEVICE = "cpu" # or "cuda"
@@ -76,7 +77,6 @@ NEWBING_COOKIES = """
 your bing cookies here
 """
 
-# slack-claude bot
-# See https://zhuanlan.zhihu.com/p/627485689 for how to fill in the ids below
-CLAUDE_BOT_ID = ''
-SLACK_USER_TOKEN = ''
\ No newline at end of file
+# Slack Claude bot; see request_llm/README.md for a usage tutorial
+SLACK_CLAUDE_BOT_ID = ''
+SLACK_CLAUDE_USER_TOKEN = ''
diff --git a/request_llm/README.md b/request_llm/README.md
index 4a912d10..545bc1ff 100644
--- a/request_llm/README.md
+++ b/request_llm/README.md
@@ -13,6 +13,31 @@ LLM_MODEL = "chatglm"
 `python main.py`
 ```
 
+## Claude-Stack
+
+- Refer to this tutorial to obtain them: https://zhuanlan.zhihu.com/p/627485689
+    - 1. SLACK_CLAUDE_BOT_ID
+    - 2. SLACK_CLAUDE_USER_TOKEN
+
+- Add the tokens to config.py
+
+## Newbing
+
+- Use a cookie editor to obtain the cookie (JSON)
+- Add the cookie (JSON) to config.py (NEWBING_COOKIES)
+
+## Moss
+- Use docker-compose
+
+## RWKV
+- Use docker-compose
+
+## LLAMA
+- Use docker-compose
+
+## Pangu
+- Use docker-compose
+
 ---
 
 ## Text-Generation-UI (TGUI, under debugging, not yet usable)
diff --git a/request_llm/bridge_all.py b/request_llm/bridge_all.py
index 55605921..0c468125 100644
--- a/request_llm/bridge_all.py
+++ b/request_llm/bridge_all.py
@@ -22,9 +22,6 @@ from .bridge_chatglm import predict as chatglm_ui
 from .bridge_newbing import predict_no_ui_long_connection as newbing_noui
 from .bridge_newbing import predict as newbing_ui
 
-from .bridge_claude import predict_no_ui_long_connection as claude_noui
-from .bridge_claude import predict as claude_ui
-
 # from .bridge_tgui import predict_no_ui_long_connection as tgui_noui
 # from .bridge_tgui import predict as tgui_ui
 
@@ -133,15 +130,7 @@ model_info = {
         "tokenizer": tokenizer_gpt35,
         "token_cnt": get_token_num_gpt35,
     },
-    # claude
-    "claude": {
-        "fn_with_ui": claude_ui,
-        "fn_without_ui": claude_noui,
-        "endpoint": None,
-        "max_token": 4096,
-        "tokenizer": tokenizer_gpt35,
-        "token_cnt": get_token_num_gpt35,
-    },
+
 }
 
 
@@ -198,8 +187,20 @@ if "moss" in AVAIL_LLM_MODELS:
             "token_cnt": get_token_num_gpt35,
         },
     })
-
-
+if "stack-claude" in AVAIL_LLM_MODELS:
+    from .bridge_stackclaude import predict_no_ui_long_connection as claude_noui
+    from .bridge_stackclaude import predict as claude_ui
+    # claude
+    model_info.update({
+        "stack-claude": {
+            "fn_with_ui": claude_ui,
+            "fn_without_ui": claude_noui,
+            "endpoint": None,
+            "max_token": 8192,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        }
+    })
 
 
 def LLM_CATCH_EXCEPTION(f):
diff --git a/request_llm/bridge_newbing.py b/request_llm/bridge_newbing.py
index dca74850..2136f01b 100644
--- a/request_llm/bridge_newbing.py
+++ b/request_llm/bridge_newbing.py
@@ -153,7 +153,7 @@ class NewBingHandle(Process):
             # Enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
             self.child.send(f'[Local Message] Newbing failed {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
diff --git a/request_llm/bridge_claude.py b/request_llm/bridge_stackclaude.py
similarity index 97%
rename from request_llm/bridge_claude.py
rename to request_llm/bridge_stackclaude.py
index 4e12bc90..65ea8812 100644
--- a/request_llm/bridge_claude.py
+++ b/request_llm/bridge_stackclaude.py
@@ -9,8 +9,6 @@ from toolbox import get_conf
 from slack_sdk.errors import SlackApiError
 from slack_sdk.web.async_client import AsyncWebClient
 import asyncio
-import sys
-sys.path.append('..')
 
 
 """
@@ -38,7 +36,7 @@ class SlackClient(AsyncWebClient):
     CHANNEL_ID = None
 
     async def open_channel(self):
-        response = await self.conversations_open(users=get_conf('CLAUDE_BOT_ID')[0])
+        response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID')[0])
         self.CHANNEL_ID = response["channel"]["id"]
 
     async def chat(self, text):
@@ -53,7 +51,7 @@ class SlackClient(AsyncWebClient):
             # TODO: history is not supported for now, because history leaks between users when several people share the same channel
             resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
             msg = [msg for msg in resp["messages"]
-                   if msg.get("user") == get_conf('CLAUDE_BOT_ID')[0]]
+                   if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')[0]]
             return msg
         except (SlackApiError, KeyError) as e:
             raise RuntimeError(f"Failed to fetch Slack messages.")
@@ -174,8 +172,8 @@ class ClaudeHandle(Process):
             self.proxies_https = proxies['https']
 
         try:
-            SLACK_USER_TOKEN, = get_conf('SLACK_USER_TOKEN')
-            self.claude_model = SlackClient(token=SLACK_USER_TOKEN, proxy=self.proxies_https)
+            SLACK_CLAUDE_USER_TOKEN, = get_conf('SLACK_CLAUDE_USER_TOKEN')
+            self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
             print('Claude component initialized successfully.')
         except:
             self.success = False
@@ -190,7 +188,7 @@ class ClaudeHandle(Process):
             # Enter the task-waiting state
             asyncio.run(self.async_run())
         except Exception:
-            tb_str = '```\n' + trimmed_format_exc() + '```'
+            tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
            self.child.send(f'[Local Message] Claude failed {tb_str}.')
             self.child.send('[Fail]')
             self.child.send('[Finish]')
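
---

Note for reviewers (not part of the patch): a minimal config.py sketch for exercising the renamed model after this change is applied. Both Slack values below are placeholders; real ones come from the tutorial linked in request_llm/README.md (Slack user OAuth tokens begin with `xoxp-`).

```python
# Minimal sketch, assuming defaults everywhere else in config.py.
# The two Slack values are placeholders -- fill them in per the tutorial
# referenced in request_llm/README.md.
LLM_MODEL = "stack-claude"                            # model selected by default
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "stack-claude"]  # must include "stack-claude"
SLACK_CLAUDE_BOT_ID = "U01ABCDEFGH"                   # placeholder: Claude bot's Slack member ID
SLACK_CLAUDE_USER_TOKEN = "xoxp-0000000000-..."       # placeholder: Slack user OAuth token
```

Because bridge_all.py now imports bridge_stackclaude only inside the `if "stack-claude" in AVAIL_LLM_MODELS:` guard, users who leave the model out of AVAIL_LLM_MODELS never import slack_sdk at startup.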