diff --git a/README.md b/README.md
index a00b4da5..937554c6 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,7 @@
 > [!IMPORTANT]
 > Latest on the `frontier` dev branch (2024.12.9): added the conversation timeline feature and improved xelatex paper translation
 > Latest in the `wiki` docs (2024.12.5): updated the ollama integration guide
+> Latest on the `master` branch (2024.12.19): released version 3.91 and updated the one-click install script on the release page
 >
 > 2024.10.10: After a sudden power outage, the file server providing the [whl packages](https://drive.google.com/file/d/19U_hsLoMrjOlQSzYS3pzWX9fTzyusArP/view?usp=sharing) was restored on an emergency basis
 > 2024.10.8: Version 3.90 adds preliminary support for llama-index; version 3.80 adds a secondary plugin menu (see the wiki for details)
diff --git a/config.py b/config.py
index 0ddf715d..913182c9 100644
--- a/config.py
+++ b/config.py
@@ -7,10 +7,10 @@
     Configuration reading priority: environment variable > config_private.py > config.py
 """
 
-# [step 1-1]>> (Connect to GPT and similar models) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789". In rare cases you also need to fill in an organization (formatted like org-123456789abcdefghijklmno); scroll down and look for the API_ORG setting
+# [step 1-1]>> ( Connect to GPT and similar models ) API_KEY = "sk-123456789xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx123456789". In rare cases you also need to fill in an organization (formatted like org-123456789abcdefghijklmno); scroll down and look for the API_ORG setting
 API_KEY = "在此处填写APIKEY"    # Multiple API keys may be given at once, separated by commas, e.g. API_KEY = "sk-openaikey1,sk-openaikey2,fkxxxx-api2dkey3,azure-apikey4"
 
-# [step 1-2]>> (Chinese-native LLMs are recommended!) Connect to the Tongyi Qianwen online model; get an api-key at https://dashscope.console.aliyun.com/
+# [step 1-2]>> ( Connect to Tongyi qwen-max ) Connect to the Tongyi Qianwen online model; get an api-key at https://dashscope.console.aliyun.com/
 DASHSCOPE_API_KEY = "" # Alibaba DashScope API_KEY
 
 # [step 2]>> Change to True to apply a proxy; leave unchanged if deploying directly on an overseas server, and also leave unchanged when using local or region-unrestricted models
diff --git a/request_llms/bridge_all.py b/request_llms/bridge_all.py
index 1aef7089..20d7798b 100644
--- a/request_llms/bridge_all.py
+++ b/request_llms/bridge_all.py
@@ -274,6 +274,7 @@ model_info = {
         "openai_disable_system_prompt": True,
         "openai_disable_stream": True,
     },
+
     "o1-mini": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
@@ -285,6 +286,28 @@ model_info = {
         "openai_disable_stream": True,
     },
 
+    "o1-2024-12-17": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 200000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+        "openai_disable_system_prompt": True,
+        "openai_disable_stream": True,
+    },
+
+    "o1": {
+        "fn_with_ui": chatgpt_ui,
+        "fn_without_ui": chatgpt_noui,
+        "endpoint": openai_endpoint,
+        "max_token": 200000,
+        "tokenizer": tokenizer_gpt4,
+        "token_cnt": get_token_num_gpt4,
+        "openai_disable_system_prompt": True,
+        "openai_disable_stream": True,
+    },
+
     "gpt-4-turbo": {
         "fn_with_ui": chatgpt_ui,
         "fn_without_ui": chatgpt_noui,
diff --git a/shared_utils/key_pattern_manager.py b/shared_utils/key_pattern_manager.py
index d0d7f6d4..e0d101d1 100644
--- a/shared_utils/key_pattern_manager.py
+++ b/shared_utils/key_pattern_manager.py
@@ -79,7 +79,7 @@ def select_api_key(keys, llm_model):
     key_list = keys.split(',')
 
     if llm_model.startswith('gpt-') or llm_model.startswith('chatgpt-') or \
-        llm_model.startswith('one-api-') or llm_model.startswith('o1-'):
+        llm_model.startswith('one-api-') or llm_model == 'o1' or llm_model.startswith('o1-'):
        for k in key_list:
            if is_openai_api_key(k):
                avail_key_list.append(k)
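
The two new `model_info` entries follow the shape of the neighboring `o1-mini` block: routed through the ChatGPT bridge functions with the GPT-4 tokenizer and a 200000-token limit, and with both `openai_disable_system_prompt` and `openai_disable_stream` set, since the o1 models did not accept `system` messages or streamed responses when they first shipped. As a rough, hypothetical illustration (not the project's actual bridge code), flags like these are typically consumed when assembling the request payload:

```python
from typing import Any, Dict

def build_openai_payload(model_entry: Dict[str, Any], model: str,
                         system_prompt: str, user_msg: str) -> Dict[str, Any]:
    """Illustrative sketch: how openai_disable_* flags could shape a chat request."""
    if model_entry.get("openai_disable_system_prompt"):
        # Fold the system prompt into the user turn instead of sending a system role.
        messages = [{"role": "user", "content": f"{system_prompt}\n\n{user_msg}"}]
    else:
        messages = [{"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_msg}]
    return {
        "model": model,
        "messages": messages,
        # Ask for a single non-streamed response when streaming is disabled.
        "stream": not model_entry.get("openai_disable_stream", False),
    }

# Hypothetical entry matching the shape of the new "o1" block above.
o1_entry = {"openai_disable_system_prompt": True, "openai_disable_stream": True}
print(build_openai_payload(o1_entry, "o1", "You are a helpful assistant.", "Hello"))
```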
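
The key_pattern_manager.py tweak is needed because `llm_model.startswith('o1-')` never matches the new bare model name `o1`, so requests to `o1` would otherwise skip the OpenAI keys in the comma-separated API_KEY string that config.py describes. Below is a minimal, self-contained sketch of the routing behavior after this patch; `looks_like_openai_key` is a simplified stand-in for the project's `is_openai_api_key` helper, and the key strings are placeholders:

```python
import random
import re

def looks_like_openai_key(key: str) -> bool:
    # Simplified stand-in for is_openai_api_key(); the real validator is stricter.
    return bool(re.match(r"^sk-[A-Za-z0-9_-]{20,}$", key.strip()))

def pick_key_for_model(keys: str, llm_model: str) -> str:
    # Mirrors the patched condition: the bare name "o1" now routes to OpenAI keys,
    # alongside gpt-*, chatgpt-*, one-api-* and the dated o1-* variants.
    key_list = [k.strip() for k in keys.split(",") if k.strip()]
    if (llm_model.startswith("gpt-") or llm_model.startswith("chatgpt-")
            or llm_model.startswith("one-api-")
            or llm_model == "o1" or llm_model.startswith("o1-")):
        candidates = [k for k in key_list if looks_like_openai_key(k)]
    else:
        candidates = key_list
    if not candidates:
        raise ValueError(f"No usable API key for model {llm_model!r}")
    return random.choice(candidates)

# Placeholder keys, in the same comma-separated format config.py documents.
keys = "sk-openaikey1xxxxxxxxxxxxxxxxxxxx,azure-apikey4"
print(pick_key_for_model(keys, "o1"))        # matches only after this patch
print(pick_key_for_model(keys, "o1-mini"))   # already matched before
```

The random choice among matching keys is purely illustrative; the point is only that the bare name "o1" now falls into the OpenAI branch instead of receiving no key at all.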