logging sys to loguru: stage 1 complete
@@ -9,6 +9,7 @@
     2. predict_no_ui_long_connection(...)
 """
 import tiktoken, copy, re
+from loguru import logger
 from functools import lru_cache
 from concurrent.futures import ThreadPoolExecutor
 from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask, read_one_api_model_name
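
For orientation, loguru's module-level logger is a near drop-in replacement for print-style status output: it needs no handler setup, and every call carries a severity level, which is what the rest of this commit relies on. A minimal sketch (the file sink and its rotation policy are illustrative, not part of this commit):

    from loguru import logger

    # loguru ships with a preconfigured stderr sink, so these calls
    # work with zero setup, unlike the stdlib logging module.
    logger.info("loading tokenizer")
    logger.warning("API_URL is deprecated, use API_URL_REDIRECT")
    logger.error("model registration failed")

    # Optional: also route INFO and above to a rotating file sink.
    logger.add("llm_bridge.log", rotation="10 MB", level="INFO")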
@@ -51,9 +52,9 @@ class LazyloadTiktoken(object):
     @staticmethod
     @lru_cache(maxsize=128)
     def get_encoder(model):
-        print('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数')
+        logger.info('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数')
         tmp = tiktoken.encoding_for_model(model)
-        print('加载tokenizer完毕')
+        logger.info('加载tokenizer完毕')
         return tmp

     def encode(self, *args, **kwargs):
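
The @lru_cache decorator is what makes LazyloadTiktoken lazy: the expensive tiktoken download and load runs once per model name, and every later call returns the memoized encoder. A standalone sketch of the same pattern, with an illustrative model name and English log text (the original messages above say "loading tokenizer, the first run may take a moment to download parameters" and "tokenizer loaded"):

    from functools import lru_cache

    import tiktoken
    from loguru import logger

    @lru_cache(maxsize=128)
    def get_encoder(model):
        # The first call per model may download tokenizer data; repeat
        # calls with the same model name hit the cache instead.
        logger.info(f"loading tokenizer for {model}, first run may download data")
        enc = tiktoken.encoding_for_model(model)
        logger.info("tokenizer loaded")
        return enc

    tokens = get_encoder("gpt-3.5-turbo").encode("hello world")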
@@ -83,7 +84,7 @@ try:
     API_URL = get_conf("API_URL")
     if API_URL != "https://api.openai.com/v1/chat/completions":
         openai_endpoint = API_URL
-        print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
+        logger.warning("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
 except:
     pass
 # 新版配置
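
logger.warning is the right level for this hunk: the legacy API_URL option ("Warning! The API_URL option will be deprecated, please switch to API_URL_REDIRECT") still works but is slated for removal. A hedged sketch of the same shim, with the bare except narrowed; get_conf and both key names come from the diff, while the Exception narrowing is a suggested refinement, not what the file does:

    from loguru import logger
    from toolbox import get_conf

    try:
        API_URL = get_conf("API_URL")
        if API_URL != "https://api.openai.com/v1/chat/completions":
            openai_endpoint = API_URL  # keep honoring the legacy override
            logger.warning("API_URL is deprecated, please switch to API_URL_REDIRECT")
    except Exception:
        pass  # the legacy key is absent: nothing to migrate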
@@ -662,7 +663,7 @@ if "newbing" in AVAIL_LLM_MODELS: # same with newbing-free
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
     try:
         from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
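
Every registration hunk from here down shares one shape: attempt an optional bridge import, extend the file's model_info registry, and on failure log the trimmed traceback instead of aborting startup, so one broken backend cannot take the rest down. A sketch of that shape, assuming the file's AVAIL_LLM_MODELS list and model_info dict; the fn_without_ui key is illustrative, and loguru's logger.exception(...) would be an alternative that attaches the full traceback automatically:

    from loguru import logger
    from toolbox import trimmed_format_exc

    if "chatglmft" in AVAIL_LLM_MODELS:
        try:
            from .bridge_chatglmft import predict_no_ui_long_connection as chatglmft_noui
            model_info.update({
                "chatglmft": {
                    "fn_without_ui": chatglmft_noui,  # illustrative key name
                }
            })
        except Exception:
            # trimmed_format_exc() returns a shortened traceback string;
            # logging it keeps startup alive when an optional backend breaks.
            logger.error(trimmed_format_exc())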
@@ -678,7 +679,7 @@ if "chatglmft" in AVAIL_LLM_MODELS: # same with newbing-free
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 上海AI-LAB书生大模型 -=-=-=-=-=-=-
 if "internlm" in AVAIL_LLM_MODELS:
     try:
@@ -695,7 +696,7 @@ if "internlm" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "chatglm_onnx" in AVAIL_LLM_MODELS:
     try:
         from .bridge_chatglmonnx import predict_no_ui_long_connection as chatglm_onnx_noui
@@ -711,7 +712,7 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 通义-本地模型 -=-=-=-=-=-=-
 if "qwen-local" in AVAIL_LLM_MODELS:
     try:
@@ -729,7 +730,7 @@ if "qwen-local" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 通义-在线模型 -=-=-=-=-=-=-
 if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-max" in AVAIL_LLM_MODELS: # zhipuai
     try:
@@ -765,7 +766,7 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 零一万物模型 -=-=-=-=-=-=-
 yi_models = ["yi-34b-chat-0205","yi-34b-chat-200k","yi-large","yi-medium","yi-spark","yi-large-turbo","yi-large-preview"]
 if any(item in yi_models for item in AVAIL_LLM_MODELS):
@@ -845,7 +846,7 @@ if any(item in yi_models for item in AVAIL_LLM_MODELS):
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 讯飞星火认知大模型 -=-=-=-=-=-=-
 if "spark" in AVAIL_LLM_MODELS:
     try:
@@ -863,7 +864,7 @@ if "spark" in AVAIL_LLM_MODELS:
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
@@ -880,7 +881,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): # 讯飞星火认知大模型
     try:
         from .bridge_spark import predict_no_ui_long_connection as spark_noui
@@ -915,7 +916,7 @@ if any(x in AVAIL_LLM_MODELS for x in ("sparkv3", "sparkv3.5", "sparkv4")): #
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 if "llama2" in AVAIL_LLM_MODELS: # llama2
     try:
         from .bridge_llama2 import predict_no_ui_long_connection as llama2_noui
@@ -931,7 +932,7 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 智谱 -=-=-=-=-=-=-
 if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
     try:
@@ -946,7 +947,7 @@ if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 幻方-深度求索大模型 -=-=-=-=-=-=-
 if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
     try:
@@ -963,7 +964,7 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
             }
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- 幻方-深度求索大模型在线API -=-=-=-=-=-=-
 if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
     try:
@@ -991,7 +992,7 @@ if "deepseek-chat" in AVAIL_LLM_MODELS or "deepseek-coder" in AVAIL_LLM_MODELS:
             },
         })
     except:
-        print(trimmed_format_exc())
+        logger.error(trimmed_format_exc())
 # -=-=-=-=-=-=- one-api 对齐支持 -=-=-=-=-=-=-
 for model in [m for m in AVAIL_LLM_MODELS if m.startswith("one-api-")]:
     # 为了更灵活地接入one-api多模型管理界面,设计了此接口,例子:AVAIL_LLM_MODELS = ["one-api-mixtral-8x7b(max_token=6666)"]
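
The one-api loop depends on read_one_api_model_name (imported from toolbox at the top of the file) to split a display name such as one-api-mixtral-8x7b(max_token=6666) into the raw model name and its token budget. A plausible sketch of that parsing, assuming only the format shown in the comment; the real helper may differ in details such as the default budget:

    import re

    def read_one_api_model_name(model):
        # "one-api-mixtral-8x7b(max_token=6666)" -> ("one-api-mixtral-8x7b", 6666)
        match = re.search(r"\(max_token=(\d+)\)\s*$", model)
        if match is None:
            return model, 4096  # assumed default when no budget is given
        return model[:match.start()], int(match.group(1))

    name, max_token = read_one_api_model_name("one-api-mixtral-8x7b(max_token=6666)")
    assert name == "one-api-mixtral-8x7b" and max_token == 6666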