diff --git a/request_llms/bridge_deepseekcoder.py b/request_llms/bridge_deepseekcoder.py
index 97919407..2242eec7 100644
--- a/request_llms/bridge_deepseekcoder.py
+++ b/request_llms/bridge_deepseekcoder.py
@@ -19,7 +19,7 @@ def download_huggingface_model(model_name, max_retry, local_dir):
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-class GetONNXGLMHandle(LocalLLMHandle):
+class GetCoderLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
         # πŸƒβ€β™‚οΈπŸƒβ€β™‚οΈπŸƒβ€β™‚οΈ ε­θΏ›η¨‹ζ‰§θ‘Œ
@@ -85,4 +85,4 @@ class GetONNXGLMHandle(LocalLLMHandle):
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name, history_format='chatglm3')
\ No newline at end of file
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetCoderLMHandle, model_name, history_format='chatglm3')
\ No newline at end of file
diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py
index e6da4b75..bfa3c14a 100644
--- a/request_llms/bridge_llama2.py
+++ b/request_llms/bridge_llama2.py
@@ -12,7 +12,7 @@ from threading import Thread
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-class GetONNXGLMHandle(LocalLLMHandle):
+class GetLlamaHandle(LocalLLMHandle):
 
     def load_model_info(self):
         # πŸƒβ€β™‚οΈπŸƒβ€β™‚οΈπŸƒβ€β™‚οΈ ε­θΏ›η¨‹ζ‰§θ‘Œ
@@ -87,4 +87,4 @@ class GetONNXGLMHandle(LocalLLMHandle):
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
\ No newline at end of file
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetLlamaHandle, model_name)
\ No newline at end of file
diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py
index afd886bf..85a4d80c 100644
--- a/request_llms/bridge_qwen.py
+++ b/request_llms/bridge_qwen.py
@@ -15,7 +15,7 @@ from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-class GetONNXGLMHandle(LocalLLMHandle):
+class GetQwenLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
         # πŸƒβ€β™‚οΈπŸƒβ€β™‚οΈπŸƒβ€β™‚οΈ ε­θΏ›η¨‹ζ‰§θ‘Œ
@@ -64,4 +64,4 @@ class GetONNXGLMHandle(LocalLLMHandle):
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» GPT-Academic Interface
 # ------------------------------------------------------------------------------------------------------------------------
-predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetONNXGLMHandle, model_name)
\ No newline at end of file
+predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
\ No newline at end of file
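Taken together, the three hunks converge on one pattern: each local-model bridge defines its own subclass of `LocalLLMHandle`, named after the model it loads rather than the shared `GetONNXGLMHandle` name (apparently a copy-paste leftover from the ChatGLM ONNX bridge), and exposes the two module-level entry points via `get_local_llm_predict_fns`. A minimal sketch of that wiring, assuming only what the hunks above show — the `model_name` value and the method body are placeholders, and `LocalLLMHandle` defines further hooks not visible in this diff:

```python
# Sketch of the shared bridge pattern; placeholders marked, not a drop-in module.
from request_llms.local_llm_class import LocalLLMHandle, get_local_llm_predict_fns

model_name = "qwen"  # placeholder; each bridge module defines its own value


class GetQwenLMHandle(LocalLLMHandle):
    def load_model_info(self):
        # πŸƒβ€β™‚οΈπŸƒβ€β™‚οΈπŸƒβ€β™‚οΈ ε­θΏ›η¨‹ζ‰§θ‘Œ ("runs in the child process", per the diff's comment)
        ...  # model metadata setup elided; see the bridge modules for the real bodies


# Binds the handle class to the two entry points GPT-Academic expects from every
# bridge module; bridge_deepseekcoder.py additionally passes history_format='chatglm3'.
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
```

Since `get_local_llm_predict_fns` takes the handle class as an argument, the rename is behavior-preserving; it only gives each bridge a distinct, accurate class name in tracebacks and imports.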