From 2b917edf26502b2e3c1e81794093f18839cbc42e Mon Sep 17 00:00:00 2001
From: qingxu fu <505030475@qq.com>
Date: Sat, 11 Nov 2023 17:58:17 +0800
Subject: [PATCH] Fix local model compatibility on Windows
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 request_llms/bridge_chatglm.py     | 3 +--
 request_llms/bridge_chatglm3.py    | 3 +--
 request_llms/bridge_chatglmonnx.py | 3 +--
 request_llms/bridge_internlm.py    | 3 +--
 request_llms/bridge_llama2.py      | 3 +--
 request_llms/bridge_qwen.py        | 3 +--
 request_llms/local_llm_class.py    | 7 ++++---
 7 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/request_llms/bridge_chatglm.py b/request_llms/bridge_chatglm.py
index 16e1d8fc..83c50da1 100644
--- a/request_llms/bridge_chatglm.py
+++ b/request_llms/bridge_chatglm.py
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM2Handle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/bridge_chatglm3.py b/request_llms/bridge_chatglm3.py
index 461c3064..44656608 100644
--- a/request_llms/bridge_chatglm3.py
+++ b/request_llms/bridge_chatglm3.py
@@ -4,14 +4,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetGLM3Handle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/bridge_chatglmonnx.py b/request_llms/bridge_chatglmonnx.py
index 312c6846..4b905718 100644
--- a/request_llms/bridge_chatglmonnx.py
+++ b/request_llms/bridge_chatglmonnx.py
@@ -8,7 +8,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 from .chatglmoonx import ChatGLMModel, chat_template
 
@@ -17,7 +17,6 @@ from .chatglmoonx import ChatGLMModel, chat_template
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/bridge_internlm.py b/request_llms/bridge_internlm.py
index 073c193a..b831dc59 100644
--- a/request_llms/bridge_internlm.py
+++ b/request_llms/bridge_internlm.py
@@ -7,7 +7,7 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 # ------------------------------------------------------------------------------------------------------------------------
@@ -34,7 +34,6 @@ def combine_history(prompt, hist):
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetInternlmHandle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/bridge_llama2.py b/request_llms/bridge_llama2.py
index bc8ef7eb..e6da4b75 100644
--- a/request_llms/bridge_llama2.py
+++ b/request_llms/bridge_llama2.py
@@ -5,14 +5,13 @@ cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 from threading import Thread
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/bridge_qwen.py b/request_llms/bridge_qwen.py
index 62682cfa..29168f6d 100644
--- a/request_llms/bridge_qwen.py
+++ b/request_llms/bridge_qwen.py
@@ -8,14 +8,13 @@ import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
-from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns, SingletonLocalLLM
+from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
 
 
 # ------------------------------------------------------------------------------------------------------------------------
 # πŸ”ŒπŸ’» Local Model
 # ------------------------------------------------------------------------------------------------------------------------
-@SingletonLocalLLM
 class GetONNXGLMHandle(LocalLLMHandle):
 
     def load_model_info(self):
diff --git a/request_llms/local_llm_class.py b/request_llms/local_llm_class.py
index b6ce801e..fe6be961 100644
--- a/request_llms/local_llm_class.py
+++ b/request_llms/local_llm_class.py
@@ -76,7 +76,6 @@ class LocalLLMHandle(Process):
         self.parent_state, self.child_state = create_queue_pipe()
         # allow redirect_stdout
         self.std_tag = "[Subprocess Message] "
-        self.child.write = lambda x: self.child.send(self.std_tag + x)
         self.running = True
         self._model = None
         self._tokenizer = None
@@ -137,6 +136,8 @@ class LocalLLMHandle(Process):
     def run(self):
         # πŸƒβ€β™‚οΈπŸƒβ€β™‚οΈπŸƒβ€β™‚οΈ run in child process
         # η¬¬δΈ€ζ¬‘θΏθ‘ŒοΌŒεŠ θ½½ε‚ζ•°
+        self.child.flush = lambda *args: None
+        self.child.write = lambda x: self.child.send(self.std_tag + x)
         reset_tqdm_output()
         self.set_state("`ε°θ―•εŠ θ½½ζ¨‘εž‹`")
         try:
@@ -220,7 +221,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         refer to request_llms/bridge_all.py
         """
-        _llm_handle = LLMSingletonClass()
+        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
         if len(observe_window) >= 1:
             observe_window[0] = load_message + "\n\n" + _llm_handle.get_state()
         if not _llm_handle.running:
@@ -268,7 +269,7 @@ def get_local_llm_predict_fns(LLMSingletonClass, model_name, history_format='cla
         """
         chatbot.append((inputs, ""))
 
-        _llm_handle = LLMSingletonClass()
+        _llm_handle = SingletonLocalLLM(LLMSingletonClass)()
         chatbot[-1] = (inputs, load_message + "\n\n" + _llm_handle.get_state())
         yield from update_ui(chatbot=chatbot, history=[])
         if not _llm_handle.running:
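
Why this helps on Windows: multiprocessing there uses the "spawn" start method, so a Process
object is pickled before run() executes in the child, and a lambda bound to an instance
attribute in __init__ cannot be pickled. That is presumably why the stdout-redirect hooks are
now installed inside run() (already in the child), together with a no-op flush, presumably
because something later calls flush() on the redirected stream. The sketch below illustrates
the pattern with made-up names (PipedWorker, parent_conn, child_conn); it is a simplified
stand-in, not the project's actual LocalLLMHandle.

    import sys
    from multiprocessing import Process, Pipe

    class PipedWorker(Process):
        # Toy stand-in: forwards everything the child prints to the parent.
        def __init__(self):
            super().__init__(daemon=True)
            self.parent_conn, self.child_conn = Pipe()
            self.std_tag = "[Subprocess Message] "
            # Do NOT bind lambdas to self here: under the Windows "spawn" start method
            # this object is pickled before run() starts, and lambdas are not picklable.

        def run(self):
            # Runs in the child process, after unpickling, so lambdas are safe here.
            self.write = lambda x: self.child_conn.send(self.std_tag + x)
            self.flush = lambda *args: None   # print()/logging may call flush() on stdout
            sys.stdout = self                 # redirect prints through the pipe
            print("hello from the child")

    if __name__ == "__main__":                # guard is required with the spawn start method
        w = PipedWorker()
        w.start()
        print(w.parent_conn.recv())           # "[Subprocess Message] hello from the child"
        w.join()

The same code also runs under fork on Linux; moving the hook installation into run() simply
makes it work regardless of start method.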
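
Dropping the @SingletonLocalLLM class decorator fits the same picture: under spawn, pickling a
LocalLLMHandle instance resolves its class through the module-level name, and a decorator that
rebinds that name to a wrapper function can break that lookup. Applying the wrapper at call
time, as get_local_llm_predict_fns now does with SingletonLocalLLM(LLMSingletonClass)(), leaves
the class object itself untouched. Below is a minimal sketch of what such a call-time singleton
wrapper can look like; the real SingletonLocalLLM in request_llms/local_llm_class.py may differ
in detail, and GetGLM2Handle here is only a bare stand-in.

    _instances = {}

    def SingletonLocalLLM(cls):
        # Return a factory that creates cls at most once, then reuses the cached instance.
        def factory(*args, **kwargs):
            if cls not in _instances:
                _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]
        return factory

    class GetGLM2Handle:          # stand-in; the real class subclasses LocalLLMHandle (a Process)
        pass

    # Old pattern: "@SingletonLocalLLM" above the class definition rebinds the module-level name
    # GetGLM2Handle to the factory function (the likely source of the Windows pickling trouble).
    # New pattern: wrap only where an instance is needed.
    handle_a = SingletonLocalLLM(GetGLM2Handle)()
    handle_b = SingletonLocalLLM(GetGLM2Handle)()
    assert handle_a is handle_b   # the cache is keyed by the class, so both calls share one instance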