Fix the PyTorch loading issue when importing plugins

binary-husky
2023-11-13 00:15:15 +08:00
parent 7e56ace2c0
commit b9b7bf38ab
6 changed files with 45 additions and 24 deletions
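
The change is the same in each bridge file: from transformers import AutoModel, AutoTokenizer used to sit at module top level, so merely importing the bridge (which happens whenever plugins are scanned) loaded PyTorch as a side effect. The import now lives inside load_model_and_tokenizer, which runs only in the worker subprocess. A minimal sketch of the pattern, with HeavyModelHandle as a hypothetical stand-in for the LocalLLMHandle subclasses touched here:

# Sketch only: HeavyModelHandle is hypothetical; what matters is where
# the transformers import sits.
class HeavyModelHandle:
    def load_model_and_tokenizer(self):
        # Deferred import: PyTorch loads only when the model actually starts,
        # inside the worker subprocess, never at plugin-import time.
        from transformers import AutoModel, AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
        model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True).eval()
        return model, tokenizer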

@@ -2,7 +2,6 @@ model_name = "ChatGLM"
 cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
-from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
@@ -23,6 +22,7 @@ class GetGLM2Handle(LocalLLMHandle):
         import os, glob
         import os
         import platform
+        from transformers import AutoModel, AutoTokenizer
         LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE')
         if LOCAL_MODEL_QUANT == "INT4": # INT4
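
A quick way to see the cost the removed top-level import carried (a sketch under the assumption of a torch-backed transformers 4.x install, not code from this repo): resolving AutoModel pulls torch into the process even though no model is ever built.

import sys

assert "torch" not in sys.modules   # fresh interpreter
from transformers import AutoModel  # resolving the name imports the modeling code
print("torch" in sys.modules)       # True: PyTorch was loaded as a side effect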

@@ -2,7 +2,6 @@ model_name = "ChatGLM3"
 cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
-from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
@@ -20,6 +19,7 @@ class GetGLM3Handle(LocalLLMHandle):
     def load_model_and_tokenizer(self):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ Runs in the child process
+        from transformers import AutoModel, AutoTokenizer
         import os, glob
         import os
         import platform

@@ -1,8 +1,6 @@
-from transformers import AutoModel, AutoTokenizer
 import time
 import threading
-import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
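
local_llm_class.py drives the model through a subprocess and a Pipe, which is why deferring the import pays off: the heavy load happens only in the child. A stripped-down sketch of that split (the _worker function and the gpt2 tokenizer are illustrative, not the repo's actual handle):

from multiprocessing import Process, Pipe

def _worker(conn):
    # The heavy import happens only inside this child process.
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    conn.send(tokenizer.tokenize(conn.recv()))
    conn.close()

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = Process(target=_worker, args=(child_conn,))
    p.start()
    parent_conn.send("hello world")
    print(parent_conn.recv())  # tokens computed in the child; the parent never imports torch
    p.join()

Note that with the fork start method (the Linux default) the child inherits whatever the parent has already imported, so this split only helps because the commit keeps transformers out of the parent's top-level imports.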