Fix the PyTorch loading problem triggered when plugins are imported
@@ -2,7 +2,6 @@ model_name = "ChatGLM"
 cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 
-from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
@@ -23,6 +22,7 @@ class GetGLM2Handle(LocalLLMHandle):
         import os, glob
         import os
         import platform
+        from transformers import AutoModel, AutoTokenizer
         LOCAL_MODEL_QUANT, device = get_conf('LOCAL_MODEL_QUANT', 'LOCAL_MODEL_DEVICE')
 
         if LOCAL_MODEL_QUANT == "INT4": # INT4
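
Both hunks above make the same move: the module-level "from transformers import AutoModel, AutoTokenizer", which drags PyTorch in the moment the bridge module is imported, is removed, and the import is reissued inside load_model_and_tokenizer, which only runs once the model worker actually starts. A minimal sketch of this deferred-import pattern follows; only AutoModel, AutoTokenizer, and the method name come from the diff, while the class body and the checkpoint name are illustrative assumptions:

# Deferred-import sketch (simplified; not the repository's actual class).
model_name = "ChatGLM"
cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"

class GetGLM2HandleSketch:
    def load_model_and_tokenizer(self):
        # The heavy import now happens here, when the model is set up,
        # instead of at module import time in the main process.
        from transformers import AutoModel, AutoTokenizer
        tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
        model = AutoModel.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
        return model.eval(), tokenizer

With the import deferred, merely importing the bridge module (for example during plugin discovery) no longer initializes torch or CUDA in the parent process.
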
@@ -2,7 +2,6 @@ model_name = "ChatGLM3"
 cmd_to_install = "`pip install -r request_llms/requirements_chatglm.txt`"
 
 
-from transformers import AutoModel, AutoTokenizer
 from toolbox import get_conf, ProxyNetworkActivate
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
@@ -20,6 +19,7 @@ class GetGLM3Handle(LocalLLMHandle):
 
     def load_model_and_tokenizer(self):
         # 🏃♂️🏃♂️🏃♂️ runs in the child process
+        from transformers import AutoModel, AutoTokenizer
         import os, glob
         import os
         import platform
@@ -1,8 +1,6 @@
-
-from transformers import AutoModel, AutoTokenizer
 import time
 import threading
 import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
 
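
This last hunk is the shared handler module, which launches each local model in a worker via Process and Pipe; with its top-level transformers import gone, the main process never touches PyTorch at all. Below is a runnable sketch of that parent/child split, where the worker function, the pipe protocol, and the checkpoint name are illustrative assumptions rather than the repository's actual LocalLLMHandle code:

# Sketch: why the heavy import belongs in the child process.
from multiprocessing import Process, Pipe

def _worker(pipe):
    # Heavy dependencies load here, inside the child, so the main
    # process (and plugin import) never pays the torch startup cost.
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
    while True:
        text = pipe.recv()
        if text is None:
            break
        pipe.send(tokenizer.encode(text))

if __name__ == "__main__":
    parent_end, child_end = Pipe()
    p = Process(target=_worker, args=(child_end,), daemon=True)
    p.start()  # at this point the main process still has no torch loaded
    parent_end.send("hello")
    print(parent_end.recv())
    parent_end.send(None)  # tell the worker to shut down
    p.join()
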