Combine Qwen model family
@@ -1,13 +1,7 @@
-model_name = "Qwen-7B"
+model_name = "Qwen"
 cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"
 
-from transformers import AutoModel, AutoTokenizer
-import time
-import threading
-import importlib
-from toolbox import update_ui, get_conf, ProxyNetworkActivate
 from multiprocessing import Process, Pipe
+from toolbox import ProxyNetworkActivate, get_conf
 from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
 
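Taken together, this hunk turns the bridge from a single hardcoded "Qwen-7B" binding into a family-level "Qwen" entry: the heavy transformers import and the unused time/threading/importlib imports leave the module top level, and the concrete checkpoint is resolved at load time from configuration (see QWEN_MODEL_SELECTION in the next hunk). A minimal sketch of that selection pattern, assuming a hypothetical variant table and helper (only the QWEN_MODEL_SELECTION key appears in this commit; the names below are illustrative):

# Illustrative only: map user-facing variant names to Hugging Face model ids,
# so one "Qwen" entry can serve the whole model family.
QWEN_VARIANTS = {
    "qwen-7b-chat": "Qwen/Qwen-7B-Chat",
    "qwen-14b-chat": "Qwen/Qwen-14B-Chat",
    "qwen-7b-chat-int4": "Qwen/Qwen-7B-Chat-Int4",  # GPTQ build, needs auto-gptq
}

def resolve_qwen_model(selection: str) -> str:
    # Hypothetical helper: validate and resolve the configured variant.
    try:
        return QWEN_VARIANTS[selection.lower()]
    except KeyError:
        raise ValueError(f"unknown Qwen variant: {selection!r}")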
@@ -24,16 +18,14 @@ class GetQwenLMHandle(LocalLLMHandle):
 
     def load_model_and_tokenizer(self):
         # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
-        import os, glob
+        import os
         import platform
-        from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
+        from transformers import AutoModelForCausalLM, AutoTokenizer
+        from transformers.generation import GenerationConfig
 
         with ProxyNetworkActivate('Download_LLM'):
-            model_id = 'qwen/Qwen-7B-Chat' # change the path here if you have already downloaded the model, and don't forget the tokenizer
-            self._tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen-7B-Chat', trust_remote_code=True, resume_download=True)
+            model_id = get_conf('QWEN_MODEL_SELECTION') # change the path here if you have already downloaded the model, and don't forget the tokenizer
+            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
             # use fp16
-            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
+            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
             model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True) # different generation lengths, top_p, and other hyperparameters can be set here
             self._model = model
 
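Standalone, the new load path is plain transformers. A sketch under the same settings, using Qwen/Qwen-7B-Chat (the id removed above) as the example; in the bridge itself model_id comes from get_conf('QWEN_MODEL_SELECTION') and the download runs inside ProxyNetworkActivate:

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig

model_id = "Qwen/Qwen-7B-Chat"  # example; the bridge reads this from config

# Qwen ships custom modeling code, hence trust_remote_code=True throughout.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",       # shard across available devices
    trust_remote_code=True,  # dropping fp16=True lets the checkpoint pick its dtype
).eval()
# Per-checkpoint generation defaults (max length, top_p, ...)
model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)

# Qwen's remote code exposes a chat() helper (per the model card):
response, history = model.chat(tokenizer, "Hello", history=None)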
request_llms/requirements_qwen.txt
@@ -1,2 +1,4 @@
-modelscope
 transformers_stream_generator
+auto-gptq
+optimum
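The requirements change tracks the import switch: modelscope goes away because loading now runs through transformers, while auto-gptq and optimum are the packages transformers relies on to load GPTQ-quantized checkpoints such as the Int4 Qwen builds. A sketch, assuming Qwen/Qwen-7B-Chat-Int4 as the quantized example (not named in this commit):

from transformers import AutoModelForCausalLM, AutoTokenizer

# With auto-gptq and optimum installed, the same from_pretrained call
# loads a GPTQ checkpoint; no modelscope import is involved.
model_id = "Qwen/Qwen-7B-Chat-Int4"  # example quantized variant
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", trust_remote_code=True
).eval()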