Added the qwen1.8b model
@@ -431,12 +431,12 @@ if "chatglm_onnx" in AVAIL_LLM_MODELS:
         })
     except:
         print(trimmed_format_exc())
-if "qwen" in AVAIL_LLM_MODELS:
+if "qwen-1_8B" in AVAIL_LLM_MODELS:   # qwen-1.8B
     try:
-        from .bridge_qwen import predict_no_ui_long_connection as qwen_noui
-        from .bridge_qwen import predict as qwen_ui
+        from .bridge_qwen_1_8B import predict_no_ui_long_connection as qwen_noui
+        from .bridge_qwen_1_8B import predict as qwen_ui
         model_info.update({
-            "qwen": {
+            "qwen-1_8B": {
                 "fn_with_ui": qwen_ui,
                 "fn_without_ui": qwen_noui,
                 "endpoint": None,
@@ -447,6 +447,24 @@ if "qwen" in AVAIL_LLM_MODELS:
         })
     except:
         print(trimmed_format_exc())
+
+if "qwen-7B" in AVAIL_LLM_MODELS:   # qwen-7B
+    try:
+        from .bridge_qwen_7B import predict_no_ui_long_connection as qwen_noui
+        from .bridge_qwen_7B import predict as qwen_ui
+        model_info.update({
+            "qwen-7B": {
+                "fn_with_ui": qwen_ui,
+                "fn_without_ui": qwen_noui,
+                "endpoint": None,
+                "max_token": 4096,
+                "tokenizer": tokenizer_gpt35,
+                "token_cnt": get_token_num_gpt35,
+            }
+        })
+    except:
+        print(trimmed_format_exc())
+
 if "chatgpt_website" in AVAIL_LLM_MODELS:   # integrate some reverse-engineered services: https://github.com/acheong08/ChatGPT-to-API/
     try:
         from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
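The branches above only run for model names that appear in AVAIL_LLM_MODELS, so registering the bridges is not enough by itself. A minimal sketch of how a deployment might enable the new entries, assuming the list lives in the project's config file (names outside this diff are assumptions, not part of the commit):

    # assumed config excerpt: opt in to the new local models
    AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "qwen-1_8B", "qwen-7B"]

    # once registered, the framework can look a model up in model_info and
    # dispatch to the functions added above, roughly like:
    entry = model_info["qwen-1_8B"]
    reply_fn = entry["fn_without_ui"]   # predict_no_ui_long_connection from bridge_qwen_1_8B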
request_llms/bridge_qwen_1_8B.py (new file, 67 lines)
@@ -0,0 +1,67 @@
model_name = "Qwen1_8B"
cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"


from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
from toolbox import update_ui, get_conf, ProxyNetworkActivate
from multiprocessing import Process, Pipe
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns



# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️ runs in the child process
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️ runs in the child process
        import os, glob
        import platform
        from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

        with ProxyNetworkActivate('Download_LLM'):
            model_id = 'Qwen/Qwen-1_8B-Chat'
            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
            # load the weights in fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # generation length, top_p and other hyperparameters can be adjusted here
            self._model = model

        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️ runs in the child process
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        # note: max_length, top_p and temperature are unpacked above but not yet forwarded to chat_stream
        for response in self._model.chat_stream(self._tokenizer, query, history=history):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise an error if the user has not installed requirement_*.txt
        # 🏃‍♂️ runs in the main process
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
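Stripped of the subprocess plumbing that LocalLLMHandle provides, the loading and streaming calls above are plain ModelScope usage. A standalone sketch, assuming the modelscope package is installed and the Qwen/Qwen-1_8B-Chat checkpoint can be downloaded:

    from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

    model_id = 'Qwen/Qwen-1_8B-Chat'
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",
                                                 trust_remote_code=True, fp16=True).eval()
    model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)

    # chat_stream yields progressively longer partial answers, which is what
    # llm_stream_generator relays back to the parent process and the UI
    for partial in model.chat_stream(tokenizer, "Hello, who are you?", history=[]):
        print(partial)

Each yielded value is the full answer so far, so a caller typically keeps only the last one or prints the delta.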
@@ -1,4 +1,4 @@
-model_name = "Qwen"
+model_name = "Qwen-7B"
 cmd_to_install = "`pip install -r request_llms/requirements_qwen.txt`"

