request_llms/bridge_qwen_local.py (new file, 59 lines)
@@ -0,0 +1,59 @@
model_name = "Qwen_Local"
cmd_to_install = "`pip install -r request_llms/requirements_qwen_local.txt`"

from toolbox import ProxyNetworkActivate, get_conf
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetQwenLMHandle(LocalLLMHandle):

    def load_model_info(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        self.model_name = model_name
        self.cmd_to_install = cmd_to_install

    def load_model_and_tokenizer(self):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        # from modelscope import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
        from transformers import AutoModelForCausalLM, AutoTokenizer
        from transformers.generation import GenerationConfig
        with ProxyNetworkActivate('Download_LLM'):
            model_id = get_conf('QWEN_LOCAL_MODEL_SELECTION')
            self._tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, resume_download=True)
            # use fp16
            model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True).eval()
            model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)  # different generation lengths, top_p and other related hyper-parameters can be set here
            self._model = model

        return self._model, self._tokenizer

    def llm_stream_generator(self, **kwargs):
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the child process
        def adaptor(kwargs):
            query = kwargs['query']
            max_length = kwargs['max_length']
            top_p = kwargs['top_p']
            temperature = kwargs['temperature']
            history = kwargs['history']
            return query, max_length, top_p, temperature, history

        query, max_length, top_p, temperature, history = adaptor(kwargs)

        for response in self._model.chat_stream(self._tokenizer, query, history=history):
            yield response

    def try_to_import_special_deps(self, **kwargs):
        # import something that will raise error if the user does not install requirement_*.txt
        # 🏃‍♂️🏃‍♂️🏃‍♂️ runs in the main process
        import importlib
        importlib.import_module('modelscope')


# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(GetQwenLMHandle, model_name)
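For orientation, the keyword arguments that llm_stream_generator expects can be exercised against a stand-in model. The key names below come from the adaptor above; the StubModel class, the sample values, and the assumption that chat_stream yields cumulative partial replies are illustrative only, not part of this commit.

# Hypothetical stand-in for the local Qwen model: chat_stream here mimics the
# cumulative partial replies the bridge appears to expect (an assumption, not
# the real Qwen API).
class StubModel:
    def chat_stream(self, tokenizer, query, history=None):
        reply = "stubbed answer"
        for i in range(1, len(reply) + 1):
            yield reply[:i]

def stream_like_bridge(model, tokenizer, kwargs):
    # Same key names as adaptor() inside GetQwenLMHandle.llm_stream_generator.
    query, history = kwargs['query'], kwargs['history']
    for response in model.chat_stream(tokenizer, query, history=history):
        yield response

demo_kwargs = {'query': 'Hello', 'max_length': 512, 'top_p': 0.8,
               'temperature': 1.0, 'history': []}
for partial in stream_like_bridge(StubModel(), None, demo_kwargs):
    print(partial)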
request_llms/com_qwenapi.py (new file, 94 lines)
@@ -0,0 +1,94 @@
from http import HTTPStatus
from toolbox import get_conf
import threading
import logging

timeout_bot_msg = '[Local Message] Request timeout. Network error.'


class QwenRequestInstance():
    def __init__(self):
        import dashscope
        self.time_to_yield_event = threading.Event()
        self.time_to_exit_event = threading.Event()
        self.result_buf = ""

        def validate_key():
            DASHSCOPE_API_KEY = get_conf("DASHSCOPE_API_KEY")
            if DASHSCOPE_API_KEY == '': return False
            return True

        if not validate_key():
            raise RuntimeError('Please configure DASHSCOPE_API_KEY')
        dashscope.api_key = get_conf("DASHSCOPE_API_KEY")

    def generate(self, inputs, llm_kwargs, history, system_prompt):
        # import _thread as thread
        from dashscope import Generation
        QWEN_MODEL = {
            'qwen-turbo': Generation.Models.qwen_turbo,
            'qwen-plus': Generation.Models.qwen_plus,
            'qwen-max': Generation.Models.qwen_max,
        }[llm_kwargs['llm_model']]
        top_p = llm_kwargs.get('top_p', 0.8)
        if top_p == 0: top_p += 1e-5
        if top_p == 1: top_p -= 1e-5

        self.result_buf = ""
        responses = Generation.call(
            model=QWEN_MODEL,
            messages=generate_message_payload(inputs, llm_kwargs, history, system_prompt),
            top_p=top_p,
            temperature=llm_kwargs.get('temperature', 1.0),
            result_format='message',
            stream=True,
            incremental_output=True
        )

        for response in responses:
            if response.status_code == HTTPStatus.OK:
                if response.output.choices[0].finish_reason == 'stop':
                    yield self.result_buf
                    break
                elif response.output.choices[0].finish_reason == 'length':
                    self.result_buf += "[Local Message] The generated text is too long and the remaining output has been truncated"
                    yield self.result_buf
                    break
                else:
                    self.result_buf += response.output.choices[0].message.content
                    yield self.result_buf
            else:
                self.result_buf += f"[Local Message] Request error. Status code: {response.status_code}, error code: {response.code}, message: {response.message}"
                yield self.result_buf
                break
        logging.info(f'[raw_input] {inputs}')
        logging.info(f'[response] {self.result_buf}')
        return self.result_buf


def generate_message_payload(inputs, llm_kwargs, history, system_prompt):
    conversation_cnt = len(history) // 2
    if system_prompt == '': system_prompt = 'Hello!'
    messages = [{"role": "user", "content": system_prompt}, {"role": "assistant", "content": "Certainly!"}]
    if conversation_cnt:
        for index in range(0, 2*conversation_cnt, 2):
            what_i_have_asked = {}
            what_i_have_asked["role"] = "user"
            what_i_have_asked["content"] = history[index]
            what_gpt_answer = {}
            what_gpt_answer["role"] = "assistant"
            what_gpt_answer["content"] = history[index+1]
            if what_i_have_asked["content"] != "":
                if what_gpt_answer["content"] == "":
                    continue
                if what_gpt_answer["content"] == timeout_bot_msg:
                    continue
                messages.append(what_i_have_asked)
                messages.append(what_gpt_answer)
            else:
                messages[-1]['content'] = what_gpt_answer['content']
    what_i_ask_now = {}
    what_i_ask_now["role"] = "user"
    what_i_ask_now["content"] = inputs
    messages.append(what_i_ask_now)
    return messages
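As a quick illustration of the payload shape that generate_message_payload produces (the example strings are invented; the structure follows the function as written above):

example = generate_message_payload(
    inputs="What is the capital of France?",
    llm_kwargs={},   # not consumed by this function
    history=["Hi", "Hello, how can I help?"],
    system_prompt="You are a helpful assistant.",
)
# example ==
# [{"role": "user",      "content": "You are a helpful assistant."},
#  {"role": "assistant", "content": "Certainly!"},
#  {"role": "user",      "content": "Hi"},
#  {"role": "assistant", "content": "Hello, how can I help?"},
#  {"role": "user",      "content": "What is the capital of France?"}]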
request_llms/requirements_qwen_local.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
modelscope
transformers_stream_generator
auto-gptq
optimum
urllib3<2
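A small, optional sanity check (not part of this commit) that the packages listed above are importable; note that the pip name auto-gptq imports as auto_gptq:

import importlib

# Import names assumed to correspond to the pip packages above.
for pkg in ("modelscope", "transformers_stream_generator", "auto_gptq", "optimum", "urllib3"):
    try:
        importlib.import_module(pkg)
        print(f"{pkg}: ok")
    except ImportError as err:
        print(f"{pkg}: missing ({err})")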