Add ChatGLM4 local deployment support and refactor ChatGLM bridge's path configuration (#2062)

*  feat(request_llms, config.py): ChatGLM4 deployment

Add support for local deployment of the ChatGLM4 model.

* 🦄 refactor(bridge_chatglm3.py): ChatGLM3 model path

Added ChatGLM3 model path customization (configurable in config.py).
Removed the unused quantization model options that had already been commented out.

---------

Co-authored-by: MarkDeia <17290550+MarkDeia@users.noreply.github.com>
Author: YE Ke 叶柯
Date: 2024-12-07 23:43:51 +08:00
Committed by: GitHub
Parent: 239894544e
Commit: 294df6c2d5
6 changed files with 124 additions and 20 deletions

README.md

@@ -170,26 +170,32 @@ flowchart TD
```
<details><summary>Click to expand if you need Tsinghua ChatGLM2 / Fudan MOSS / RWKV as the backend</summary>
<details><summary>Click to expand if you need the Tsinghua ChatGLM series / Fudan MOSS / RWKV as the backend</summary>
<p>
【Optional steps】If you need Tsinghua ChatGLM3 / Fudan MOSS as the backend, additional dependencies must be installed (prerequisites: familiarity with Python, experience with PyTorch, and a sufficiently powerful machine):
【Optional steps】If you need the Tsinghua ChatGLM series / Fudan MOSS as the backend, additional dependencies must be installed (prerequisites: familiarity with Python, experience with PyTorch, and a sufficiently powerful machine):
```sh
# 【Optional step I】Support Tsinghua ChatGLM3. Note for Tsinghua ChatGLM: if you hit the error "Call ChatGLM fail 不能正常加载ChatGLM的参数" (cannot load the ChatGLM parameters), check the following: 1. The default installation above is the torch+cpu build; to use CUDA, uninstall torch and reinstall the torch+cuda build. 2. If the model cannot be loaded because your machine is not powerful enough, change the model precision in request_llm/bridge_chatglm.py by replacing every AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
python -m pip install -r request_llms/requirements_chatglm.txt
# 【Optional step II】Support Fudan MOSS
# 【Optional step II】Support Tsinghua ChatGLM4. Note: this model needs at least 24 GB of GPU memory
python -m pip install -r request_llms/requirements_chatglm4.txt
# The ChatGLM4 model can be downloaded with modelscope
# pip install modelscope
# modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat
# 【Optional step III】Support Fudan MOSS
python -m pip install -r request_llms/requirements_moss.txt
git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # Note: this command must be run from the project root directory
# 【Optional step III】Support RWKV Runner
# 【Optional step IV】Support RWKV Runner
See the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
# 【Optional step IV】Make sure AVAIL_LLM_MODELS in config.py includes the desired models; all currently supported models are listed below (the jittorllms series is currently only supported via the docker setup)
# 【Optional step V】Make sure AVAIL_LLM_MODELS in config.py includes the desired models; all currently supported models are listed below (the jittorllms series is currently only supported via the docker setup)
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
# 【Optional step V】Support INT8/INT4 quantization of local models (the models referred to here are not quantized versions themselves; currently deepseek-coder is supported, and more quantizable models will be added after testing)
# 【Optional step VI】Support INT8/INT4 quantization of local models (the models referred to here are not quantized versions themselves; currently deepseek-coder is supported, and more quantizable models will be added after testing)
pip install bitsandbyte
# Windows users need to install bitsandbytes from the bitsandbytes-windows-webui index below
python -m pip install bitsandbytes --prefer-binary --extra-index-url=https://jllllll.github.io/bitsandbytes-windows-webui

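For readers following these steps, a minimal smoke test can confirm that the downloaded weights load. This is an illustrative sketch, not part of the commit; it assumes the modelscope command above placed the model in ./THUDM/glm-4-9b-chat and that a CUDA GPU with roughly 24 GB of memory is available:

```python
# Illustrative smoke test (not part of this commit): load the locally
# downloaded glm-4-9b-chat and generate one reply, mirroring the new bridge code.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "./THUDM/glm-4-9b-chat"   # path used by the modelscope command above
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.bfloat16, trust_remote_code=True
).eval().to("cuda")

inputs = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    add_generation_prompt=True, tokenize=True,
    return_tensors="pt", return_dict=True,
).to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```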
config.py

@@ -36,7 +36,7 @@ AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-p
"gpt-4o", "gpt-4o-mini", "gpt-4-turbo", "gpt-4-turbo-2024-04-09",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-4v", "glm-3-turbo",
"gemini-1.5-pro", "chatglm3"
"gemini-1.5-pro", "chatglm3", "chatglm4"
]
EMBEDDING_MODEL = "text-embedding-3-small"
@@ -143,6 +143,9 @@ BAIDU_CLOUD_SECRET_KEY = ''
BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot' # Options: "ERNIE-Bot-4" (ERNIE 4.0), "ERNIE-Bot" (ERNIE Bot), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat", "ERNIE-Speed-128K", "ERNIE-Speed-8K", "ERNIE-Lite-8K"
# If using the local ChatGLM3 or ChatGLM4 model, set LLM_MODEL="chatglm3" or LLM_MODEL="chatglm4" and specify the model path here
CHATGLM_LOCAL_MODEL_PATH = "THUDM/glm-4-9b-chat" # e.g. "/home/hmp/ChatGLM3-6B/"
# If using a fine-tuned ChatGLM2 model, set LLM_MODEL="chatglmft" and specify the model path here
CHATGLM_PTUNING_CHECKPOINT = "" # e.g. "/home/hmp/ChatGLM2-6B/ptuning/output/6b-pt-128-1e-2/checkpoint-100"
@@ -375,6 +378,7 @@ DAAS_SERVER_URLS = [ f"https://niuziniu-biligpt{i}.hf.space/stream" for i in ran
Diagram of local large models
├── "chatglm4"
├── "chatglm3"
├── "chatglm"
├── "chatglm_onnx"

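Taken together, the config.py hunks suggest a local ChatGLM4 setup roughly like the following. The keys (LLM_MODEL, AVAIL_LLM_MODELS, CHATGLM_LOCAL_MODEL_PATH, LOCAL_MODEL_DEVICE) appear elsewhere in this diff; the concrete values are illustrative:

```python
# Illustrative excerpt of a config.py prepared for local ChatGLM4 (example values only).
LLM_MODEL = "chatglm4"                                # select the new local backend, per the comment above
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "chatglm3", "chatglm4"]
CHATGLM_LOCAL_MODEL_PATH = "./THUDM/glm-4-9b-chat"    # local path or hub id of the model
LOCAL_MODEL_DEVICE = "cuda"                           # glm-4-9b-chat needs at least ~24 GB of GPU memory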
request_llms/bridge_all.py

@@ -26,6 +26,9 @@ from .bridge_chatglm import predict as chatglm_ui
from .bridge_chatglm3 import predict_no_ui_long_connection as chatglm3_noui
from .bridge_chatglm3 import predict as chatglm3_ui
from .bridge_chatglm4 import predict_no_ui_long_connection as chatglm4_noui
from .bridge_chatglm4 import predict as chatglm4_ui
from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
from .bridge_qianfan import predict as qianfan_ui
@@ -416,6 +419,7 @@ model_info = {
"token_cnt": get_token_num_gpt4,
},
# Local ChatGLM models
# Map chatglm directly onto chatglm2
"chatglm": {
"fn_with_ui": chatglm_ui,
@@ -441,6 +445,14 @@ model_info = {
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"chatglm4": {
"fn_with_ui": chatglm4_ui,
"fn_without_ui": chatglm4_noui,
"endpoint": None,
"max_token": 8192,
"tokenizer": tokenizer_gpt35,
"token_cnt": get_token_num_gpt35,
},
"qianfan": {
"fn_with_ui": qianfan_ui,
"fn_without_ui": qianfan_noui,

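For context on how the new entry is consumed: callers resolve a model name through the model_info registry extended above and invoke its registered predict functions. The helper below is a simplified, hypothetical sketch of that lookup, not code from this commit:

```python
# Hypothetical dispatch sketch: resolve "chatglm4" through the model_info
# registry above and pick the UI or non-UI predict function.
def resolve_predict_fn(model: str, with_ui: bool):
    entry = model_info[model]                 # e.g. the "chatglm4" block added in this hunk
    return entry["fn_with_ui"] if with_ui else entry["fn_without_ui"]

predict = resolve_predict_fn("chatglm4", with_ui=False)   # -> chatglm4_noui from bridge_chatglm4
```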
request_llms/bridge_chatglm3.py

@@ -23,39 +23,33 @@ class GetGLM3Handle(LocalLLMHandle):
import os
import platform
LOCAL_MODEL_QUANT, device = get_conf("LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
_model_name_ = "THUDM/chatglm3-6b"
# if LOCAL_MODEL_QUANT == "INT4": # INT4
# _model_name_ = "THUDM/chatglm3-6b-int4"
# elif LOCAL_MODEL_QUANT == "INT8": # INT8
# _model_name_ = "THUDM/chatglm3-6b-int8"
# else:
# _model_name_ = "THUDM/chatglm3-6b" # FP16
LOCAL_MODEL_PATH, LOCAL_MODEL_QUANT, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_QUANT", "LOCAL_MODEL_DEVICE")
model_path = LOCAL_MODEL_PATH
with ProxyNetworkActivate("Download_LLM"):
chatglm_tokenizer = AutoTokenizer.from_pretrained(
_model_name_, trust_remote_code=True
model_path, trust_remote_code=True
)
if device == "cpu":
chatglm_model = AutoModel.from_pretrained(
_model_name_,
model_path,
trust_remote_code=True,
device="cpu",
).float()
elif LOCAL_MODEL_QUANT == "INT4": # INT4
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
pretrained_model_name_or_path=model_path,
trust_remote_code=True,
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
elif LOCAL_MODEL_QUANT == "INT8": # INT8
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
pretrained_model_name_or_path=model_path,
trust_remote_code=True,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
else:
chatglm_model = AutoModel.from_pretrained(
pretrained_model_name_or_path=_model_name_,
pretrained_model_name_or_path=model_path,
trust_remote_code=True,
device="cuda",
)

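The net effect of this refactor is that every branch now loads from the user-configured CHATGLM_LOCAL_MODEL_PATH instead of a hard-coded "THUDM/chatglm3-6b", with quantization still selected by LOCAL_MODEL_QUANT. A condensed, illustrative restatement of the resulting loading logic (assuming transformers plus bitsandbytes are installed):

```python
# Condensed restatement of the loading branches in the hunk above (illustrative only).
from transformers import AutoModel, AutoTokenizer, BitsAndBytesConfig

def load_chatglm3(model_path: str, device: str, quant: str):
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    if device == "cpu":
        model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device="cpu").float()
    elif quant == "INT4":
        model = AutoModel.from_pretrained(
            model_path, trust_remote_code=True,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
        )
    elif quant == "INT8":
        model = AutoModel.from_pretrained(
            model_path, trust_remote_code=True,
            quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        )
    else:
        model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device="cuda")
    return model, tokenizer
```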
request_llms/bridge_chatglm4.py (new file)

@@ -0,0 +1,81 @@
model_name = "ChatGLM4"
cmd_to_install = """
`pip install -r request_llms/requirements_chatglm4.txt`
`pip install modelscope`
`modelscope download --model ZhipuAI/glm-4-9b-chat --local_dir ./THUDM/glm-4-9b-chat`
"""
from toolbox import get_conf, ProxyNetworkActivate
from .local_llm_class import LocalLLMHandle, get_local_llm_predict_fns
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 Local Model
# ------------------------------------------------------------------------------------------------------------------------
class GetGLM4Handle(LocalLLMHandle):
def load_model_info(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ Runs in the child process
self.model_name = model_name
self.cmd_to_install = cmd_to_install
def load_model_and_tokenizer(self):
# 🏃‍♂️🏃‍♂️🏃‍♂️ Runs in the child process
import torch
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
import os
LOCAL_MODEL_PATH, device = get_conf("CHATGLM_LOCAL_MODEL_PATH", "LOCAL_MODEL_DEVICE")
model_path = LOCAL_MODEL_PATH
chatglm_tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
chatglm_model = AutoModelForCausalLM.from_pretrained(
model_path,
torch_dtype=torch.bfloat16,
low_cpu_mem_usage=True,
trust_remote_code=True,
device=device
).eval().to(device)
self._model = chatglm_model
self._tokenizer = chatglm_tokenizer
return self._model, self._tokenizer
def llm_stream_generator(self, **kwargs):
# 🏃‍♂️🏃‍♂️🏃‍♂️ Runs in the child process
def adaptor(kwargs):
query = kwargs["query"]
max_length = kwargs["max_length"]
top_p = kwargs["top_p"]
temperature = kwargs["temperature"]
history = kwargs["history"]
return query, max_length, top_p, temperature, history
query, max_length, top_p, temperature, history = adaptor(kwargs)
inputs = self._tokenizer.apply_chat_template([{"role": "user", "content": query}],
add_generation_prompt=True,
tokenize=True,
return_tensors="pt",
return_dict=True
).to(self._model.device)
gen_kwargs = {"max_length": max_length, "do_sample": True, "top_k": top_p}
outputs = self._model.generate(**inputs, **gen_kwargs)
outputs = outputs[:, inputs['input_ids'].shape[1]:]
response = self._tokenizer.decode(outputs[0], skip_special_tokens=True)
yield response
def try_to_import_special_deps(self, **kwargs):
# Import something that will raise an error if the user has not installed requirements_*.txt
# 🏃‍♂️🏃‍♂️🏃‍♂️ Runs in the main process
import importlib
# importlib.import_module('modelscope')
# ------------------------------------------------------------------------------------------------------------------------
# 🔌💻 GPT-Academic Interface
# ------------------------------------------------------------------------------------------------------------------------
predict_no_ui_long_connection, predict = get_local_llm_predict_fns(
GetGLM4Handle, model_name, history_format="chatglm3"
)

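For reference, the adaptor above defines the keyword contract between the shared LocalLLMHandle machinery and this bridge. The dict below is a hedged illustration of that contract with placeholder values; the code that actually builds it lives in local_llm_class.py and is not changed by this commit:

```python
# Placeholder kwargs mirroring the adaptor() unpacking in llm_stream_generator above.
kwargs = {
    "query": "Explain self-attention in one paragraph.",
    "max_length": 8192,   # consistent with the max_token registered in bridge_all.py
    "top_p": 0.9,         # note: generate() above receives this value through the top_k field
    "temperature": 0.7,   # unpacked by the adaptor but not forwarded to generate() in this version
    "history": [],        # prior turns, serialized with history_format="chatglm3"
}
```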
request_llms/requirements_chatglm4.txt (new file)

@@ -0,0 +1,7 @@
protobuf
cpm_kernels
torch>=1.10
transformers>=4.44
mdtex2html
sentencepiece
accelerate
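
A quick way to confirm the new dependency set is usable is to import each package. The snippet below is a convenience sketch (module names follow the requirements list above; a CUDA-enabled torch build is assumed separately):

```python
# Convenience check: import each dependency from requirements_chatglm4.txt.
import importlib

for module in ("google.protobuf", "cpm_kernels", "torch", "transformers",
               "mdtex2html", "sentencepiece", "accelerate"):
    importlib.import_module(module)
    print(f"ok: {module}")
```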