Deploying InternVL2-1B on RK3588
Preparation
First, download the InternVL2-1B model from Hugging Face; the hf-mirror link is used here:
```bash
git clone https://hf-mirror.com/OpenGVLab/InternVL2-1B
```
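If `git clone` from the mirror is slow or unavailable, the snapshot can also be pulled with the huggingface_hub package. This is a minimal sketch, assuming you want to route traffic through hf-mirror via the HF_ENDPOINT environment variable and keep the local directory `./InternVL2-1B` used by the scripts below:

```python
import os

# Point huggingface_hub at the hf-mirror endpoint (assumption: the mirror is wanted;
# this must be set before huggingface_hub is imported).
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

from huggingface_hub import snapshot_download

# Download the full InternVL2-1B snapshot into ./InternVL2-1B,
# matching the relative path used by the export script below.
snapshot_download(repo_id="OpenGVLab/InternVL2-1B", local_dir="./InternVL2-1B")
```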
You also need to install the software required for the RKNN conversion (rknn-toolkit2, which provides rknn.api); that setup is not covered in detail here.
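As a quick sanity check that the conversion environment is in place, the imports below should all succeed; exact package versions are left open here:

```python
# Verify that the packages used by the export and conversion scripts import cleanly.
import torch
import transformers
import onnx
import onnxruntime
from rknn.api import RKNN  # shipped with rknn-toolkit2

print("torch:", torch.__version__)
print("transformers:", transformers.__version__)
print("onnx:", onnx.__version__)
print("onnxruntime:", onnxruntime.__version__)
print("rknn.api.RKNN imported OK")
```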
Conversion
The code for exporting the vision_model and mlp to ONNX is as follows:
```python
import numpy as np
import os
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel
import torch.nn.functional as F
from PIL import Image
import torchvision.transforms as T
from torchvision.transforms import InterpolationMode
from transformers.modeling_utils import PreTrainedModel
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
T.ToTensor(),
T.Normalize(mean=MEAN, std=STD)
])
return transform
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
best_ratio_diff = float('inf')
best_ratio = (1, 1)
area = width * height
for ratio in target_ratios:
target_aspect_ratio = ratio[0] / ratio[1]
ratio_diff = abs(aspect_ratio - target_aspect_ratio)
if ratio_diff < best_ratio_diff:
best_ratio_diff = ratio_diff
best_ratio = ratio
elif ratio_diff == best_ratio_diff:
if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
best_ratio = ratio
return best_ratio
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
orig_width, orig_height = image.size
aspect_ratio = orig_width / orig_height
# calculate the existing image aspect ratio
target_ratios = set(
(i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
i * j <= max_num and i * j >= min_num)
target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])
# find the closest aspect ratio to the target
target_aspect_ratio = find_closest_aspect_ratio(
aspect_ratio, target_ratios, orig_width, orig_height, image_size)
# calculate the target width and height
target_width = image_size * target_aspect_ratio[0]
target_height = image_size * target_aspect_ratio[1]
blocks = target_aspect_ratio[0] * target_aspect_ratio[1]
# resize the image
resized_img = image.resize((target_width, target_height))
processed_images = []
for i in range(blocks):
box = (
(i % (target_width // image_size)) * image_size,
(i // (target_width // image_size)) * image_size,
((i % (target_width // image_size)) + 1) * image_size,
((i // (target_width // image_size)) + 1) * image_size
)
# split the image
split_img = resized_img.crop(box)
processed_images.append(split_img)
assert len(processed_images) == blocks
if use_thumbnail and len(processed_images) != 1:
thumbnail_img = image.resize((image_size, image_size))
processed_images.append(thumbnail_img)
return processed_images
def load_image(image_file, input_size=448, max_num=12):
image = Image.open(image_file).convert('RGB')
transform = build_transform(input_size=input_size)
images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
pixel_values = [transform(image) for image in images]
pixel_values = torch.stack(pixel_values)
return pixel_values
# Local model path, ONNX output path, and the example image used as the dummy input
path = './InternVL2-1B'
save_path = 'onnx/InternVL2-1B_vision.onnx'
image_file = 'InternVL2-1B/examples/image1.jpg'
def export_vision_InternVL(model_path: str, save_path: str):
    """
    Export the vision encoder and MLP projector of the InternVL2-1B model to ONNX format.
    """
    # Set the default dtype to float32
    torch.set_default_dtype(torch.float32)
    vl_gpt = AutoModel.from_pretrained(model_path, torch_dtype=torch.float32, trust_remote_code=True)
    # Move the model to CPU and make sure it stays in float32
    vl_gpt = vl_gpt.cpu().eval().float()
    # Create a wrapper class for the vision encoder + projector
    class VisionWrapper(nn.Module):
        def __init__(self, model):
            super().__init__()
            self.vision_tower = model.vision_model   # register the vision encoder so .eval()/.float() propagate
            self.mlp = model.mlp1                    # register the MLP projector for the same reason
            self.vision_mlp = model.extract_feature  # bound method: vision encoder + pixel shuffle + projector

        def forward(self, pixel_values):
            projected_features = self.vision_mlp(pixel_values)
            return projected_features
# Create wrapper instance and convert to float32
vision_wrapper = VisionWrapper(vl_gpt)
    vision_wrapper.eval().float()  # make sure the wrapper is float32 as well
# Create dummy input with float32
batch_size = 1
num_channels = 3
height = 448 # InternVL2 default image size
width = 448
dummy_input = load_image(image_file=image_file, max_num=12).to(torch.float32).cpu()
# dummy_input = torch.randn(batch_size, num_channels, height, width, dtype=torch.float32)
# Export to ONNX with higher opset version
torch.onnx.export(
vision_wrapper,
dummy_input,
save_path,
export_params=True,
        opset_version=16,  # use a newer opset so scaled_dot_product_attention can be exported
do_constant_folding=True,
input_names=['pixel_values'],
output_names=['projected_features'],
dynamic_axes={
'pixel_values': {0: 'batch_size'},
'projected_features': {0: 'batch_size'}
},
        # additional export options
operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
training=torch.onnx.TrainingMode.EVAL,
verbose=False
)
print(f"Successfully exported vision components to {save_path}")
# Verify the exported model
import onnxruntime
# Create inference session
ort_session = onnxruntime.InferenceSession(save_path)
# Run inference with dummy input
ort_inputs = {
'pixel_values': dummy_input.numpy()
}
ort_outputs = ort_session.run(None, ort_inputs)
# Compare with PyTorch output
torch_output = vision_wrapper(dummy_input)
    # Check numerical accuracy with relaxed tolerances
import numpy as np
np.testing.assert_allclose(
torch_output.detach().numpy(),
ort_outputs[0],
        rtol=1e-1,  # relaxed relative error tolerance
        atol=1e-2   # relaxed absolute error tolerance
)
print("ONNX model verification successful!")
    # Print some summary statistics
torch_output_np = torch_output.detach().numpy()
onnx_output_np = ort_outputs[0]
abs_diff = np.abs(torch_output_np - onnx_output_np)
rel_diff = np.abs((torch_output_np - onnx_output_np) / (torch_output_np + 1e-7))
print(f"\nValidation Statistics:")
print(f"Max absolute difference: {np.max(abs_diff):.6f}")
print(f"Mean absolute difference: {np.mean(abs_diff):.6f}")
print(f"Max relative difference: {np.max(rel_diff):.6f}")
print(f"Mean relative difference: {np.mean(rel_diff):.6f}")
if __name__ == "__main__":
os.makedirs('onnx', exist_ok=True)
try:
import onnx
try:
onnx_version = onnx.__version__
except AttributeError:
try:
onnx_version = onnx.version.version
except AttributeError:
onnx_version = "Unknown"
print(f"ONNX version: {onnx_version}")
except ImportError:
print("ONNX not installed")
import onnxruntime
print(f"ONNX Runtime version: {onnxruntime.__version__}")
    export_vision_InternVL(path, save_path)
```
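Before handing the ONNX file to rknn-toolkit2, it can help to run it through onnx-simplifier. This is an optional extra step, not required by the flow above, sketched here under the assumption that the onnxsim package is installed; if you use the simplified file, point model_path in the next script at it:

```python
# Optional: fold constants and simplify the exported graph with onnx-simplifier.
import onnx
from onnxsim import simplify

model = onnx.load("onnx/InternVL2-1B_vision.onnx")
model_simp, ok = simplify(model)
assert ok, "onnx-simplifier could not validate the simplified model"
onnx.save(model_simp, "onnx/InternVL2-1B_vision_sim.onnx")
print("Saved simplified model to onnx/InternVL2-1B_vision_sim.onnx")
```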
The code for converting the ONNX model to RKNN is as follows:
```python
from rknn.api import RKNN
import numpy as np
import os
model_path = "./onnx/InternVL2-1B_vision.onnx"
target_platform = "rk3588"
rknn = RKNN(verbose=False)
rknn.config(
    target_platform=target_platform,
    # NOTE: load_image in the export script already normalizes with ImageNet mean/std,
    # so if raw 0-255 RGB images are fed to the RKNN model these values may need to be
    # the ImageNet ones (mean ~ [123.675, 116.28, 103.53], std ~ [58.395, 57.12, 57.375])
    # rather than 0.5 * 255.
    mean_values=[[0.5 * 255, 0.5 * 255, 0.5 * 255]],
    std_values=[[0.5 * 255, 0.5 * 255, 0.5 * 255]],
)
rknn.load_onnx(model_path, inputs=['pixel_values'], input_size_list=[[1,3,448,448]])
rknn.build(do_quantization=False, dataset=None)
os.makedirs("rknn", exist_ok=True)
rknn.export_rknn("./rknn/" + model_path.split("/")[-1].split(".")[0] + "_{}.rknn".format(target_platform))
```
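To sanity-check the converted model on the PC before moving to the board, the snippet below can be appended to the script above (after rknn.build); it runs the model in the rknn-toolkit2 simulator with a random input of the declared shape, which only verifies that the graph executes, not its accuracy:

```python
import numpy as np

# init_runtime() without a target runs in the PC simulator instead of on an RK3588.
ret = rknn.init_runtime()
assert ret == 0, "init_runtime failed"

# Random NCHW tensor matching the shape declared in load_onnx ([1, 3, 448, 448]);
# real inputs should be preprocessed the same way as load_image in the export script.
dummy = np.random.rand(1, 3, 448, 448).astype(np.float32)
outputs = rknn.inference(inputs=[dummy], data_format="nchw")
print("projected_features shape:", outputs[0].shape)

rknn.release()
```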
For the language model, refer to the Qwen model conversion flow in rkllm.
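For reference only, here is a rough sketch of that flow with rkllm-toolkit, modeled on its public Qwen example. The model path is hypothetical (the Qwen2 language-model weights may need to be split out of the InternVL2-1B checkpoint first), and option names such as quantized_dtype can differ between rkllm-toolkit versions, so check the rkllm documentation before relying on it:

```python
from rkllm.api import RKLLM

# Sketch of the rkllm-toolkit Qwen-style conversion; paths and options are assumptions.
llm = RKLLM()

# Hypothetical path to a standalone Qwen2 checkpoint extracted from InternVL2-1B.
ret = llm.load_huggingface(model="./InternVL2-1B-qwen2")
assert ret == 0, "load_huggingface failed"

ret = llm.build(do_quantization=True, quantized_dtype="w8a8", target_platform="rk3588")
assert ret == 0, "build failed"

ret = llm.export_rkllm("./rknn/InternVL2-1B_qwen2_rk3588.rkllm")
assert ret == 0, "export_rkllm failed"
```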
If you have any questions or requests, feel free to contact me on QQ: 1198807618. Comments and corrections are welcome.