
10.5 Hands-On Project: A General-Purpose Intelligent Perception System for Humanoid Robots Based on the RoboBrain Large Model
The example in this section is built on RoboBrain, an open-source general-purpose "intelligent brain" model for robot manipulation released by FlagOpen (Beijing Academy of Artificial Intelligence, BAAI). RoboBrain constructs a unified intelligent system on top of a multimodal large language model (MLLM) and focuses on teaching robots the relationships among vision, language, and action, enabling complex manipulation tasks such as planning step sequences, perceiving the properties of interactable objects, and predicting action trajectories. The project targets embodied-AI systems and robot-manipulation scenarios: it can serve as the intelligent core of future humanoid robots, from perceiving the environment to generating manipulation plans, and advances the ability of humanoid robots to generalize manipulation in the real world.
Example 10-1: A general-purpose intelligent perception system for humanoid robots based on the RoboBrain large model (source path: codes\10\Point01.py)
10.5.1 Building the Model
The model directory is the core implementation area of this humanoid-robot multimodal large model; it encapsulates the complete model stack from visual perception through language understanding to decision generation. Through a modular design, the directory organically combines visual encoding, vision resampling, multimodal alignment, language modeling, and generation: on one hand it extracts, compresses, and structures the high-dimensional visual information coming from cameras and other sensors; on the other hand it injects these visual semantics seamlessly into the large language model, unifying "seeing, thinking, speaking, and reasoning" in a single model. The overall architecture is compatible with mainstream pretrained models while being customized for the perception-dense, temporally complex, and alignment-demanding characteristics of robotic scenarios, making it the model hub that gives a humanoid robot environment understanding, task reasoning, and intelligent interaction.
(1) The file model/llava_utils/convert_lora_weights_to_hf.py converts the project's LoRA fine-tuned weights (in safetensors format) into the Hugging Face (HF) format so that they can be used within the HF Transformers framework. Its main functions are:
- Load the original weights (*.safetensors files) into memory.
- Rewrite the weight keys, mapping the project's internal naming to the HF naming convention (via KEYS_TO_MODIFY_MAPPING).
- Save the converted weights as a new safetensors file and copy the LoRA configuration file alongside it.
- Accept the input model directory and output directory from the command line.
```python
import argparse
import glob
import os
import shutil

import torch
from safetensors import safe_open
from safetensors.torch import save_file

# Map the project's internal weight key names to the HF naming convention
KEYS_TO_MODIFY_MAPPING = {
    "base_model.model.": "",
    "model.vision_tower.": "",
    "model.mm_projector": "multi_modal_projector",
    "model": "model.model",
    "vision_model.model": "vision_model",
    "lm_head": "language_model.lm_head",
    "model.model": "language_model.model",
    "multi_modal_projector.0": "multi_modal_projector.linear_1",
    "multi_modal_projector.2": "multi_modal_projector.linear_2",
    "language_model.model.image_newline": "image_newline",
}

def load_original_state_dict(model_dir):
    directory_path = model_dir
    original_state_dict = {}
    for path in glob.glob(f"{directory_path}/*"):
        if path.endswith(".safetensors"):
            with safe_open(path, framework="pt", device="cpu") as f:
                for key in f.keys():
                    original_state_dict[key] = f.get_tensor(key)
    return original_state_dict

def convert_state_dict_to_hf(state_dict):
    new_state_dict = {}
    for key, value in state_dict.items():
        if key.endswith(".inv_freq"):
            continue
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        key = "base_model.model." + key
        new_state_dict[key] = value.to(torch.float16)
    return new_state_dict

def convert_lora_to_hf(model_dir, dump_path):
    # Load the original state dict
    print("Loading the original state dict ...")
    state_dict = load_original_state_dict(model_dir)
    # Convert the key names to the Hugging Face format
    print("Converting key names to the Hugging Face format ...")
    state_dict = convert_state_dict_to_hf(state_dict)
    # Save the converted state dict
    print("Saving the converted state dict ...")
    os.makedirs(dump_path, exist_ok=True)
    save_file(state_dict, f"{dump_path}/adapter_model.safetensors")
    # Copy the LoRA configuration file alongside the weights
    shutil.copy2(f"{model_dir}/adapter_config.json", f"{dump_path}/adapter_config.json")
    print(f"Converted LoRA weights saved to {dump_path}/adapter_model.safetensors")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir", type=str, required=True, help="Path to the input PyTorch model directory"
    )
    parser.add_argument(
        "--dump_path", type=str, required=True, help="Path to the output model directory"
    )
    args = parser.parse_args()
    convert_lora_to_hf(args.model_dir, args.dump_path)
```
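To make the conversion workflow concrete, here is a minimal usage sketch. The directory paths and base-model path are placeholders rather than part of the project, and the sketch assumes the converted output is a standard PEFT adapter directory:
```python
# Hypothetical usage sketch; all paths are placeholders.
#
# Step 1: convert the project's LoRA weights to the HF adapter layout:
#   python convert_lora_weights_to_hf.py \
#       --model_dir ./checkpoints/robobrain_lora \
#       --dump_path ./checkpoints/robobrain_lora_hf
#
# Step 2: attach the converted adapter to a base model with the peft library.
from peft import PeftModel
from transformers import AutoModelForCausalLM

# The base must be the HF-format model whose parameter names match the
# converted adapter keys (placeholder path below).
base = AutoModelForCausalLM.from_pretrained("path/to/hf_base_model")
model = PeftModel.from_pretrained(base, "./checkpoints/robobrain_lora_hf")  # reads adapter_model.safetensors + adapter_config.json
model = model.merge_and_unload()  # optionally fold the LoRA deltas into the base weights
```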
(2) The file model/llava_utils/utils.py is the project's general-purpose utility module. It provides multimodal video-frame processing, log printing and management for distributed training, PyTorch model-initialization shortcuts, and a text content-moderation interface. It supports extracting key frames from video files for model input, producing well-ordered logs in multi-GPU environments, and speeding up model construction, along with helper functions used throughout training and inference of the multimodal large model.
```python
import logging
import logging.handlers
import os
import sys

import av
import numpy as np
import requests
import torch.distributed as dist
from decord import VideoReader, cpu
from transformers import AutoConfig

# LOGDIR (the log output directory) is defined in the project's constants;
# `handler` is the module-level file handler shared by all loggers.
handler = None

def process_video_with_decord(video_file, data_args):
    vr = VideoReader(video_file, ctx=cpu(0), num_threads=1)
    total_frame_num = len(vr)
    video_time = total_frame_num / vr.get_avg_fps()
    avg_fps = round(vr.get_avg_fps() / data_args.video_fps)
    frame_idx = [i for i in range(0, total_frame_num, avg_fps)]
    frame_time = [i / avg_fps for i in frame_idx]

    if data_args.frames_upbound > 0:
        if len(frame_idx) > data_args.frames_upbound or data_args.force_sample:
            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, data_args.frames_upbound, dtype=int)
            frame_idx = uniform_sampled_frames.tolist()
            frame_time = [i / vr.get_avg_fps() for i in frame_idx]

    video = vr.get_batch(frame_idx).asnumpy()
    frame_time = ",".join([f"{i:.2f}s" for i in frame_time])

    num_frames_to_sample = num_frames = len(frame_idx)
    # https://github.com/dmlc/decord/issues/208
    vr.seek(0)
    return video, video_time, frame_time, num_frames_to_sample

def process_video_with_pyav(video_file, data_args):
    container = av.open(video_file)
    # !!! The only difference: use automatic threading
    container.streams.video[0].thread_type = "AUTO"
    video_frames = []
    for packet in container.demux():
        if packet.stream.type == 'video':
            for frame in packet.decode():
                video_frames.append(frame)
    total_frame_num = len(video_frames)
    video_time = video_frames[-1].time
    avg_fps = round(total_frame_num / video_time / data_args.video_fps)
    frame_idx = [i for i in range(0, total_frame_num, avg_fps)]
    if data_args.frames_upbound > 0:
        if len(frame_idx) > data_args.frames_upbound:
            uniform_sampled_frames = np.linspace(0, total_frame_num - 1, data_args.frames_upbound, dtype=int)
            frame_idx = uniform_sampled_frames.tolist()
    frames = [video_frames[i] for i in frame_idx]
    return np.stack([x.to_ndarray(format="rgb24") for x in frames])

def rank0_print(*args):
    if dist.is_initialized():
        if dist.get_rank() == 0:
            print(f"Rank {dist.get_rank()}: ", *args)
    else:
        print(*args)

def rank_print(*args):
    if dist.is_initialized():
        print(f"Rank {dist.get_rank()}: ", *args)
    else:
        print(*args)

def build_logger(logger_name, logger_filename):
    global handler

    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    # Set the format of the root handler
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)

    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl

    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl

    # Get the logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    # Add a file handler for all loggers
    if handler is None:
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        handler = logging.handlers.TimedRotatingFileHandler(filename, when="D", utc=True)
        handler.setFormatter(formatter)

        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)

    return logger

class StreamToLogger(object):
    """
    A file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        return getattr(self.terminal, attr)

    def write(self, buf):
        temp_linebuf = self.linebuf + buf
        self.linebuf = ""
        for line in temp_linebuf.splitlines(True):
            # From the io.TextIOWrapper docs:
            #   On output, if newline is None, any '\n' written is translated
            #   to the system default line separator.
            # By default sys.stdout.write() expects '\n' newlines, so this
            # remains cross-platform.
            if line[-1] == "\n":
                self.logger.log(self.log_level, line.rstrip())
            else:
                self.linebuf += line

    def flush(self):
        if self.linebuf != "":
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ""

def disable_torch_init():
    """
    Disable redundant default initialization in torch to speed up model creation.
    """
    import torch

    setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
    setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)

def violates_moderation(text):
    """
    Check whether the text violates the OpenAI content-moderation API.
    """
    url = "https://api.openai.com/v1/moderations"
    headers = {"Content-Type": "application/json", "Authorization": "Bearer " + os.environ["OPENAI_API_KEY"]}
    text = text.replace("\n", "")
    data = "{" + '"input": ' + f'"{text}"' + "}"
    data = data.encode("utf-8")
    try:
        ret = requests.post(url, headers=headers, data=data, timeout=5)
        flagged = ret.json()["results"][0]["flagged"]
    except requests.exceptions.RequestException as e:
        print(f"######################### Moderation error: {e} #########################")
        flagged = False
    except KeyError as e:
        print(f"######################### Moderation error: {e} #########################")
        flagged = False
    return flagged

def pretty_print_semaphore(semaphore):
    if semaphore is None:
        return "None"
    return f"Semaphore(value={semaphore._value}, locked={semaphore.locked()})"

def auto_upgrade(config):
    cfg = AutoConfig.from_pretrained(config)
    if "llava" in config and "llava" not in cfg.model_type:
        assert cfg.model_type == "llama"
        print("You are using the newer LLaVA codebase with a v0 checkpoint from an older release.")
        print("The checkpoint must be upgraded to the new codebase (this can be done automatically).")
        confirm = input("Please confirm that you want to upgrade the checkpoint. [Y/N]")
        if confirm.lower() in ["y", "yes"]:
            print("Upgrading checkpoint...")
            assert len(cfg.architectures) == 1
            setattr(cfg.__class__, "model_type", "llava")
            cfg.architectures[0] = "LlavaLlamaForCausalLM"
            cfg.save_pretrained(config)
            print("Checkpoint upgraded.")
        else:
            print("Checkpoint upgrade aborted.")
            exit(1)
```
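The frame-sampling helpers only read three attributes from data_args, so they are easy to exercise in isolation. The sketch below is a minimal, hypothetical call (the video path and attribute values are placeholders):
```python
# Minimal sketch of driving process_video_with_decord; values are illustrative.
from types import SimpleNamespace

data_args = SimpleNamespace(
    video_fps=1,        # target sampling rate: roughly one frame per second
    frames_upbound=32,  # hard cap on the number of frames fed to the model
    force_sample=False, # force uniform re-sampling even below the cap
)

video, video_time, frame_time, n = process_video_with_decord("demo.mp4", data_args)
print(video.shape)   # (n, H, W, 3): sampled RGB frames as a numpy array
print(video_time)    # total video duration in seconds
print(frame_time)    # timestamp string, e.g. "0.00s,1.00s,2.00s,..."
```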
(3) The file model/language_model/llava_qwen.py defines the multimodal language-model classes of the LLaVA-Qwen model, which combine text and images for generative tasks. Its core features are:
- Model configuration: LlavaQwenConfig inherits from Qwen2Config and sets the model type to "llava_qwen".
- Model wrappers: LlavaQwenModel inherits from LlavaMetaModel and Qwen2Model and performs the multimodal feature fusion; LlavaQwenForCausalLM inherits from Qwen2ForCausalLM and LlavaMetaForCausalLM and provides language modeling and generation.
- Multimodal forward pass: forward accepts text-plus-image input and, depending on the mode, returns either raw logits or a full CausalLM output.
- Text generation: generate and prepare_inputs_for_generation support generation with multimodal inputs.
- Model registration: the custom config and model classes are registered with the Hugging Face Transformers framework.
Overall, this file is the multimodal extension of RoboBrain's core language model, allowing a humanoid robot to reason generatively after perceiving images and understanding language.
```python
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from transformers import AutoConfig, AutoModelForCausalLM, Qwen2Config, Qwen2ForCausalLM, Qwen2Model
from transformers.generation.utils import GenerateOutput
from transformers.modeling_outputs import CausalLMOutputWithPast

# LlavaMetaModel and LlavaMetaForCausalLM are the project's multimodal mixins,
# imported from the repo's llava_arch module.

class LlavaQwenConfig(Qwen2Config):
    model_type = "llava_qwen"

class LlavaQwenModel(LlavaMetaModel, Qwen2Model):
    config_class = LlavaQwenConfig

    def __init__(self, config: Qwen2Config):
        super(LlavaQwenModel, self).__init__(config)

@dataclass
class LlavaOutputWithPast(CausalLMOutputWithPast):
    labels: Optional[torch.FloatTensor] = None

class LlavaQwenForCausalLM(Qwen2ForCausalLM, LlavaMetaForCausalLM):
    config_class = LlavaQwenConfig

    def __init__(self, config):
        # The original parent-class initializer, kept for reference:
        # super(Qwen2ForCausalLM, self).__init__(config)
        Qwen2ForCausalLM.__init__(self, config)
        config.model_type = "llava_qwen"
        config.rope_scaling = None

        self.model = LlavaQwenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights and apply final processing
        self.post_init()

    def get_model(self):
        return self.model

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        image_sizes: Optional[List[List[int]]] = None,
        return_dict: Optional[bool] = None,
        modalities: Optional[List[str]] = ["image"],
        dpo_forward: Optional[bool] = False,
        cache_position=None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        if inputs_embeds is None:
            (input_ids, position_ids, attention_mask, past_key_values, inputs_embeds, labels) = self.prepare_inputs_labels_for_multimodal(
                input_ids, position_ids, attention_mask, past_key_values, labels, images, modalities, image_sizes
            )

        if dpo_forward:
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            hidden_states = outputs[0]
            logits = self.lm_head(hidden_states)
            return logits, labels
        else:
            output = super().forward(
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                inputs_embeds=inputs_embeds,
                labels=labels,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
            if return_dict:
                output['labels'] = labels
            return LlavaOutputWithPast(**output)

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        images: Optional[torch.Tensor] = None,
        image_sizes: Optional[torch.Tensor] = None,
        modalities: Optional[List[str]] = ["image"],
        **kwargs,
    ) -> Union[GenerateOutput, torch.LongTensor]:
        position_ids = kwargs.pop("position_ids", None)
        attention_mask = kwargs.pop("attention_mask", None)
        if "inputs_embeds" in kwargs:
            raise NotImplementedError("`inputs_embeds` is not supported")

        if images is not None:
            (inputs, position_ids, attention_mask, _, inputs_embeds, _) = self.prepare_inputs_labels_for_multimodal(
                inputs, position_ids, attention_mask, None, None, images, modalities, image_sizes=image_sizes
            )
        else:
            inputs_embeds = self.get_model().embed_tokens(inputs)

        return super().generate(position_ids=position_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, **kwargs)

    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs):
        images = kwargs.pop("images", None)
        image_sizes = kwargs.pop("image_sizes", None)
        inputs = super().prepare_inputs_for_generation(input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, **kwargs)
        if images is not None:
            inputs["images"] = images
        if image_sizes is not None:
            inputs["image_sizes"] = image_sizes
        return inputs

AutoConfig.register("llava_qwen", LlavaQwenConfig)
AutoModelForCausalLM.register(LlavaQwenConfig, LlavaQwenForCausalLM)
```
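Because of the two register() calls at the end of the file, any checkpoint whose config.json declares model_type: "llava_qwen" can be loaded through the ordinary Auto classes. The sketch below assumes a hypothetical local checkpoint path, and the module above must have been imported first so that the registration has actually run:
```python
import torch
from transformers import AutoConfig, AutoModelForCausalLM

ckpt = "./checkpoints/robobrain-llava-qwen"   # hypothetical local checkpoint directory

config = AutoConfig.from_pretrained(ckpt)     # resolves to LlavaQwenConfig
model = AutoModelForCausalLM.from_pretrained( # resolves to LlavaQwenForCausalLM
    ckpt,
    torch_dtype=torch.float16,
)
model.eval()
```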
(4) The file model/vision_encoder/siglip_encoder.py defines the SigLip family of vision-encoder modules, which encode input images into feature vectors usable by a multimodal model such as LLaVA-Qwen. Its core components are:
- Image preprocessing: SigLipImageProcessor crops, resizes, rescales, normalizes, and reorders channel formats, producing tensors the model can accept.
- Vision-model configuration: SigLipVisionConfig defines the encoder's structural hyperparameters, including hidden size, number of layers, number of attention heads, and activation function, and supports loading from a pretrained configuration.
- Visual feature encoder: SigLipVisionEmbeddings splits an image into patches and adds positional embeddings; SigLipAttention and SigLipMLP implement multi-head self-attention and the feed-forward layer; SigLipEncoderLayer forms a Transformer encoder layer; SigLipEncoder stacks multiple encoder layers into a complete vision Transformer.
- Vision Transformer body: SigLipVisionTransformer encodes the embedded image and produces image features via multi-head attention pooling.
- Pretrained-model wrapper: SigLipVisionModel offers a complete forward interface that can output the last hidden state and the pooled feature.
- Lazy loading and multi-purpose interface: SigLipVisionTower wraps a lazily loadable vision encoder, extracts features from single images or batches, and supports both multimodal training and frozen-parameter scenarios.
In short, this file implements a high-performance vision-encoder framework for multimodal models, covering the full image-to-feature pipeline: preprocessing, Transformer encoding, attention pooling, and feature extraction.
The main code of model/vision_encoder/siglip_encoder.py is shown below.
- The following code implements image preprocessing via the SigLipImageProcessor class: cropping, resizing, normalization, and channel-format conversion, giving the vision encoder a standardized image tensor as input. The idea is to standardize the image data by resizing to a uniform resolution, rescaling and normalizing pixel values, and rearranging channel dimensions so the data fits the input requirements of a Transformer vision model, while also handling sequences of video frames.
```python
class SigLipImageProcessor:
    def __init__(self, image_mean=(0.5, 0.5, 0.5), image_std=(0.5, 0.5, 0.5), size=(384, 384), crop_size: Dict[str, int] = None, resample=PILImageResampling.BICUBIC, rescale_factor=1 / 255, data_format=ChannelDimension.FIRST):
        # Fall back to the default 384x384 crop if none is provided
        crop_size = crop_size if crop_size is not None else {"height": 384, "width": 384}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.image_mean = image_mean          # per-channel mean
        self.image_std = image_std            # per-channel standard deviation
        self.size = size                      # target resize resolution
        self.resample = resample              # resampling method
        self.rescale_factor = rescale_factor  # pixel rescaling factor
        self.data_format = data_format        # channel layout
        self.crop_size = crop_size

    def preprocess(self, images, return_tensors):
        # Wrap a single image in a list
        if isinstance(images, Image.Image):
            images = [images]
        else:
            # Video input: convert each frame to a numpy array
            images = [to_numpy_array(image) for image in images]
        assert isinstance(images, list)

        # The processing pipeline: RGB conversion, resizing, rescaling, normalization, channel layout
        transforms = [
            convert_to_rgb,   # convert to an RGB image
            to_numpy_array,   # convert to a numpy array
            partial(resize, size=self.size, resample=self.resample, data_format=self.data_format),      # resize
            partial(rescale, scale=self.rescale_factor, data_format=self.data_format),                   # rescale to [0, 1]
            partial(normalize, mean=self.image_mean, std=self.image_std, data_format=self.data_format),  # normalize
            partial(to_channel_dimension_format, channel_dim=self.data_format, input_channel_dim=self.data_format),  # channel layout
        ]
        # Apply the transforms in order
        images = reduce(lambda x, f: [*map(f, x)], transforms, images)
        data = {"pixel_values": images}
        # Return a BatchFeature that can be fed directly to the model
        return BatchFeature(data=data, tensor_type=return_tensors)
```
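A quick way to verify the pipeline is to run the processor on a dummy frame; this sketch assumes only the class defined above plus standard PIL/numpy:
```python
import numpy as np
from PIL import Image

processor = SigLipImageProcessor()  # defaults: resize to 384x384, mean = std = 0.5
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # placeholder frame

batch = processor.preprocess(image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 384, 384]), channels-first
```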
- The following code implements patch embedding via the SigLipVisionEmbeddings class, turning the input image into patch embedding vectors that a Transformer can process and adding positional encodings. The image is split into patches, projected into the embedding space by a convolution, and given positional embeddings that preserve spatial information, so the Transformer can reason over both local and global image content.
```python
class SigLipVisionEmbeddings(nn.Module):
    def __init__(self, config: SigLipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size  # embedding dimension
        self.image_size = config.image_size  # input image size
        self.patch_size = config.patch_size  # patch size

        # A convolution splits the image into patches and projects them to the embedding dimension
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,  # number of input channels
            out_channels=self.embed_dim,      # output embedding dimension
            kernel_size=self.patch_size,      # kernel size
            stride=self.patch_size,           # stride equal to the kernel size: non-overlapping patches
            padding="valid",
        )

        # Number of patches and the positional embeddings
        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        # Patchify via the convolution
        patch_embeds = self.patch_embedding(pixel_values)    # shape [batch, embed_dim, grid, grid]
        embeddings = patch_embeds.flatten(2).transpose(1, 2) # flatten to [batch, num_patches, embed_dim]
        # Add the positional embeddings
        embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings
```
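The shape arithmetic is worth checking once by hand. Using the hyperparameters of SigLip-SO400M-style checkpoints purely as an illustration (384-pixel images, 14-pixel patches, hidden size 1152), the layer produces 27 × 27 = 729 patch tokens:
```python
# Shape walk-through with illustrative SigLip-SO400M-style numbers.
image_size, patch_size, embed_dim = 384, 14, 1152
grid = image_size // patch_size   # 27 patches per side
num_patches = grid ** 2           # 729 tokens per image
# Conv2d(kernel=stride=14) maps [B, 3, 384, 384] -> [B, 1152, 27, 27];
# flatten(2).transpose(1, 2) then yields [B, 729, 1152].
print(grid, num_patches)          # 27 729
```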
- The following code implements multi-head self-attention via the SigLipAttention class, capturing global dependencies between image patches. The input embeddings are split into multiple heads; query-key-value attention weights aggregate each patch's features, and a final linear projection restores the input dimensionality.
```python
class SigLipAttention(nn.Module):
    """Multi-head attention from the 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size               # embedding dimension
        self.num_heads = config.num_attention_heads       # number of attention heads
        self.head_dim = self.embed_dim // self.num_heads  # dimension per head
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(f"embed_dim must be divisible by num_heads (embed_dim={self.embed_dim}, num_heads={self.num_heads})")
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        # Q, K, V projections
        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: batch x seq_len x embed_dim"""
        batch_size, q_len, _ = hidden_states.size()

        # Compute Q, K, V
        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Split into heads
        query_states = query_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(batch_size, q_len, self.num_heads, self.head_dim).transpose(1, 2)

        k_v_seq_len = key_states.shape[-2]
        # Attention scores
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scale

        # Apply the attention mask if one is provided
        if attention_mask is not None:
            if attention_mask.size() != (batch_size, 1, q_len, k_v_seq_len):
                raise ValueError(f"Attention mask should be of size {(batch_size, 1, q_len, k_v_seq_len)}, but is {attention_mask.size()}")
            attn_weights = attn_weights + attention_mask

        # Softmax and dropout
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Weighted aggregation of the values
        attn_output = torch.matmul(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(batch_size, q_len, self.embed_dim)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
```
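The attention block can be smoke-tested on its own with a tiny made-up config (these numbers are not the real SigLip sizes):
```python
import torch
from types import SimpleNamespace

cfg = SimpleNamespace(hidden_size=64, num_attention_heads=4, attention_dropout=0.0)
attn = SigLipAttention(cfg)

x = torch.randn(2, 9, 64)   # batch of 2, 9 patch tokens, dim 64
out, weights = attn(x)
print(out.shape)      # torch.Size([2, 9, 64]): same shape as the input
print(weights.shape)  # torch.Size([2, 4, 9, 9]): per-head attention maps
```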
- The following code implements a single Transformer encoder layer via the SigLipEncoderLayer class, composed of multi-head self-attention and a feed-forward network with residual connections and LayerNorm. Self-attention captures dependencies between patches, the MLP applies a non-linear mapping, and the residual paths and normalization keep gradients stable so training converges.
```python
class SigLipEncoderLayer(nn.Module):
    def __init__(self, config: SigLipVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = SigLipAttention(config)                                    # multi-head attention
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)  # first LayerNorm
        self.mlp = SigLipMLP(config)                                                # feed-forward network
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)  # second LayerNorm

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """Forward pass"""
        residual = hidden_states                         # residual branch
        hidden_states = self.layer_norm1(hidden_states)  # pre-norm
        hidden_states, attn_weights = self.self_attn(hidden_states, attention_mask, output_attentions)  # attention
        hidden_states = residual + hidden_states         # residual addition

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)  # second normalization
        hidden_states = self.mlp(hidden_states)          # feed-forward
        hidden_states = residual + hidden_states         # residual addition

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
```
- The following code implements the complete vision Transformer via the SigLipVisionTransformer class: image embeddings pass through the stacked encoder layers, then through LayerNorm and multi-head attention pooling, yielding the final image features. The patch embeddings let the stacked Transformer encoders capture global context, and attention pooling condenses them into a global image representation usable for multimodal alignment or downstream tasks.
```python
class SigLipVisionTransformer(nn.Module):
    def __init__(self, config: SigLipVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        # Patch + position embeddings
        self.embeddings = SigLipVisionEmbeddings(config)
        # Transformer encoder
        self.encoder = SigLipEncoder(config)
        # Output normalization
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        # Multi-head attention pooling producing the image feature
        self.head = SigLipMultiheadAttentionPoolingHead(config)

    def forward(
        self,
        pixel_values,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Embed the image
        hidden_states = self.embeddings(pixel_values)

        # Run the encoder
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.post_layernorm(last_hidden_state)  # LayerNorm

        # Attention pooling produces the global image feature
        pooled_output = self.head(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
```
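Putting the pieces together, a forward pass through the full vision Transformer looks like the following sketch. It assumes SigLipVisionConfig() carries the file's default hyperparameters (hidden size, image size, patch size, layer count):
```python
import torch

config = SigLipVisionConfig()          # assumed default configuration
vit = SigLipVisionTransformer(config)

pixels = torch.randn(1, 3, config.image_size, config.image_size)  # dummy image batch
out = vit(pixels, return_dict=True)
print(out.last_hidden_state.shape)  # [1, num_patches, hidden_size]: per-patch features
print(out.pooler_output.shape)      # [1, hidden_size]: pooled global image feature
```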
(5)"model/vision_resampler"目录负责将高维、高分辨率的视觉特征进行"重采样/压缩/对齐",生成适合大语言模型理解的紧凑视觉表示,是视觉感知到认知推理之间的关键桥梁。例如文件model/vision_resampler/qformer.py实现了本项目多模态人形机器人中的Q-Former(Query-based Transformer)视觉重采样模块,其核心功能是在保持视觉语义完整性的前提下,通过一组可学习的Query Token对高维视觉编码特征进行跨注意力交互,将密集的视觉patch表示压缩为少量、结构化、语言模型友好的视觉语义token。该模块基于改造后的BERT架构,引入周期性交叉注意力层,使Query Token 能主动"向视觉特征提问",从而提取对认知推理和任务决策最有价值的信息,是人形机器人实现"从视觉感知到语言理解与智能决策"的关键中间层。
文件model/vision_resampler/qformer.py的主要代码如下所示。
- 下面代码的核心作用是构建Q-Former的输入嵌入机制。与传统BERT依赖词表不同,Q-Former不接收文本token,而是引入固定数量的、可学习的Query Token。这些Query Token在训练过程中逐渐学会如何从视觉特征中"提问"并抽取信息,是Q-Former能够高效压缩视觉特征的基础。
```python
class BertEmbeddings(nn.Module):
    """
    The embedding layer of BERT / Q-Former.
    Supports both text token embeddings and query embeddings.
    """

    def __init__(self, config):
        super().__init__()
        # Word embeddings (usually unused / set to None in the Q-Former)
        self.word_embeddings = nn.Embedding(
            config.vocab_size,
            config.hidden_size,
            padding_idx=config.pad_token_id
        )
        # Positional embeddings
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings,
            config.hidden_size
        )
        # LayerNorm and dropout
        self.LayerNorm = nn.LayerNorm(
            config.hidden_size,
            eps=config.layer_norm_eps
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Register position_ids so they are not re-created on every forward pass
        self.register_buffer(
            "position_ids",
            torch.arange(config.max_position_embeddings).expand((1, -1))
        )
        self.position_embedding_type = getattr(
            config, "position_embedding_type", "absolute"
        )

    def forward(
        self,
        input_ids=None,
        position_ids=None,
        query_embeds=None,
        past_key_values_length=0,
    ):
        # If input_ids are present, we are embedding text tokens
        if input_ids is not None:
            seq_length = input_ids.size()[1]
        else:
            seq_length = 0

        # Generate position_ids automatically
        if position_ids is None:
            position_ids = self.position_ids[
                :, past_key_values_length : seq_length + past_key_values_length
            ].clone()

        if input_ids is not None:
            # Ordinary text embedding
            embeddings = self.word_embeddings(input_ids)
            if self.position_embedding_type == "absolute":
                position_embeddings = self.position_embeddings(position_ids)
                embeddings = embeddings + position_embeddings
            # Prepend the query embeddings if they are also present
            if query_embeds is not None:
                embeddings = torch.cat((query_embeds, embeddings), dim=1)
        else:
            # Query embeddings only (the typical Q-Former case)
            embeddings = query_embeds

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
```
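The query-only path, which is the one the Q-Former actually exercises, can be sketched in a few lines; the config values below are illustrative BERT-base-style defaults:
```python
import torch
from transformers import BertConfig

config = BertConfig(hidden_size=768)   # illustrative BERT-base-style config
emb = BertEmbeddings(config)

query_embeds = torch.randn(2, 32, 768) # 32 learnable query tokens per sample
out = emb(query_embeds=query_embeds)   # no word lookup: just LayerNorm + dropout
print(out.shape)                       # torch.Size([2, 32, 768])
```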
- The following code implements the cross-attention between query tokens and visual features. The query tokens act as the queries while the vision encoder's outputs serve as keys and values, so attention lets the queries actively select and aggregate visual information. This is what allows the Q-Former to "understand a whole image with only a handful of vectors."
```python
class BertSelfAttention(nn.Module):
    """
    The self-attention and cross-attention implementation of BERT / Q-Former.
    """

    def __init__(self, config, is_cross_attention):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(
            config.hidden_size / config.num_attention_heads
        )
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        # The queries always come from the current hidden_states
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        # Cross-attention: keys and values come from the vision encoder
        if is_cross_attention:
            self.key = nn.Linear(config.encoder_width, self.all_head_size)
            self.value = nn.Linear(config.encoder_width, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (B, L, D) -> (B, H, L, D_head)
        new_x_shape = x.size()[:-1] + (
            self.num_attention_heads,
            self.attention_head_size,
        )
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        # Cross-attention whenever encoder states are provided
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(
                self.key(encoder_hidden_states)
            )
            value_layer = self.transpose_for_scores(
                self.value(encoder_hidden_states)
            )
            attention_mask = encoder_attention_mask
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(
            self.query(hidden_states)
        )

        # Attention scores
        attention_scores = torch.matmul(
            query_layer, key_layer.transpose(-1, -2)
        )
        attention_scores = attention_scores / math.sqrt(
            self.attention_head_size
        )
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask

        # Softmax to obtain attention weights
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        attention_probs = self.dropout(attention_probs)

        # Aggregate the values
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_shape)
        return context_layer
```
- The following code defines a single encoder layer of the Q-Former. On top of the standard Transformer encoder it adds "periodic cross-attention," so the query tokens can repeatedly interact with the visual features across layers and distill progressively higher-level visual semantics.
```python
class BertLayer(nn.Module):
    """
    A single Transformer layer of the Q-Former.
    """

    def __init__(self, config, layer_num):
        super().__init__()
        self.attention = BertAttention(config)
        self.layer_num = layer_num
        # Decide whether this layer gets a cross-attention block
        if config.add_cross_attention and layer_num % config.cross_attention_freq == 0:
            self.crossattention = BertAttention(
                config, is_cross_attention=True
            )
            self.has_cross_attention = True
        else:
            self.has_cross_attention = False

        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        query_length=0,
    ):
        # Self-attention
        attention_output = self.attention(
            hidden_states,
            attention_mask,
        )[0]

        # Only the query tokens take part in cross-attention
        if self.has_cross_attention and query_length > 0:
            query_states = attention_output[:, :query_length, :]
            query_states = self.crossattention(
                query_states,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
            )[0]
            attention_output = torch.cat(
                [query_states, attention_output[:, query_length:, :]],
                dim=1,
            )

        # Feed-forward network
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(
            intermediate_output, attention_output
        )
        return layer_output
```
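The cross_attention_freq logic is easy to misread, so it helps to enumerate which layers receive a cross-attention block. With 12 layers and a frequency of 2 (illustrative numbers), it is every even-indexed layer:
```python
# Which layers get a cross-attention block, for illustrative settings.
num_hidden_layers, cross_attention_freq = 12, 2
cross_layers = [i for i in range(num_hidden_layers) if i % cross_attention_freq == 0]
print(cross_layers)  # [0, 2, 4, 6, 8, 10]: every other layer queries the vision features
```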
- The following code builds the complete multi-layer Q-Former encoder by stacking BertLayer modules, letting the query tokens repeatedly interact with, aggregate, and abstract the visual features across layers; this stacking is the main source of the Q-Former's expressive power.
```python
class BertEncoder(nn.Module):
    """
    The Q-Former encoder, a stack of BertLayer modules.
    """

    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList(
            [BertLayer(config, i) for i in range(config.num_hidden_layers)]
        )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        query_length=0,
    ):
        for layer_module in self.layer:
            hidden_states = layer_module(
                hidden_states,
                attention_mask=attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                query_length=query_length,
            )
        return hidden_states
```
- The following code is the overall wrapper and forward-inference entry point of the Q-Former module. It does three things: builds the Q-Former structure, defines the learnable query tokens, and, in forward, resamples the visual features with those query tokens to output a compact multimodal representation.
```python
class Qformer(nn.Module):
    """
    The Q-Former vision-resampling module.
    """

    def __init__(self, model_args, vision_tower):
        super().__init__()
        self.depth = model_args.mm_qformer_depth
        self.num_latents = model_args.mm_qformer_latents
        self.Qformer, self.query_tokens, self.ln_vision = \
            self.build_Qformer(
                vision_tower.hidden_size,
                self.depth,
                self.num_latents
            )

    def build_Qformer(self, vision_width, cross_attention_freq, num_query_token):
        config = BertConfig.from_pretrained("bert-base-uncased")
        config.encoder_width = vision_width
        config.add_cross_attention = True
        config.cross_attention_freq = cross_attention_freq
        config.query_length = num_query_token
        Qformer = BertLMHeadModel(config)
        query_tokens = nn.Parameter(
            torch.zeros(1, num_query_token, config.hidden_size)
        )
        query_tokens.data.normal_(
            mean=0.0, std=config.initializer_range
        )
        return Qformer, query_tokens, nn.LayerNorm(vision_width)

    def forward(self, image_features):
        # Normalize the visual features
        image_features = self.ln_vision(image_features)
        image_atts = torch.ones(
            image_features.size()[:-1],
            dtype=torch.long,
            device=image_features.device,
        )
        # Expand the query tokens to the batch dimension
        query_tokens = self.query_tokens.expand(
            image_features.shape[0], -1, -1
        )
        # Cross-attention between the queries and the visual features
        output = self.Qformer.bert(
            query_embeds=query_tokens,
            encoder_hidden_states=image_features,
            encoder_attention_mask=image_atts,
            return_dict=True,
        )
        return output.last_hidden_state
```
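End to end, the resampler turns a long sequence of patch features into a short sequence of query outputs. The sketch below uses stand-in values for model_args and the vision hidden size (and fetches the bert-base-uncased config on first run); the output width is the Q-Former's own hidden size, 768 for a BERT-base backbone:
```python
import torch
from types import SimpleNamespace

model_args = SimpleNamespace(mm_qformer_depth=2, mm_qformer_latents=32)  # illustrative values
vision_tower = SimpleNamespace(hidden_size=1152)                         # e.g. a SigLip-style width

resampler = Qformer(model_args, vision_tower)

image_features = torch.randn(1, 729, 1152)  # dummy patch features from a vision encoder
compressed = resampler(image_features)
print(compressed.shape)                     # torch.Size([1, 32, 768]): 729 tokens -> 32 query tokens
```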