Encoding images into feature vectors
siglip-so400m-patch14-384
- Name: siglip-so400m-patch14-384
- Family: SigLIP, a CLIP-style (Contrastive Language-Image Pretraining) model trained with a sigmoid loss instead of the softmax contrastive loss
- Architecture: Vision Transformer (ViT)
- Patch size: 14×14
- Input image size: 384×384 (see the token-count sketch after this list)
- Goal: encode an image into a feature vector (embedding) that can be aligned with text embeddings
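A quick sanity check on what these numbers imply for output shapes (the 1152-dim feature width is the one referenced in the similarity test later on this page):

```python
# Patch-grid arithmetic for siglip-so400m-patch14-384.
image_size = 384
patch_size = 14
hidden_dim = 1152  # embedding width of this checkpoint

patches_per_side = image_size // patch_size  # 27 (the conv stem uses stride 14)
num_tokens = patches_per_side ** 2           # 729 patch tokens; SigLIP adds no [CLS] token

print(num_tokens, hidden_dim)  # -> 729 1152
# The vision tower's hidden states therefore have shape [batch, 729, 1152].
```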
Download:
https://huggingface.co/google/siglip-so400m-patch14-384
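To run the scripts below from a local path, the checkpoint can be fetched with `huggingface_hub` (a minimal sketch, assuming `huggingface_hub` is installed; the target directory is illustrative):

```python
from huggingface_hub import snapshot_download

# Download the full checkpoint into a local directory (path is an example).
local_dir = snapshot_download(
    repo_id="google/siglip-so400m-patch14-384",
    local_dir="./models/siglip-so400m-patch14-384",
)
print(f"Model downloaded to: {local_dir}")
```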
Inference example:
```python
from PIL import Image
import requests
from transformers import AutoProcessor, AutoModel
import torch

model = AutoModel.from_pretrained("google/siglip-so400m-patch14-384")
processor = AutoProcessor.from_pretrained("google/siglip-so400m-patch14-384")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

texts = ["a photo of 2 cats", "a photo of 2 dogs"]
inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

logits_per_image = outputs.logits_per_image
probs = torch.sigmoid(logits_per_image)  # these are the probabilities
print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
```
Feature-extraction test code:
```python
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModel
# 1. Load the local model and processor
DEVICE = "cuda"
# LOCAL_MODEL_PATH = "/data/lbg/models/textoon/ComfyUI/models/clip/siglip-so400m-patch14-384"  # Linux path
LOCAL_MODEL_PATH = r"D:\data\models\siglip-so400m-patch14-384"  # Windows path (pick the one for your machine)
print(f"Loading model from local path: {LOCAL_MODEL_PATH}")
try:
    clip_processor = AutoProcessor.from_pretrained(LOCAL_MODEL_PATH)
    clip_model = AutoModel.from_pretrained(
        LOCAL_MODEL_PATH,
        trust_remote_code=True,  # some SigLIP checkpoints may need this
    )
    clip_model = clip_model.vision_model  # keep only the vision tower
    print("✅ Model loaded successfully")
except Exception as e:
    print(f"❌ Failed to load: {e}")
    exit(1)

clip_model.eval()
clip_model.requires_grad_(False)
clip_model.to(DEVICE)
local_image_path ="D:\soft\mm03.png"
test_image = Image.open(local_image_path).convert("RGB")
try:
inputs = clip_processor(
images=test_image,
return_tensors="pt",
padding=True
)
pixel_values = inputs.pixel_values.to("cuda")
print(f"✅ 图像预处理成功,输入形状: {pixel_values.shape}")
except Exception as e:
print(f"❌ 预处理失败: {e}")
exit(1)
DEVICE = "cuda"
target_hidden_state = []

# Hook function: capture the output of the layer it is registered on
def hook_fn(module, input, output):
    # output is usually a (hidden_state,) tuple
    target_hidden_state.append(output[0] if isinstance(output, tuple) else output)

# Register the hook on the second-to-last encoder layer
# (clip_model is the vision_model; its encoder exposes a `layers` list)
target_layer = clip_model.encoder.layers[-2]
hook_handle = target_layer.register_forward_hook(hook_fn)
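# Note: penultimate-layer features are a common choice when CLIP-style image
# encoders condition downstream models; this hook grabs the same tensor that the
# disabled output_hidden_states branch below reads as hidden_states[-2].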
with torch.amp.autocast(DEVICE, enabled=True):
    # Normal forward pass; the hook captures the target layer's output
    vision_outputs = clip_model(pixel_values=pixel_values)

# Fetch the captured features from the list
if target_hidden_state:
    image_features = target_hidden_state[0]  # output of the second-to-last layer
else:
    # Fallback
    image_features = vision_outputs.last_hidden_state

# Very important: remove the hook to avoid memory leaks
hook_handle.remove()
if False:  # Disabled alternative: get the same features via output_hidden_states
    try:
        with torch.amp.autocast(DEVICE, enabled=True):
            # Forward pass, asking the model to return all hidden states
            vision_outputs = clip_model(
                pixel_values=pixel_values,
                output_hidden_states=True,
            )
        if vision_outputs.hidden_states is None:
            print("vision_outputs.hidden_states is None")
            exit(52)
        image_features = vision_outputs.hidden_states[-2]  # second-to-last layer
        last_hidden_state = vision_outputs.last_hidden_state
        print("✅ Inference finished!")
        print(f"  Image feature shape (second-to-last layer): {image_features.shape}")
        print(f"  Last hidden state shape: {last_hidden_state.shape}")
        print(f"  Feature dtype: {image_features.dtype}")
        print(f"  Feature value range: [{image_features.min():.4f}, {image_features.max():.4f}]")

        # Sanity-check the output
        if torch.isnan(image_features).any():
            print("⚠️ Warning: output contains NaN values!")
        else:
            print("✅ Output check: no NaN values")
    except torch.cuda.OutOfMemoryError:
        print("❌ CUDA out of memory! Try a smaller batch size or image size")
    except RuntimeError as e:
        print(f"❌ Runtime error: {e}")
    except Exception as e:
        print(f"❌ Error during inference: {e}")
# 6. Extra test: similarity computation (optional)
def test_similarity(features):
    """Test similarity computation on the feature vectors."""
    try:
        # Normalize (dim=-1 normalizes over the last axis, i.e. the 1152-dim feature axis)
        normalized_features = torch.nn.functional.normalize(features, dim=-1)
        # Similarity matrix
        similarity = torch.mm(normalized_features, normalized_features.t())
        print(f"\nSimilarity matrix shape: {similarity.shape}")
        print(f"Similarity matrix:\n{similarity}")
        # The diagonal should be close to 1.0 (a normalized vector dotted with itself is 1)
        diag_values = similarity.diag()
        print(f"Diagonal values (should be close to 1.0): {diag_values}")
        # Check: all diagonal entries very close to 1 (allowing small float error)
        if torch.allclose(diag_values, torch.ones_like(diag_values), rtol=1e-3):
            print("✅ Self-similarity test passed (diagonal ≈ 1)")
        else:
            print("⚠️ Self-similarity test failed")
        return True
    except Exception as e:
        print(f"Similarity test failed: {e}")
        return False

# Run the similarity test (on the second-to-last-layer features)
print("\nRunning extra test...")
test_similarity(image_features[:, 0, :])  # first token's features (SigLIP's ViT has no [CLS] token)
# 7. Cleanup and summary
torch.cuda.empty_cache()
print(f"\n{'=' * 50}")
print("Test summary:")
print(f"  Model path: {LOCAL_MODEL_PATH}")
print(f"  Device: {DEVICE}")
print(f"  Input shape: {pixel_values.shape}")
print(f"  Output shape: {image_features.shape}")
print(f"  Parameter count: {sum(p.numel() for p in clip_model.parameters()):,}")
print("=" * 50)
```