import librosa
import numpy as np
import utils
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from torchvision.models.feature_extraction import create_feature_extractor
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def extract_mbe(_y, _sr, _nfft, _nb_mel):
    # Log mel-band energies (mel spectrogram)
    spec = librosa.core.spectrum._spectrogram(y=_y, n_fft=_nfft, hop_length=_nfft // 2, power=1)[0]
    mel_basis = librosa.filters.mel(sr=_sr, n_fft=_nfft, n_mels=_nb_mel)
    mel_spec = np.log(np.dot(mel_basis, spec).T)
    return mel_spec  # the result must be [frames, dimensions]
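# Note: librosa.core.spectrum._spectrogram is a private librosa helper. A public
# equivalent (a sketch, not the author's original call) would be
#   spec = np.abs(librosa.stft(_y, n_fft=_nfft, hop_length=_nfft // 2)) ** 1
# which gives the same magnitude spectrogram for power=1.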
def preprocess_data(X, seq_len, nb_ch):
    # Split into fixed-length sequences and channels
    X = utils.split_in_seqs(X, seq_len)
    X = utils.split_multi_channels(X, nb_ch)
    # Convert to a PyTorch tensor and reorder to [num_sequences, channels, feature_dim, seq_len]
    X = torch.Tensor(X)
    X = X.permute(0, 1, 3, 2)  # e.g. [709, 2, 40, 256] in the author's original note: [total samples, channels, feature dim, width]
    return X
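# `utils` is not shown in the post. Assumed behaviour of the two helpers (based on the
# common sound-event-detection baseline code this script appears to follow; an assumption,
# not confirmed by the original post):
#   split_in_seqs(X, seq_len):      [frames, dims] -> [num_sequences, seq_len, dims]
#   split_multi_channels(X, nb_ch): -> [num_sequences, nb_ch, seq_len, dims]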
# Extract mel-spectrogram features
audio_path = "b093.wav"
y, sr = librosa.load(audio_path, sr=44100)
mel = extract_mbe(y, sr, 2048, 64)
value = preprocess_data(mel, 256, 1).to(device)  # `value` is the sample feature tensor fed to the model
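# With the parameters above (64 mel bands, seq_len=256, nb_ch=1), `value` is expected to
# have shape [num_sequences, 1, 64, 256] (inferred from the code, not stated in the post).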
features = {"cnn1": '1', "cnn2": '2', "cnn3": '3', "cnn4": '4', "cnn5": '5', "cnn6": '6'}
model = torch.load('best_model_2.pth', map_location=device)
model.eval()
feature_extractor = create_feature_extractor(model, return_nodes=features)
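# If the node names "cnn1"..."cnn6" do not match your model, torchvision can list the
# valid graph node names (usage sketch, not part of the original post):
#   from torchvision.models.feature_extraction import get_graph_node_names
#   train_nodes, eval_nodes = get_graph_node_names(model)
#   print(eval_nodes)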
out = feature_extractor(value)
layer = "3"  # key of the cnn3 output in the returned dict
# Concatenate the feature maps of the first two sequences along the height axis
out = torch.cat((out[layer][0], out[layer][1]), dim=1)
out = out.unsqueeze(0)
# Upsample to a fixed display size
out = F.interpolate(out, size=(470, 64), mode='bilinear', align_corners=False)
out = out.squeeze(0).permute(2, 0, 1)  # [128, 256, 64] -> [64, 128, 256] -> [vertical, values, horizontal]
plt.imshow(out.sum(1).detach().cpu().numpy(), origin='lower')
plt.show()
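# Optional extension (not in the original post): instead of summing over channels,
# individual channel maps can be inspected one at a time, e.g.
#   for k in range(4):
#       plt.imshow(out[:, k, :].detach().cpu().numpy(), origin='lower')
#       plt.show()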
Visualizing intermediate-layer feature maps of a neural network (audio input)
孜孜不倦fly · 2023-11-15 12:41