Fourier Layer

Each input dimension is expanded in a truncated Fourier basis cos(kx), sin(kx) for k = 1, ..., gridsize, and learnable coefficients mix the basis responses into the output dimensions. The grid size itself is stored as a learnable parameter that is rounded to an integer in the forward pass.
```python
import torch
import torch.nn as nn

class NaiveFourierKANLayer(nn.Module):
    def __init__(self, inputdim, outdim, initial_gridsize, addbias=True):
        super(NaiveFourierKANLayer, self).__init__()
        self.addbias = addbias
        self.inputdim = inputdim
        self.outdim = outdim
        # Learnable gridsize parameter (rounded to an integer in forward)
        self.gridsize_param = nn.Parameter(torch.tensor(initial_gridsize, dtype=torch.float32))
        # Fourier coefficients as a learnable parameter with Xavier initialization
        self.fouriercoeffs = nn.Parameter(torch.empty(2, outdim, inputdim, initial_gridsize))
        nn.init.xavier_uniform_(self.fouriercoeffs)
        if self.addbias:
            self.bias = nn.Parameter(torch.zeros(1, outdim))

    def forward(self, x):
        # Round the learnable gridsize and cap it at the number of stored
        # coefficients so the slice below always matches the basis shape.
        # Note that rounding blocks gradient flow, so this parameter acts as
        # a capped frequency count rather than a truly trainable quantity.
        gridsize = int(torch.clamp(self.gridsize_param, min=1,
                                   max=self.fouriercoeffs.shape[-1]).round().item())
        xshp = x.shape
        outshape = xshp[:-1] + (self.outdim,)
        x = torch.reshape(x, (-1, self.inputdim))
        # Frequencies k = 1..gridsize, shaped for broadcasting
        k = torch.reshape(torch.arange(1, gridsize + 1, device=x.device),
                          (1, 1, 1, gridsize))
        xrshp = torch.reshape(x, (x.shape[0], 1, x.shape[1], 1))
        # Fourier basis: cos(kx) and sin(kx) per input dimension
        c = torch.cos(k * xrshp)
        s = torch.sin(k * xrshp)
        # Contract basis values with coefficients over (inputdim, gridsize)
        y = torch.sum(c * self.fouriercoeffs[0:1, :, :, :gridsize], (-2, -1))
        y += torch.sum(s * self.fouriercoeffs[1:2, :, :, :gridsize], (-2, -1))
        if self.addbias:
            y += self.bias
        y = torch.reshape(y, outshape)
        return y
```
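A minimal smoke test for the layer above; the batch size, feature counts, and grid size are illustrative assumptions, not values from the original post:

```python
# Illustrative smoke test: 8 samples, 4 input features -> 3 outputs,
# with up to 5 Fourier frequencies per input dimension.
layer = NaiveFourierKANLayer(inputdim=4, outdim=3, initial_gridsize=5)
x = torch.randn(8, 4)
y = layer(x)
print(y.shape)  # torch.Size([8, 3])
```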
Laplace Layer

The same template with an exponential basis: each input passes through exp(-λx) and exp(λx) over a fixed grid of rates λ ∈ [0.1, 1], mimicking a discretized two-sided Laplace transform.
```python
import torch
import torch.nn as nn

class NaiveLaplaceKANLayer(nn.Module):
    def __init__(self, inputdim, outdim, initial_gridsize, addbias=True):
        super(NaiveLaplaceKANLayer, self).__init__()
        self.addbias = addbias
        self.inputdim = inputdim
        self.outdim = outdim
        # Learnable gridsize parameter (rounded to an integer in forward)
        self.gridsize_param = nn.Parameter(torch.tensor(initial_gridsize, dtype=torch.float32))
        # Laplace coefficients as a learnable parameter with Xavier initialization
        self.laplacecoeffs = nn.Parameter(torch.empty(2, outdim, inputdim, initial_gridsize))
        nn.init.xavier_uniform_(self.laplacecoeffs)
        if self.addbias:
            self.bias = nn.Parameter(torch.zeros(1, outdim))

    def forward(self, x):
        # Round the learnable gridsize and cap it at the stored coefficient count
        gridsize = int(torch.clamp(self.gridsize_param, min=1,
                                   max=self.laplacecoeffs.shape[-1]).round().item())
        xshp = x.shape
        outshape = xshp[:-1] + (self.outdim,)
        x = torch.reshape(x, (-1, self.inputdim))
        # Grid of decay rates lambda in [0.1, 1]
        lambdas = torch.reshape(torch.linspace(0.1, 1., gridsize, device=x.device),
                                (1, 1, 1, gridsize))
        xrshp = torch.reshape(x, (x.shape[0], 1, x.shape[1], 1))
        # Exponential basis for the Laplace-style transform
        exp_neg = torch.exp(-lambdas * xrshp)
        exp_pos = torch.exp(lambdas * xrshp)
        # Contract basis values with coefficients over (inputdim, gridsize)
        y = torch.sum(exp_neg * self.laplacecoeffs[0:1, :, :, :gridsize], (-2, -1))
        y += torch.sum(exp_pos * self.laplacecoeffs[1:2, :, :, :gridsize], (-2, -1))
        if self.addbias:
            y += self.bias
        y = torch.reshape(y, outshape)
        return y
```
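One practical caveat: exp(λx) grows quickly as |x| increases, so unbounded inputs can overflow. A minimal sketch of one way to guard against this; the tanh squashing and all sizes are assumptions of this sketch, not part of the original code:

```python
# Illustrative usage: the exponential basis grows as exp(lambda * |x|),
# so bounding the inputs (here with tanh, an assumption of this sketch)
# keeps activations in a safe numerical range.
layer = NaiveLaplaceKANLayer(inputdim=4, outdim=3, initial_gridsize=5)
x = torch.randn(8, 4) * 10.0    # wide-range raw features
y = layer(torch.tanh(x))        # squash to (-1, 1) before the layer
print(y.shape)  # torch.Size([8, 3])
```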
Legendre Layer

Legendre polynomials up to max_degree are built with the three-term (Bonnet) recurrence and linearly combined across degrees and input dimensions; dropout on the polynomial features provides light regularization. Inputs are assumed to lie in [-1, 1], the interval on which the polynomials are orthogonal.
```python
import torch
import torch.nn as nn

class RecurrentLegendreLayer(nn.Module):
    def __init__(self, max_degree, input_dim, output_dim):
        super(RecurrentLegendreLayer, self).__init__()
        self.max_degree = max_degree
        self.input_dim = input_dim
        self.output_dim = output_dim
        # One weight per polynomial degree for every input/output dimension pair
        self.weights = nn.Parameter(torch.randn(max_degree + 1, self.input_dim, self.output_dim))
        # nn.init.xavier_normal_(self.weights)
        nn.init.orthogonal_(self.weights)
        # nn.init.kaiming_normal_(self.weights)
        self.dropout = nn.Dropout(.1)
        # Optional: bias for each output dimension
        self.bias = nn.Parameter(torch.zeros(self.output_dim))

    def forward(self, x):
        batch_size = x.shape[0]
        # Initialize P0 = 1 and P1 = x for the recurrence relation
        P_n_minus_2 = torch.ones((batch_size, self.input_dim), device=x.device)
        P_n_minus_1 = x.clone()
        # Store all polynomial values
        polys = [P_n_minus_2.unsqueeze(-1), P_n_minus_1.unsqueeze(-1)]
        # Bonnet's recurrence: n * P_n = (2n - 1) * x * P_{n-1} - (n - 1) * P_{n-2}
        for n in range(2, self.max_degree + 1):
            P_n = ((2 * n - 1) * x * P_n_minus_1 - (n - 1) * P_n_minus_2) / n
            polys.append(P_n.unsqueeze(-1))
            P_n_minus_2, P_n_minus_1 = P_n_minus_1, P_n
        # Concatenate all polynomial values
        polys = torch.cat(polys, dim=-1)  # Shape: [batch_size, input_dim, max_degree + 1]
        polys = self.dropout(polys)
        # Linearly combine polynomial features across inputs and degrees
        output = torch.einsum('bif,fio->bo', polys, self.weights) + self.bias
        return output
```
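Since the recurrence assumes inputs in [-1, 1], features should be rescaled before the layer. A minimal sketch; the min-max scaling and all sizes are assumptions for illustration:

```python
# Illustrative usage: rescale features into [-1, 1] (min-max per column,
# an assumption of this sketch) so the Legendre recurrence stays well-behaved.
layer = RecurrentLegendreLayer(max_degree=4, input_dim=4, output_dim=3)
x = torch.randn(8, 4)
x = 2 * (x - x.min(0).values) / (x.max(0).values - x.min(0).values + 1e-8) - 1
y = layer(x)
print(y.shape)  # torch.Size([8, 3])
```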
Wavelet Layer

A Morlet-style wavelet bank: each input is shifted by a grid of translations, stretched by a grid of scales, and passed through a cosine/sine carrier under a Gaussian envelope; the real and imaginary responses receive separate learnable coefficients.
```python
import torch
import torch.nn as nn
import numpy as np

class NaiveWaveletKANLayer(nn.Module):
    def __init__(self, inputdim, outdim, initial_gridsize, addbias=True):
        super(NaiveWaveletKANLayer, self).__init__()
        self.addbias = addbias
        self.inputdim = inputdim
        self.outdim = outdim
        # Learnable gridsize parameter (rounded to an integer in forward)
        self.gridsize_param = nn.Parameter(torch.tensor(initial_gridsize, dtype=torch.float32))
        # Wavelet coefficients as a learnable parameter with Xavier initialization
        self.waveletcoeffs = nn.Parameter(torch.empty(2, outdim, inputdim, initial_gridsize))
        nn.init.xavier_uniform_(self.waveletcoeffs)
        if self.addbias:
            self.bias = nn.Parameter(torch.zeros(1, outdim))

    def forward(self, x):
        # Round the learnable gridsize and cap it at the stored coefficient count
        gridsize = int(torch.clamp(self.gridsize_param, min=1,
                                   max=self.waveletcoeffs.shape[-1]).round().item())
        xshp = x.shape
        outshape = xshp[:-1] + (self.outdim,)
        x = torch.reshape(x, (-1, self.inputdim))
        # One (scale, translation) pair per grid point, shaped for broadcasting
        scales = torch.linspace(1, gridsize, gridsize, device=x.device).reshape(1, 1, 1, gridsize)
        translations = torch.linspace(0, 1, gridsize, device=x.device).reshape(1, 1, 1, gridsize)
        # Morlet wavelet: cosine/sine carrier under a Gaussian envelope
        xrshp = torch.reshape(x, (x.shape[0], 1, x.shape[1], 1))
        u = (xrshp - translations) * scales
        real = torch.cos(np.pi * u) * torch.exp(-u**2 / 2.)
        imag = torch.sin(np.pi * u) * torch.exp(-u**2 / 2.)
        # Contract real and imaginary responses with their coefficients
        y_real = torch.sum(real * self.waveletcoeffs[0:1, :, :, :gridsize], (-2, -1))
        y_imag = torch.sum(imag * self.waveletcoeffs[1:2, :, :, :gridsize], (-2, -1))
        y = y_real + y_imag
        if self.addbias:
            y += self.bias
        y = torch.reshape(y, outshape)
        return y
```
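All four layers share the same (batch, features) interface, so they compose like ordinary nn.Module blocks. A minimal sketch of stacking two of them; the layer sizes and the intermediate tanh are illustrative assumptions, not a prescribed architecture:

```python
# Illustrative two-layer KAN-style model mixing the basis layers above.
# The intermediate tanh keeps activations bounded, which suits the
# Fourier/wavelet bases; the sizes are arbitrary for the demo.
model = nn.Sequential(
    NaiveWaveletKANLayer(inputdim=4, outdim=16, initial_gridsize=5),
    nn.Tanh(),
    NaiveFourierKANLayer(inputdim=16, outdim=1, initial_gridsize=5),
)
x = torch.randn(8, 4)
print(model(x).shape)  # torch.Size([8, 1])
```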