卡尔曼滤波与力场插值算法代码框架

一、扩展算法实现

1.1 完整卡尔曼滤波库

python
# ============================================================================
# 文件:tracking/kalman_filters.py
# ============================================================================

import numpy as np
from scipy.linalg import cholesky, solve_triangular
from typing import Tuple, Optional, Callable, List, Dict
from dataclasses import dataclass
from enum import Enum

class FilterType(Enum):
    """Supported tracking-filter families."""

    KF = "kalman_filter"
    EKF = "extended_kalman_filter"
    UKF = "unscented_kalman_filter"
    IMM = "interacting_multiple_model"
    CKF = "cubature_kalman_filter"

@dataclass
class FilterConfig:
    """Configuration shared by all filter variants in this module."""
    filter_type: FilterType = FilterType.UKF
    dt: float = 0.033  # frame interval in seconds (~30 Hz)
    process_noise_std: Optional[np.ndarray] = None  # per-state std [x,y,z,vx,vy,vz]; defaulted in __post_init__
    measurement_noise_std: Optional[np.ndarray] = None  # per-axis std [x,y,z]; defaulted in __post_init__
    
    # UKF parameters
    alpha: float = 1e-3  # sigma-point spread around the mean
    beta: float = 2.0  # prior-distribution parameter (2 is optimal for Gaussian)
    kappa: float = 0.0  # secondary scaling parameter
    
    # IMM parameters
    transition_matrix: np.ndarray = None  # model transition probability matrix
    
    def __post_init__(self):
        # Fill in default noise levels when the caller supplies none.
        if self.process_noise_std is None:
            self.process_noise_std = np.array([10.0, 10.0, 10.0, 1.0, 1.0, 1.0])
        if self.measurement_noise_std is None:
            self.measurement_noise_std = np.array([10.0, 10.0, 10.0])

class KalmanFilterBase:
    """Common interface for all Kalman-filter variants.
    
    Subclasses must implement predict/update/get_state/get_covariance.
    """
    
    def __init__(self, config: FilterConfig):
        self.config = config
        self.dt = config.dt  # time step between predict() calls
        self.is_initialized = False  # set once a first state/measurement is seen
        
    def predict(self, control_input: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
        """Propagate the state one step; returns (position, velocity)."""
        raise NotImplementedError
    
    def update(self, measurement: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Fuse one measurement; returns (position, velocity)."""
        raise NotImplementedError
    
    def get_state(self) -> np.ndarray:
        """Return the current full state estimate."""
        raise NotImplementedError
    
    def get_covariance(self) -> np.ndarray:
        """Return the current state covariance."""
        raise NotImplementedError


class UnscentedKalmanFilter3D(KalmanFilterBase):
    """
    3D Unscented Kalman Filter (UKF).

    State vector: [x, y, z, vx, vy, vz]; measurement vector: [x, y, z].
    Sigma points are propagated through (possibly nonlinear) transition
    and measurement functions to capture the state distribution.
    """
    
    def __init__(self, config: FilterConfig):
        super().__init__(config)
        
        self.n_state = 6  # [x, y, z, vx, vy, vz]
        self.n_meas = 3   # [x, y, z]
        
        # UKF scaling parameters.
        self.alpha = config.alpha
        self.beta = config.beta
        self.kappa = config.kappa
        self.lambda_ = self.alpha**2 * (self.n_state + self.kappa) - self.n_state
        
        # BUG FIX: n_sigma must be assigned BEFORE _compute_weights(),
        # which reads it; the original assigned it afterwards, raising
        # AttributeError on construction.
        self.n_sigma = 2 * self.n_state + 1
        
        # Sigma-point weights.
        self._compute_weights()
        
        # State estimate and covariance (large initial uncertainty).
        self.state = np.zeros(self.n_state)
        self.P = np.eye(self.n_state) * 100.0
        
        # Process / measurement noise covariances.
        self.Q = np.diag(config.process_noise_std**2)
        self.R = np.diag(config.measurement_noise_std**2)
        
        # Propagated sigma points, filled in by predict().
        self.sigma_points = np.zeros((self.n_sigma, self.n_state))
        
        # Predicted mean/covariance; initialized so update() is well
        # defined even if called before the first predict().
        self.state_pred = self.state.copy()
        self.P_pred = self.P.copy()
        
    def _compute_weights(self):
        """Compute sigma-point mean (Wm) and covariance (Wc) weights."""
        n = self.n_state
        
        base = 1.0 / (2 * (n + self.lambda_))
        self.Wm = np.full(self.n_sigma, base)  # mean weights
        self.Wc = np.full(self.n_sigma, base)  # covariance weights
        
        self.Wm[0] = self.lambda_ / (n + self.lambda_)
        self.Wc[0] = self.Wm[0] + (1 - self.alpha**2 + self.beta)
    
    def _generate_sigma_points(self, state: np.ndarray, P: np.ndarray) -> np.ndarray:
        """
        Generate 2n+1 sigma points around `state`.
        
        sigma[0]   = state
        sigma[i]   = state + col_i(sqrt((n+λ)P)),  i = 1..n
        sigma[i+n] = state - col_i(sqrt((n+λ)P)),  i = 1..n
        """
        sigma_points = np.zeros((self.n_sigma, self.n_state))
        sigma_points[0] = state
        
        scale = self.n_state + self.lambda_
        
        try:
            # Cholesky factor as the matrix square root.
            L = cholesky(scale * P, lower=True)
        except np.linalg.LinAlgError:
            # P lost positive definiteness (round-off): regularize slightly.
            P_reg = P + np.eye(self.n_state) * 1e-6
            L = cholesky(scale * P_reg, lower=True)
        
        for i in range(self.n_state):
            sigma_points[i + 1] = state + L[:, i]
            sigma_points[i + 1 + self.n_state] = state - L[:, i]
        
        return sigma_points
    
    def set_state(self, position: np.ndarray, velocity: Optional[np.ndarray] = None):
        """Initialize the state from a position and optional velocity."""
        self.state[:3] = position
        if velocity is not None:
            self.state[3:] = velocity
        self.is_initialized = True
    
    def predict(self, control_input: Optional[np.ndarray] = None,
                state_transition: Optional[Callable] = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        UKF prediction step.
        
        state_transition: nonlinear transition x_{k+1} = f(x_k, u_k);
                          defaults to the constant-velocity model.
        Returns (predicted position, predicted velocity).
        """
        if not self.is_initialized:
            return self.state[:3], self.state[3:]
        
        if state_transition is None:
            state_transition = self._constant_velocity_model
        
        sigma_points = self._generate_sigma_points(self.state, self.P)
        
        # Propagate sigma points through the transition function.
        sigma_points_pred = np.zeros_like(sigma_points)
        for i in range(self.n_sigma):
            sigma_points_pred[i] = state_transition(sigma_points[i], control_input)
        
        # BUG FIX: store the PROPAGATED sigma points; update() consumes
        # them for the cross-covariance. The original kept the
        # pre-propagation points, making the update step inconsistent
        # with state_pred.
        self.sigma_points = sigma_points_pred
        
        # Predicted state mean (weighted sum of sigma points).
        self.state_pred = self.Wm @ sigma_points_pred
        
        # Predicted state covariance.
        self.P_pred = self.Q.copy()
        for i in range(self.n_sigma):
            diff = sigma_points_pred[i] - self.state_pred
            self.P_pred += self.Wc[i] * np.outer(diff, diff)
        
        return self.state_pred[:3], self.state_pred[3:]
    
    def update(self, measurement: np.ndarray,
               measurement_function: Optional[Callable] = None) -> Tuple[np.ndarray, np.ndarray]:
        """
        UKF update step.
        
        measurement_function: nonlinear observation z = h(x); defaults
                              to linear position observation.
        Returns (position, velocity) of the updated state.
        """
        if not self.is_initialized:
            # First measurement bootstraps the state.
            self.set_state(measurement)
            return measurement, np.zeros(3)
        
        if measurement_function is None:
            measurement_function = self._linear_measurement
        
        # Map the propagated sigma points into measurement space.
        sigma_points_z = np.zeros((self.n_sigma, self.n_meas))
        for i in range(self.n_sigma):
            sigma_points_z[i] = measurement_function(self.sigma_points[i])
        
        # Predicted measurement mean.
        z_pred = self.Wm @ sigma_points_z
        
        # Innovation covariance.
        S = self.R.copy()
        for i in range(self.n_sigma):
            diff = sigma_points_z[i] - z_pred
            S += self.Wc[i] * np.outer(diff, diff)
        
        # State-measurement cross covariance.
        Pxz = np.zeros((self.n_state, self.n_meas))
        for i in range(self.n_sigma):
            diff_x = self.sigma_points[i] - self.state_pred
            diff_z = sigma_points_z[i] - z_pred
            Pxz += self.Wc[i] * np.outer(diff_x, diff_z)
        
        # Kalman gain K = Pxz S^{-1}; solve() is better conditioned than
        # forming the explicit inverse (S is symmetric positive definite).
        K = np.linalg.solve(S, Pxz.T).T
        
        # State update with the measurement residual.
        innovation = measurement - z_pred
        self.state = self.state_pred + K @ innovation
        
        # Covariance update, then symmetrize against round-off drift.
        self.P = self.P_pred - K @ S @ K.T
        self.P = 0.5 * (self.P + self.P.T)
        
        return self.state[:3], self.state[3:]
    
    def _constant_velocity_model(self, state: np.ndarray, 
                                  control: Optional[np.ndarray]) -> np.ndarray:
        """Constant-velocity model; `control` is an optional acceleration."""
        x, y, z, vx, vy, vz = state
        
        if control is not None:
            ax, ay, az = control
            vx += ax * self.dt
            vy += ay * self.dt
            vz += az * self.dt
        
        return np.array([x + vx * self.dt,
                         y + vy * self.dt,
                         z + vz * self.dt,
                         vx, vy, vz])
    
    def _linear_measurement(self, state: np.ndarray) -> np.ndarray:
        """Linear observation model: observe position only."""
        return state[:3]
    
    def get_state(self) -> np.ndarray:
        """Return a copy of the full 6-D state."""
        return self.state.copy()
    
    def get_covariance(self) -> np.ndarray:
        """Return a copy of the state covariance."""
        return self.P.copy()
    
    def get_innovation(self, measurement: np.ndarray) -> np.ndarray:
        """Residual between a measurement and the current position estimate."""
        return measurement - self.state[:3]
    
    def mahalanobis_distance(self, measurement: np.ndarray) -> float:
        """Mahalanobis distance of a measurement (for gating / outlier tests)."""
        innovation = self.get_innovation(measurement)
        
        H = np.eye(3, 6)  # linear position-observation matrix
        S = H @ self.P @ H.T + self.R
        
        return np.sqrt(innovation @ np.linalg.solve(S, innovation))


class InteractingMultipleModel:
    """
    Interacting Multiple Model (IMM) filter.

    Handles motion-mode switching (free motion, acoustic-trap capture,
    collision, ...) by running one filter per mode and mixing them.
    """
    
    def __init__(self, models: List[Dict], transition_matrix: np.ndarray):
        """
        models: list of model dicts, each containing:
                - 'filter': a Kalman-filter instance
                - 'type': model type tag
        transition_matrix: mode transition probabilities
                           [n_models, n_models], rows summing to 1
        """
        self.models = models
        self.n_models = len(models)
        self.mu = transition_matrix  # mode transition probability matrix
        
        # Model probabilities (uniform prior).
        self.model_probs = np.ones(self.n_models) / self.n_models
        
        # Combined (moment-matched) estimate, filled by update().
        self.mixed_state = None
        self.mixed_cov = None
        
    def predict(self, control_input: Optional[np.ndarray] = None):
        """IMM prediction: mix the filters, then predict each one."""
        # Step 1: interaction (mixing).
        self._mixing()
        
        # Step 2: independent per-model prediction.
        for model in self.models:
            model['filter'].predict(control_input)
    
    def update(self, measurement: np.ndarray):
        """IMM update: filter each model, reweight, and combine."""
        # Step 3: mode-matched filtering.
        likelihoods = np.zeros(self.n_models)
        
        for i, model in enumerate(self.models):
            model['filter'].update(measurement)
            
            # NOTE(review): the innovation is taken AFTER the update and
            # the innovation covariance is approximated by R alone; a
            # textbook IMM uses the pre-update innovation with
            # S = H P H^T + R. Kept as in the original design.
            innovation = model['filter'].get_innovation(measurement)
            S = model['filter'].R
            
            # Gaussian measurement likelihood.
            det_S = np.linalg.det(S)
            likelihoods[i] = (
                1.0 / np.sqrt((2*np.pi)**3 * det_S) *
                np.exp(-0.5 * innovation @ np.linalg.inv(S) @ innovation)
            )
        
        # Step 4: model-probability update.
        self._update_probabilities(likelihoods)
        
        # Step 5: combined estimate.
        self._combine_estimates()
    
    def _mixing(self):
        """Mix per-model states with the model-conditioned mixing weights."""
        # Normalizers: c_j = sum_i mu[i, j] * P(model i).
        c_j = self.mu.T @ self.model_probs
        
        # Mixing probabilities mu_ij = mu[i, j] * P(i) / c_j.
        mu_ij = np.zeros((self.n_models, self.n_models))
        for i in range(self.n_models):
            for j in range(self.n_models):
                # Guard against a degenerate (zero) normalizer.
                mu_ij[i, j] = self.mu[i, j] * self.model_probs[i] / max(c_j[j], 1e-300)
        
        # Moment-matched mixing of each model's state and covariance.
        for j in range(self.n_models):
            mixed_state = np.zeros(6)
            mixed_cov = np.zeros((6, 6))
            
            for i in range(self.n_models):
                mixed_state += mu_ij[i, j] * self.models[i]['filter'].get_state()
            
            for i in range(self.n_models):
                x_i = self.models[i]['filter'].get_state()
                P_i = self.models[i]['filter'].get_covariance()
                
                diff = x_i - mixed_state
                mixed_cov += mu_ij[i, j] * (P_i + np.outer(diff, diff))
            
            # Install the mixed estimate into model j's filter.
            self.models[j]['filter'].state = mixed_state
            self.models[j]['filter'].P = mixed_cov
    
    def _update_probabilities(self, likelihoods: np.ndarray):
        """Bayesian update of the model probabilities."""
        # Predicted probabilities under the transition matrix.
        mu_pred = self.mu.T @ self.model_probs
        
        # Posterior probabilities.
        self.model_probs = likelihoods * mu_pred
        total = np.sum(self.model_probs)
        
        # BUG FIX: when every likelihood underflows to 0 the original
        # divided by zero, turning all probabilities into NaN; fall back
        # to the predicted probabilities instead.
        if total > 0.0:
            self.model_probs /= total
        else:
            self.model_probs = mu_pred / np.sum(mu_pred)
    
    def _combine_estimates(self):
        """Moment-matched combination of all model estimates."""
        combined_state = np.zeros(6)
        combined_cov = np.zeros((6, 6))
        
        for i in range(self.n_models):
            combined_state += self.model_probs[i] * self.models[i]['filter'].get_state()
        
        for i in range(self.n_models):
            x_i = self.models[i]['filter'].get_state()
            P_i = self.models[i]['filter'].get_covariance()
            
            diff = x_i - combined_state
            combined_cov += self.model_probs[i] * (P_i + np.outer(diff, diff))
        
        self.mixed_state = combined_state
        self.mixed_cov = combined_cov
    
    def get_state(self) -> np.ndarray:
        """Return the state of the currently most probable model."""
        best_model = np.argmax(self.model_probs)
        return self.models[best_model]['filter'].get_state()
    
    def get_model_probabilities(self) -> np.ndarray:
        """Return a copy of the per-model probabilities."""
        return self.model_probs.copy()

1.2 力场插值完整实现

python
# ============================================================================
# 文件:tracking/force_field.py
# ============================================================================

import numpy as np
from scipy.interpolate import RegularGridInterpolator, RectBivariateSpline
from typing import Tuple, Optional, Callable
from dataclasses import dataclass

@dataclass
class ForceFieldConfig:
    """力场配置"""
    grid_min: np.ndarray
    grid_max: np.ndarray
    grid_size: Tuple[int, int, int]
    interpolation_method: str = 'cubic'  # 'linear', 'cubic', 'bspline'

class ForceField3D:
    """
    3D force-field storage and interpolation.

    Stores a vector field sampled on a regular grid and answers point
    queries via linear or cubic interpolation.
    """
    
    def __init__(self, config: "ForceFieldConfig"):
        self.grid_min = config.grid_min
        self.grid_max = config.grid_max
        self.grid_size = config.grid_size
        self.interpolation_method = config.interpolation_method
        
        # Grid spacing per axis (grid_size points span [min, max]).
        self.grid_spacing = tuple(
            (config.grid_max[d] - config.grid_min[d]) / (config.grid_size[d] - 1)
            for d in range(3)
        )
        
        # Force samples [Nx, Ny, Nz, 3].
        self.forces = np.zeros((*config.grid_size, 3))
        
        # Built lazily by set_force_field().
        self.interpolator = None
        
    def set_force_field(self, forces: np.ndarray):
        """Set the field samples and (re)build the interpolator."""
        if forces.shape != (*self.grid_size, 3):
            raise ValueError(f"Force field shape mismatch: {forces.shape} vs {(*self.grid_size, 3)}")
        
        self.forces = forces.copy()
        self._build_interpolator()
    
    def _build_interpolator(self):
        """Build the interpolation backend for the configured method."""
        # Grid coordinates along each axis.
        x = np.linspace(self.grid_min[0], self.grid_max[0], self.grid_size[0])
        y = np.linspace(self.grid_min[1], self.grid_max[1], self.grid_size[1])
        z = np.linspace(self.grid_min[2], self.grid_max[2], self.grid_size[2])
        
        if self.interpolation_method == 'linear':
            # One vector-valued interpolant (trailing dim = 3 components).
            self.interpolator = RegularGridInterpolator(
                (x, y, z), self.forces,
                method='linear',
                bounds_error=False,
                fill_value=0.0
            )
        elif self.interpolation_method == 'cubic':
            # One cubic interpolant per force component. BUG FIX: the
            # original probed hasattr(RegularGridInterpolator, 'cubic'),
            # which is never true, so cubic silently degraded to linear;
            # try 'cubic' and only fall back if this scipy/grid rejects it
            # (scipy needs >= 4 points per axis for cubic).
            self.interpolators = []
            for dim in range(3):
                try:
                    interp = RegularGridInterpolator(
                        (x, y, z), self.forces[..., dim],
                        method='cubic',
                        bounds_error=False,
                        fill_value=0.0
                    )
                except ValueError:
                    interp = RegularGridInterpolator(
                        (x, y, z), self.forces[..., dim],
                        method='linear',
                        bounds_error=False,
                        fill_value=0.0
                    )
                self.interpolators.append(interp)
        else:
            # BUG FIX: the original silently built nothing for unknown
            # methods (e.g. 'bspline') and interpolate() returned None.
            raise ValueError(f"Unsupported interpolation method: {self.interpolation_method}")
    
    def interpolate(self, position: np.ndarray) -> np.ndarray:
        """
        Interpolate the force at the given position(s).
        
        position: [N, 3] or [3]
        Returns [N, 3] forces (N = 1 for a single point); zero outside
        the grid bounds.
        """
        position = np.atleast_2d(position)
        
        if self.interpolation_method == 'linear':
            return self.interpolator(position)
        elif self.interpolation_method == 'cubic':
            result = np.zeros((len(position), 3))
            for dim in range(3):
                result[:, dim] = self.interpolators[dim](position)
            return result
        raise ValueError(f"Unsupported interpolation method: {self.interpolation_method}")
    
    def interpolate_with_gradient(self, position: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Interpolate the force and its spatial gradient at ONE position.
        
        Returns (force, gradient):
          force:    [1, 3] (same convention as interpolate())
          gradient: [3, 3] Jacobian ∂F/∂x via central finite differences
        """
        force = self.interpolate(position)
        
        # Finite-difference step: 1% of the grid spacing.
        eps = self.grid_spacing[0] * 0.01
        
        gradient = np.zeros((3, 3))
        
        for dim in range(3):
            delta = np.zeros(3)
            delta[dim] = eps
            
            force_plus = self.interpolate(position + delta)
            force_minus = self.interpolate(position - delta)
            
            # BUG FIX: interpolate() returns a [1, 3] array; ravel() it
            # before assigning into the [3] Jacobian column (the original
            # assignment raised a broadcasting error on every call).
            gradient[:, dim] = ((force_plus - force_minus) / (2 * eps)).ravel()
        
        return force, gradient
    
    def find_trap_centers(self, threshold: float = 0.5) -> np.ndarray:
        """
        Locate acoustic-trap centers (near-zero force magnitude).
        
        A grid point qualifies when it is a local minimum of |F| within a
        3x3x3 neighborhood and |F| < threshold * max(|F|).
        """
        force_magnitude = np.linalg.norm(self.forces, axis=-1)
        
        from scipy.ndimage import minimum_filter
        
        local_min = (force_magnitude == minimum_filter(force_magnitude, size=3))
        
        # Keep only sufficiently weak local minima.
        trap_mask = local_min & (force_magnitude < threshold * force_magnitude.max())
        
        trap_indices = np.argwhere(trap_mask)
        
        # Convert grid indices to physical coordinates.
        trap_centers = np.zeros((len(trap_indices), 3))
        for i, idx in enumerate(trap_indices):
            trap_centers[i] = self._index_to_position(idx)
        
        return trap_centers
    
    def _index_to_position(self, idx: np.ndarray) -> np.ndarray:
        """Grid index -> physical coordinates."""
        return np.array([
            self.grid_min[0] + idx[0] * self.grid_spacing[0],
            self.grid_min[1] + idx[1] * self.grid_spacing[1],
            self.grid_min[2] + idx[2] * self.grid_spacing[2]
        ])
    
    def _position_to_index(self, pos: np.ndarray) -> np.ndarray:
        """Physical coordinates -> (truncated) grid index."""
        return np.array([
            int((pos[0] - self.grid_min[0]) / self.grid_spacing[0]),
            int((pos[1] - self.grid_min[1]) / self.grid_spacing[1]),
            int((pos[2] - self.grid_min[2]) / self.grid_spacing[2])
        ], dtype=int)


class OctreeForceField:
    """
    Octree-backed force field for sparse sample sets.

    Samples are inserted into adaptively subdivided cubic cells; queries
    use inverse-distance weighting over the containing leaf's samples.
    """
    
    class OctreeNode:
        """One octree cell: 8 children, or a leaf holding samples."""
        
        def __init__(self, center: np.ndarray, size: float, depth: int = 0):
            self.center = center.copy()
            self.size = size  # half-edge length of the cell
            self.depth = depth
            self.children = [None] * 8
            self.is_leaf = True
            self.force_value = np.zeros(3)  # running mean of stored forces
            self.samples = []               # list of (position, force)
    
    def __init__(self, bounds_min: np.ndarray, bounds_max: np.ndarray, max_depth: int = 8):
        self.bounds_min = bounds_min.copy()
        self.bounds_max = bounds_max.copy()
        self.max_depth = max_depth
        
        # Root cell spans the whole bounding box.
        center = (bounds_min + bounds_max) / 2.0
        size = (bounds_max - bounds_min).max() / 2.0
        self.root = self.OctreeNode(center, size)
    
    def insert(self, position: np.ndarray, force: np.ndarray):
        """Insert one force sample into the tree."""
        self._insert_recursive(self.root, position, force)
    
    def _insert_recursive(self, node: 'OctreeNode', pos: np.ndarray, force: np.ndarray):
        """Insert into `node`, subdividing full leaves as needed."""
        if node.is_leaf:
            # A leaf holds up to 10 samples unless already at max depth.
            if node.depth >= self.max_depth or len(node.samples) < 10:
                node.samples.append((pos.copy(), force.copy()))
                # Keep the mean force of this leaf up to date.
                node.force_value = np.mean([s[1] for s in node.samples], axis=0)
            else:
                self._subdivide(node)
                self._insert_recursive(node, pos, force)
        else:
            # Descend into the octant containing the sample.
            idx = self._compute_child_index(node, pos)
            self._insert_recursive(node.children[idx], pos, force)
    
    def _subdivide(self, node: 'OctreeNode'):
        """Split a leaf into 8 children and redistribute its samples."""
        if node.depth >= self.max_depth:
            return
        
        node.is_leaf = False
        half_size = node.size / 2.0
        
        for i in range(8):
            # Child i occupies the octant encoded by bits 0..2 (+/- x, y, z).
            dx = 1 if (i & 1) else -1
            dy = 1 if (i & 2) else -1
            dz = 1 if (i & 4) else -1
            
            child_center = node.center + np.array([dx, dy, dz]) * half_size / 2.0
            node.children[i] = self.OctreeNode(child_center, half_size, node.depth + 1)
        
        # Move existing samples into the matching children.
        for pos, force in node.samples:
            idx = self._compute_child_index(node, pos)
            node.children[idx].samples.append((pos.copy(), force.copy()))
        
        node.samples = []
    
    def _compute_child_index(self, node: 'OctreeNode', pos: np.ndarray) -> int:
        """Octant index of `pos` relative to the node center (one bit per axis)."""
        idx = 0
        if pos[0] > node.center[0]: idx |= 1
        if pos[1] > node.center[1]: idx |= 2
        if pos[2] > node.center[2]: idx |= 4
        return idx
    
    def interpolate(self, position: np.ndarray) -> np.ndarray:
        """Query the force at `position` (zeros where no leaf is populated)."""
        leaf = self._find_leaf(position)
        
        if leaf is None:
            return np.zeros(3)
        
        # BUG FIX: _find_leaf() only ever returns leaf nodes, so the
        # original `if leaf.is_leaf ... else return leaf.force_value`
        # contained an unreachable branch; collapsed to the live path.
        return self._inverse_distance_weighting(position, leaf)
    
    def _find_leaf(self, pos: np.ndarray) -> 'OctreeNode':
        """Descend to the leaf containing `pos` (None if a child is missing)."""
        node = self.root
        
        while not node.is_leaf:
            idx = self._compute_child_index(node, pos)
            if node.children[idx] is None:
                return None
            node = node.children[idx]
        
        return node
    
    def _inverse_distance_weighting(self, pos: np.ndarray, leaf: 'OctreeNode') -> np.ndarray:
        """Inverse-distance-squared weighted average of the leaf's samples."""
        if len(leaf.samples) == 0:
            return np.zeros(3)
        
        force = np.zeros(3)
        weight_sum = 0.0
        
        for sample_pos, sample_force in leaf.samples:
            dist = np.linalg.norm(pos - sample_pos)
            if dist < 1e-10:
                # Exact hit: return the stored sample directly.
                return sample_force
            
            weight = 1.0 / dist**2
            force += weight * sample_force
            weight_sum += weight
        
        return force / weight_sum

二、完整追踪系统

2.1 多目标追踪器

python
# ============================================================================
# 文件:tracking/multi_target_tracker.py
# ============================================================================

import numpy as np
from typing import List, Dict, Optional, Tuple
from dataclasses import dataclass, field
from scipy.optimize import linear_sum_assignment
from collections import defaultdict

@dataclass
class TrackedObject:
    """Per-target track state maintained across frames."""
    id: int  # unique, monotonically assigned track id
    filter: object  # Kalman-filter instance (predict/update interface)
    age: int = 0  # number of frames this track has been associated
    consecutive_misses: int = 0  # frames since the last successful association
    last_position: np.ndarray = field(default_factory=lambda: np.zeros(3))  # most recent associated measurement
    is_confirmed: bool = False  # True once age reaches the confirm threshold
    trajectory: List[np.ndarray] = field(default_factory=list)  # history of associated positions

@dataclass
class TrackerConfig:
    """Multi-target tracker configuration."""
    dt: float = 0.033  # frame interval in seconds (~30 Hz)
    new_track_threshold: float = 50.0  # Mahalanobis gating threshold for association
    confirm_threshold: int = 3  # frames of association needed to confirm a track
    delete_threshold: int = 10  # consecutive misses before a track is dropped
    association_method: str = 'hungarian'  # 'hungarian' or 'greedy'

class MultiTargetTracker:
    """
    Multi-target tracker.

    Features:
    - track management (create / confirm / delete)
    - data association (Hungarian or greedy, Mahalanobis-gated)
    - occlusion handling via consecutive-miss counting
    """
    
    def __init__(self, config: TrackerConfig, filter_factory: callable):
        """
        config: tracker parameters.
        filter_factory: zero-argument callable returning a fresh filter
                        exposing predict/update/set_state/mahalanobis_distance.
        """
        self.config = config
        self.filter_factory = filter_factory
        
        self.tracks: List[TrackedObject] = []
        self.next_track_id = 0
        
        # Lifetime statistics.
        self.stats = {
            'n_tracks_created': 0,
            'n_tracks_confirmed': 0,
            'n_tracks_deleted': 0,
            'n_associations': 0
        }
    
    def process_frame(self, measurements: np.ndarray, 
                      timestamp: Optional[float] = None) -> List[TrackedObject]:
        """
        Process one frame of measurements.
        
        measurements: [N, 3] measured positions.
        timestamp: currently unused; kept for interface stability.
        
        Returns the updated track list.
        """
        # Step 1: predict every track forward.
        for track in self.tracks:
            track.filter.predict()
        
        # Step 2: data association.
        associations = self._associate_measurements(measurements)
        
        # Step 3: update associated tracks.
        used_measurements = set()
        associated_tracks = set()
        
        for track_idx, meas_idx in associations:
            track = self.tracks[track_idx]
            measurement = measurements[meas_idx]
            
            track.filter.update(measurement)
            
            # BUG FIX: copy the measurement; the original stored a view
            # into the measurements array, aliasing caller-owned data.
            track.last_position = measurement.copy()
            track.age += 1
            track.consecutive_misses = 0
            track.trajectory.append(measurement.copy())
            
            # Promote tentative tracks once they have survived long enough.
            if track.age >= self.config.confirm_threshold and not track.is_confirmed:
                track.is_confirmed = True
                self.stats['n_tracks_confirmed'] += 1
            
            used_measurements.add(meas_idx)
            associated_tracks.add(track_idx)
        
        # BUG FIX: this counter existed but was never incremented.
        self.stats['n_associations'] += len(associations)
        
        # Step 4: count misses for unassociated tracks (set membership
        # instead of rebuilding the association list per track).
        for i, track in enumerate(self.tracks):
            if i not in associated_tracks:
                track.consecutive_misses += 1
        
        # Step 5: spawn tentative tracks from unassociated measurements.
        for j in range(len(measurements)):
            if j not in used_measurements:
                self._create_new_track(measurements[j])
        
        # Step 6: prune stale tracks.
        self._delete_old_tracks()
        
        return self.tracks
    
    def _associate_measurements(self, measurements: np.ndarray) -> List[Tuple[int, int]]:
        """
        Associate measurements to tracks.
        
        Builds a Mahalanobis-distance cost matrix, gates it, then solves
        the assignment with the Hungarian algorithm (or greedily).
        """
        if len(self.tracks) == 0 or len(measurements) == 0:
            return []
        
        n_tracks = len(self.tracks)
        n_meas = len(measurements)
        
        cost_matrix = np.zeros((n_tracks, n_meas))
        
        for i, track in enumerate(self.tracks):
            for j in range(n_meas):
                cost_matrix[i, j] = track.filter.mahalanobis_distance(measurements[j])
        
        # Gating: make over-threshold pairs effectively unassignable.
        cost_matrix[cost_matrix > self.config.new_track_threshold] = 1e10
        
        if self.config.association_method == 'hungarian':
            row_ind, col_ind = linear_sum_assignment(cost_matrix)
            
            # Drop gated (high-cost) assignments.
            return [
                (i, j) for i, j in zip(row_ind, col_ind)
                if cost_matrix[i, j] < 1e9
            ]
        
        return self._greedy_association(cost_matrix)
    
    def _greedy_association(self, cost_matrix: np.ndarray) -> List[Tuple[int, int]]:
        """Greedy nearest-first association (fast but suboptimal)."""
        n_tracks, n_meas = cost_matrix.shape
        associations = []
        
        used_tracks = set()
        used_meas = set()
        
        # All candidate pairs, cheapest first.
        all_pairs = sorted(
            (cost_matrix[i, j], i, j)
            for i in range(n_tracks)
            for j in range(n_meas)
        )
        
        for cost, i, j in all_pairs:
            if cost > self.config.new_track_threshold:
                break  # sorted order: everything after is gated too
            if i not in used_tracks and j not in used_meas:
                associations.append((i, j))
                used_tracks.add(i)
                used_meas.add(j)
        
        return associations
    
    def _create_new_track(self, measurement: np.ndarray):
        """Start a tentative track from an unassociated measurement."""
        filter_obj = self.filter_factory()
        filter_obj.set_state(measurement)
        
        track = TrackedObject(
            id=self.next_track_id,
            filter=filter_obj,
            age=1,
            last_position=measurement.copy(),
            trajectory=[measurement.copy()]
        )
        
        self.tracks.append(track)
        self.next_track_id += 1
        self.stats['n_tracks_created'] += 1
    
    def _delete_old_tracks(self):
        """Drop tracks missed for too many consecutive frames."""
        before = len(self.tracks)
        
        self.tracks = [
            track for track in self.tracks
            if track.consecutive_misses < self.config.delete_threshold
        ]
        
        self.stats['n_tracks_deleted'] += before - len(self.tracks)
    
    def get_confirmed_tracks(self) -> List[TrackedObject]:
        """Return only confirmed tracks."""
        return [t for t in self.tracks if t.is_confirmed]
    
    def get_all_trajectories(self) -> Dict[int, List[np.ndarray]]:
        """Return {track id: position history} for every live track."""
        return {t.id: t.trajectory for t in self.tracks}

三、GPU 加速实现

3.1 CUDA 批量卡尔曼滤波

cuda
// ============================================================================
// 文件:tracking/cuda/kalman_cuda.cu
// ============================================================================

#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// ============================================================================
// 批量卡尔曼滤波预测 Kernel
// ============================================================================

// Batched Kalman-filter prediction: one thread per track.
//
// Per track:
//   x' = F x + B u        (state prediction)
//   P' = F P F^T + Q      (covariance prediction)
//
// All matrices are row-major; F, Q, B are shared across tracks.
__global__ void kalmanPredictBatch(
    double* states,        // [n_tracks, 6] state vectors, updated in place
    double* covariances,   // [n_tracks, 36] 6x6 covariances, updated in place
    const double* F,       // [6, 6] state-transition matrix
    const double* Q,       // [6, 6] process-noise covariance
    const double* B,       // [6, 3] control matrix
    const double* controls,// [n_tracks, 3] per-track control input
    int n_tracks
) {
    int track_id = blockIdx.x * blockDim.x + threadIdx.x;
    
    // Guard against the partial last block.
    if (track_id >= n_tracks) return;
    
    int state_offset = track_id * 6;
    int cov_offset = track_id * 36;
    int ctrl_offset = track_id * 3;
    
    // Load this track's state into a local array.
    double x[6];
    for (int i = 0; i < 6; i++) {
        x[i] = states[state_offset + i];
    }
    
    // Load the full 6x6 covariance (all 36 entries, not just a triangle).
    double P[36];
    for (int i = 0; i < 36; i++) {
        P[i] = covariances[cov_offset + i];
    }
    
    // Load this track's control input.
    double u[3];
    for (int i = 0; i < 3; i++) {
        u[i] = controls[ctrl_offset + i];
    }
    
    // State prediction: x_pred = F * x + B * u
    double x_pred[6] = {0};
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            x_pred[i] += F[i * 6 + j] * x[j];
        }
        x_pred[i] += B[i * 3 + 0] * u[0] + B[i * 3 + 1] * u[1] + B[i * 3 + 2] * u[2];
    }
    
    // Covariance prediction: P_pred = F * P * F^T + Q
    // (F[j * 6 + l] indexes F^T.)
    double P_pred[36] = {0};
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            for (int k = 0; k < 6; k++) {
                for (int l = 0; l < 6; l++) {
                    P_pred[i * 6 + j] += F[i * 6 + k] * P[k * 6 + l] * F[j * 6 + l];
                }
            }
            P_pred[i * 6 + j] += Q[i * 6 + j];
        }
    }
    
    // Write the predicted state and covariance back.
    for (int i = 0; i < 6; i++) {
        states[state_offset + i] = x_pred[i];
    }
    for (int i = 0; i < 36; i++) {
        covariances[cov_offset + i] = P_pred[i];
    }
}

// ============================================================================
// Batched Kalman-filter update kernel
// ============================================================================

__global__ void kalmanUpdateBatch(
    double* states,             // [n_tracks, 6]  in/out: per-track state vectors
    double* covariances,        // [n_tracks, 36] in/out: per-track 6x6 covariances (row-major)
    const double* H,            // [3, 6] observation matrix (shared by all tracks)
    const double* R,            // [3, 3] observation-noise covariance
    const double* measurements, // [n_tracks, 3] per-track measurements
    int n_tracks
) {
    // One thread handles one track.
    int track_id = blockIdx.x * blockDim.x + threadIdx.x;

    if (track_id >= n_tracks) return;

    int state_offset = track_id * 6;
    int cov_offset = track_id * 36;
    int meas_offset = track_id * 3;

    // Load state into thread-local storage.
    double x[6];
    for (int i = 0; i < 6; i++) {
        x[i] = states[state_offset + i];
    }

    // Load the full 6x6 covariance.
    double P[36];
    for (int i = 0; i < 36; i++) {
        P[i] = covariances[cov_offset + i];
    }

    // Load the measurement.
    double z[3];
    for (int i = 0; i < 3; i++) {
        z[i] = measurements[meas_offset + i];
    }

    // Predicted measurement: z_pred = H * x
    double z_pred[3] = {0};
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 6; j++) {
            z_pred[i] += H[i * 6 + j] * x[j];
        }
    }

    // Innovation: y = z - z_pred
    double y[3];
    for (int i = 0; i < 3; i++) {
        y[i] = z[i] - z_pred[i];
    }

    // Innovation covariance: S = H * P * H^T + R
    double S[9] = {0};
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            for (int k = 0; k < 6; k++) {
                for (int l = 0; l < 6; l++) {
                    S[i * 3 + j] += H[i * 6 + k] * P[k * 6 + l] * H[j * 6 + l];
                }
            }
            S[i * 3 + j] += R[i * 3 + j];
        }
    }

    // Closed-form 3x3 inverse of S (adjugate / determinant).
    // NOTE(review): no guard against a singular or near-singular S; this
    // assumes R keeps S well conditioned — confirm for the deployed noise model.
    double det_S = S[0] * (S[4]*S[8] - S[5]*S[7]) -
                   S[1] * (S[3]*S[8] - S[5]*S[6]) +
                   S[2] * (S[3]*S[7] - S[4]*S[6]);

    double S_inv[9];
    S_inv[0] = (S[4]*S[8] - S[5]*S[7]) / det_S;
    S_inv[1] = (S[2]*S[7] - S[1]*S[8]) / det_S;
    S_inv[2] = (S[1]*S[5] - S[2]*S[4]) / det_S;
    S_inv[3] = (S[5]*S[6] - S[3]*S[8]) / det_S;
    S_inv[4] = (S[0]*S[8] - S[2]*S[6]) / det_S;
    S_inv[5] = (S[3]*S[2] - S[0]*S[5]) / det_S;
    S_inv[6] = (S[3]*S[7] - S[4]*S[6]) / det_S;
    S_inv[7] = (S[1]*S[6] - S[0]*S[7]) / det_S;
    S_inv[8] = (S[0]*S[4] - S[1]*S[3]) / det_S;

    // Kalman gain: K = P * H^T * S^{-1}
    double K[18] = {0};
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 3; j++) {
            for (int k = 0; k < 6; k++) {
                for (int l = 0; l < 3; l++) {
                    K[i * 3 + j] += P[i * 6 + k] * H[l * 6 + k] * S_inv[l * 3 + j];
                }
            }
        }
    }

    // State update: x = x + K * y
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 3; j++) {
            x[i] += K[i * 3 + j] * y[j];
        }
    }

    // Covariance update: P = (I - K * H) * P
    double KH[36] = {0};
    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            for (int k = 0; k < 3; k++) {
                KH[i * 6 + j] += K[i * 3 + k] * H[k * 6 + j];
            }
        }
    }

    double I_KH[36];
    for (int i = 0; i < 36; i++) {
        // i % 7 == 0 selects the diagonal of a row-major 6x6 matrix.
        I_KH[i] = (i % 7 == 0) ? 1.0 - KH[i] : -KH[i];
    }

    // BUG FIX: the original computed (I - K*H) * P *in place*, zeroing and
    // overwriting rows of P that later iterations still needed to read,
    // which corrupted every row after the first. Accumulate into a separate
    // buffer instead.
    double P_new[36] = {0};
    for (int i = 0; i < 6; i++) {
        for (int k = 0; k < 6; k++) {
            double a = I_KH[i * 6 + k];
            for (int j = 0; j < 6; j++) {
                P_new[i * 6 + j] += a * P[k * 6 + j];
            }
        }
    }

    // Write results back to global memory.
    for (int i = 0; i < 6; i++) {
        states[state_offset + i] = x[i];
    }
    for (int i = 0; i < 36; i++) {
        covariances[cov_offset + i] = P_new[i];
    }
}

// ============================================================================
// Python host-side wrapper (usage example)
// ============================================================================

/*
import numpy as np
import cupy as cp

class GPUKalmanTracker:
    """Host-side driver that launches the batched Kalman kernels above."""

    def __init__(self, n_tracks, dt=0.033,
                 process_noise_std=1.0, measurement_noise_std=10e-6):
        self.n_tracks = n_tracks
        self.dt = dt

        # State-transition matrix (constant-velocity model)
        F = np.eye(6)
        F[0:3, 3:6] = dt * np.eye(3)

        # Control matrix (acceleration input)
        B = np.zeros((6, 3))
        B[0:3, :] = 0.5 * dt**2 * np.eye(3)
        B[3:6, :] = dt * np.eye(3)

        # Observation matrix (position-only measurements)
        H = np.zeros((3, 6))
        H[:, 0:3] = np.eye(3)

        # Process / measurement noise covariances.
        # BUG FIX: predict()/update() referenced self.Q and self.R, which
        # were never defined in the original example.
        Q = (process_noise_std ** 2) * np.eye(6)
        R = (measurement_noise_std ** 2) * np.eye(3)

        # Move constant matrices to the GPU
        self.F = cp.asarray(F)
        self.B = cp.asarray(B)
        self.H = cp.asarray(H)
        self.Q = cp.asarray(Q)
        self.R = cp.asarray(R)

        # Allocate per-track state and covariance storage
        self.states = cp.zeros((n_tracks, 6))
        self.covariances = cp.zeros((n_tracks, 36))

    def predict(self, controls=None):
        if controls is None:
            controls = cp.zeros((self.n_tracks, 3))

        threads_per_block = 256
        n_blocks = (self.n_tracks + threads_per_block - 1) // threads_per_block

        kalmanPredictBatch[n_blocks, threads_per_block](
            self.states, self.covariances,
            self.F, self.Q, self.B, controls,
            self.n_tracks
        )

    def update(self, measurements):
        threads_per_block = 256
        n_blocks = (self.n_tracks + threads_per_block - 1) // threads_per_block

        kalmanUpdateBatch[n_blocks, threads_per_block](
            self.states, self.covariances,
            self.H, self.R, measurements,
            self.n_tracks
        )
*/

四、验证与性能基准

4.1 完整测试套件

python 复制代码
# ============================================================================
# 文件:tests/test_tracking.py
# ============================================================================

import pytest
import numpy as np
from tracking.kalman_filters import UnscentedKalmanFilter3D, FilterConfig, FilterType
from tracking.force_field import ForceField3D, ForceFieldConfig
from tracking.multi_target_tracker import MultiTargetTracker, TrackerConfig

class TestKalmanFilter:
    """Unit tests for the Kalman filter library."""

    @pytest.fixture
    def ukf_config(self):
        """Standard UKF configuration shared by the tests below."""
        return FilterConfig(
            filter_type=FilterType.UKF,
            dt=0.033,
            process_noise_std=np.array([10.0, 10.0, 10.0, 1.0, 1.0, 1.0]),
            measurement_noise_std=np.array([10.0, 10.0, 10.0])
        )

    def test_ukf_linear_tracking(self, ukf_config):
        """UKF must track constant-velocity motion within 20 um RMSE."""
        ukf = UnscentedKalmanFilter3D(ukf_config)

        true_positions = []
        estimates = []

        # Ground truth: uniform straight-line motion at 100 um/s along x.
        pos = np.array([0.0, 0.0, 0.0])
        vel = np.array([100e-6, 0.0, 0.0])  # 100 μm/s

        for i in range(100):
            true_positions.append(pos.copy())

            # Noisy position measurement (10 um std).
            meas = pos + np.random.normal(0, 10e-6, 3)

            if i == 0:
                ukf.set_state(meas, vel)
            else:
                ukf.predict()
                ukf.update(meas)

            # BUG FIX: record the estimate for *this* frame. The original
            # sampled ukf.get_state() 100 times after the loop finished,
            # so every "estimate" was just the final state repeated.
            estimates.append(np.asarray(ukf.get_state()[:3]).copy())

            # Advance the true position.
            pos = pos + vel * ukf_config.dt

        true_positions = np.array(true_positions)
        estimates = np.array(estimates)

        rmse = np.sqrt(np.mean((true_positions - estimates)**2))

        assert rmse < 20e-6, f"RMSE {rmse*1e6:.1f}μm 超过 20μm"

    def test_ukf_nonlinear_tracking(self, ukf_config):
        """UKF nonlinear tracking (curved motion) — not yet implemented."""
        # TODO: implement a nonlinear motion-model test (e.g. circular path).
        pass

class TestForceField:
    """Tests for the force-field interpolation module."""

    def test_trilinear_interpolation(self):
        """Trilinear interpolation must reproduce a linear field exactly."""
        cfg = ForceFieldConfig(
            grid_min=np.array([0.0, 0.0, 0.0]),
            grid_max=np.array([1.0, 1.0, 1.0]),
            grid_size=(11, 11, 11),
            interpolation_method='linear'
        )
        field = ForceField3D(cfg)

        # Known linear field: force at node (i, j, k) is (i/10, j/10, k/10).
        # Built vectorized instead of with three nested loops.
        forces = np.moveaxis(np.indices((11, 11, 11), dtype=float), 0, -1) / 10.0
        field.set_force_field(forces)

        # Query an off-grid point; a linear field is interpolated exactly.
        query = np.array([0.35, 0.65, 0.85])
        result = field.interpolate(query)

        deviation = np.linalg.norm(result - query)
        assert deviation < 0.01, f"插值误差 {deviation} 过大"

class TestMultiTargetTracker:
    """Tests for the multi-target tracker."""

    def test_data_association(self):
        """Two well-separated targets must yield two stable confirmed tracks."""
        cfg = TrackerConfig(
            dt=0.033,
            new_track_threshold=50.0,
            confirm_threshold=3,
            delete_threshold=10
        )

        tracker = MultiTargetTracker(
            cfg, lambda: UnscentedKalmanFilter3D(FilterConfig())
        )

        # Two stationary targets, far apart relative to measurement noise.
        targets = [
            np.array([100e-6, 100e-6, 100e-6]),
            np.array([500e-6, 500e-6, 500e-6]),
        ]

        for _ in range(50):
            # Per-frame noisy measurements of both targets.
            noisy = np.array(
                [t + np.random.normal(0, 10e-6, 3) for t in targets]
            )
            tracker.process_frame(noisy)

        # Both tracks should be confirmed and keep their identities.
        confirmed = tracker.get_confirmed_tracks()
        assert len(confirmed) == 2, "应追踪到 2 个目标"

五、结论

细胞球运动追踪系统的完整工程实现,包括:

  1. 完整卡尔曼滤波库:KF、EKF、UKF、IMM、CKF 五种滤波器
  2. 力场插值系统:规则网格、八叉树、多种插值方法
  3. 多目标追踪器:匈牙利算法、轨迹管理、遮挡处理
  4. GPU 加速实现:批量卡尔曼滤波 CUDA kernel
  5. 完整测试套件:单元测试、性能基准
相关推荐
汀、人工智能2 小时前
[特殊字符] 第105课:除自身以外数组的乘积
数据结构·算法·数据库架构·数组·前缀积·除自身以外数组的乘积
minji...2 小时前
Linux 多线程(三)线程控制,线程终止,线程中的异常问题
linux·运维·服务器·开发语言·网络·算法
We་ct2 小时前
LeetCode 137. 只出现一次的数字 II:从基础到最优的两种解法详解
前端·数据结构·算法·leetcode·typescript·位运算
CappuccinoRose2 小时前
排序算法和查找算法 - 软考备战(十五)
数据结构·python·算法·排序算法·查找算法
旖-旎2 小时前
分治(交易逆序对的总数)(6)
c++·算法·leetcode·排序算法·归并排序
北顾笙9802 小时前
day14-数据结构力扣
数据结构·算法·leetcode
Ln5x9qZC22 小时前
尾递归与Continuation
算法
一路向北he2 小时前
esp32库依赖
c语言·c++·算法
老四啊laosi2 小时前
[双指针] 6. 查找总价为目标值的两个商品
算法·力扣·总价为目标值得两商品