03 - Deep Learning Fundamentals: Training Techniques

Training Techniques: Improving Model Performance and Training Efficiency

1. Why Do We Need Training Techniques?

1.1 The Challenges of Deep Learning Training

python
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, CosineAnnealingLR
from torch.utils.tensorboard import SummaryWriter  # used by the Trainer in Section 6
import warnings
warnings.filterwarnings('ignore')

print("=" * 60)
print("深度学习训练的挑战")
print("=" * 60)

challenges = [
    ("学习率选择", "太大不收敛,太小收敛慢"),
    ("梯度问题", "梯度消失/爆炸"),
    ("显存不足", "模型太大,batch太大"),
    ("训练速度", "单卡训练慢"),
    ("过拟合", "验证集性能下降")
]

print("\n📊 常见训练挑战:")
for challenge, solution in challenges:
    print(f"   • {challenge}: {solution}")

# Visualize the effect of the learning rate
def visualize_lr_impact():
    """Visualize how the learning rate affects convergence"""
    
    x = np.linspace(-2, 2, 100)
    y = x ** 2
    
    plt.figure(figsize=(12, 4))
    
    # Gradient-descent paths for different learning rates
    start = 1.5
    learning_rates = [0.01, 0.1, 0.5, 1.2]
    colors = ['blue', 'green', 'orange', 'red']
    
    for lr, color in zip(learning_rates, colors):
        x_curr = start
        path_x = [x_curr]
        path_y = [x_curr ** 2]
        
        for _ in range(10):
            grad = 2 * x_curr
            x_curr = x_curr - lr * grad
            path_x.append(x_curr)
            path_y.append(x_curr ** 2)
        
        plt.plot(path_x, path_y, 'o-', color=color, label=f'lr={lr}', alpha=0.7)
    
    plt.plot(x, y, 'k-', linewidth=2, label='Loss function')
    plt.xlabel('Parameter')
    plt.ylabel('Loss')
    plt.title('Effect of Learning Rate on Gradient Descent')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()

visualize_lr_impact()

print("\n💡 本章将介绍的训练技巧:")
print("   1. 学习率调度 - 动态调整学习率")
print("   2. 梯度裁剪 - 防止梯度爆炸")
print("   3. 混合精度训练 - 节省显存")
print("   4. TensorBoard可视化 - 监控训练过程")

2. Learning Rate Scheduling

2.1 Comparing Learning Rate Schedulers

python
def lr_schedulers_demo():
    """Demo of learning rate schedulers"""
    
    print("\n" + "=" * 60)
    print("学习率调度器")
    print("=" * 60)
    
    # Simulate a training run
    epochs = 100
    initial_lr = 0.1
    
    # The schedulers to compare
    schedulers = {
        'StepLR (step=30)': lambda e: initial_lr * (0.1 ** (e // 30)),
        'ReduceLROnPlateau': None,  # needs a validation loss
        'CosineAnnealing': lambda e: initial_lr * (1 + np.cos(np.pi * e / epochs)) / 2,
        'Exponential': lambda e: initial_lr * (0.95 ** e),
        'Constant': lambda e: initial_lr
    }
    
    # Compute the learning-rate curve for each scheduler
    lr_values = {name: [] for name in schedulers}
    for epoch in range(epochs):
        for name, scheduler_fn in schedulers.items():
            if scheduler_fn is not None:
                lr = scheduler_fn(epoch)
                lr_values[name].append(lr)
    
    # Simulate ReduceLROnPlateau (reduce the LR when validation loss stops improving)
    lr = initial_lr
    lr_values['ReduceLROnPlateau'] = []
    patience = 10
    best_loss = float('inf')
    wait = 0
    
    for epoch in range(epochs):
        # Simulated validation loss (first falls, then rises)
        val_loss = 0.5 * np.exp(-epoch/30) + 0.05 * np.sin(epoch/5) + 0.1 * (epoch/100)**2
        
        lr_values['ReduceLROnPlateau'].append(lr)
        
        if val_loss < best_loss:
            best_loss = val_loss
            wait = 0
        else:
            wait += 1
            if wait >= patience:
                lr *= 0.5
                wait = 0
    
    # Visualize
    plt.figure(figsize=(14, 6))
    
    colors = ['blue', 'green', 'red', 'purple', 'orange']
    for (name, lrs), color in zip(lr_values.items(), colors):
        plt.plot(lrs, label=name, linewidth=2, color=color)
    
    plt.xlabel('Epoch')
    plt.ylabel('Learning Rate')
    plt.title('Comparison of Learning Rate Scheduling Strategies')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.yscale('log')
    plt.tight_layout()
    plt.show()
    
    print("\n📊 调度器说明:")
    print("   StepLR: 每隔固定步数降低学习率")
    print("   ReduceLROnPlateau: 验证损失不改善时降低")
    print("   CosineAnnealing: 余弦退火")
    print("   Exponential: 指数衰减")

lr_schedulers_demo()

2.2 PyTorch Implementation

python
def pytorch_lr_schedulers():
    """Using PyTorch's learning rate schedulers"""
    
    print("\n" + "=" * 60)
    print("PyTorch学习率调度器")
    print("=" * 60)
    
    # Create a model and an optimizer
    model = nn.Linear(10, 2)
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    
    # 1. StepLR
    scheduler_step = StepLR(optimizer, step_size=30, gamma=0.1)
    print("StepLR: 每30个epoch将学习率乘以0.1")
    
    # 2. ReduceLROnPlateau (the verbose kwarg is deprecated in recent PyTorch, so it is omitted)
    scheduler_plateau = ReduceLROnPlateau(optimizer, mode='min', factor=0.5,
                                          patience=10)
    print("ReduceLROnPlateau: halves the LR when the validation loss has not improved for 10 epochs")
    
    # 3. CosineAnnealingLR
    scheduler_cosine = CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-6)
    print("CosineAnnealingLR: 余弦退火,从初始学习率降到eta_min")
    
    # Usage patterns
    print("\n💡 Usage:")
    print("""
    # StepLR
    for epoch in range(epochs):
        train(...)
        scheduler.step()
    
    # ReduceLROnPlateau
    for epoch in range(epochs):
        train(...)
        val_loss = validate(...)
        scheduler.step(val_loss)
    
    # CosineAnnealing
    for epoch in range(epochs):
        train(...)
        scheduler.step()
    """)

pytorch_lr_schedulers()
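
To see a scheduler in action, here is a minimal runnable sketch (the tiny nn.Linear model and the 5-epoch loop are placeholders for illustration) that steps CosineAnnealingLR and reads the current learning rate back with get_last_lr():

python
# Minimal sketch: step a scheduler and read back the current LR.
# The tiny model and the 5-epoch loop are placeholders.
model = nn.Linear(10, 2)
optimizer = optim.Adam(model.parameters(), lr=0.01)
scheduler = CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-6)

for epoch in range(5):
    # ... training steps would go here ...
    optimizer.step()   # step the optimizer first (PyTorch warns otherwise)
    scheduler.step()   # then advance the schedule once per epoch
    print(f"epoch {epoch}: lr = {scheduler.get_last_lr()[0]:.6f}")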

3. Gradient Clipping

3.1 Exploding Gradients and Clipping

python
def gradient_clipping_demo():
    """Gradient clipping demo"""
    
    print("\n" + "=" * 60)
    print("梯度裁剪")
    print("=" * 60)
    
    # Simulate exploding gradients
    np.random.seed(42)
    epochs = 50
    
    # Normal gradients
    normal_grads = np.random.randn(epochs) * 0.5
    # Gradients with explosion spikes
    exploding_grads = normal_grads.copy()
    exploding_grads[10] = 50
    exploding_grads[25] = -40
    exploding_grads[40] = 60
    
    # Clipped gradients (for a scalar, clipping the value and clipping the
    # norm coincide; for real models, clip the global norm instead)
    max_norm = 5.0
    clipped_grads = np.clip(exploding_grads, -max_norm, max_norm)
    
    plt.figure(figsize=(12, 5))
    
    plt.subplot(1, 2, 1)
    plt.plot(exploding_grads, 'ro-', label='Original gradients (with spikes)', alpha=0.7)
    plt.plot(clipped_grads, 'b--', label='Clipped gradients', alpha=0.7)
    plt.axhline(y=max_norm, color='g', linestyle='--', label=f'Upper bound {max_norm}')
    plt.axhline(y=-max_norm, color='g', linestyle='--', label=f'Lower bound -{max_norm}')
    plt.xlabel('Iteration')
    plt.ylabel('Gradient value')
    plt.title('Effect of Gradient Clipping')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    # Gradient norm over time
    plt.subplot(1, 2, 2)
    norm_original = np.abs(exploding_grads)
    norm_clipped = np.abs(clipped_grads)
    plt.plot(norm_original, 'ro-', label='Original gradient norm', alpha=0.7)
    plt.plot(norm_clipped, 'bs-', label='Clipped gradient norm', alpha=0.7)
    plt.axhline(y=max_norm, color='g', linestyle='--', label=f'Clipping threshold {max_norm}')
    plt.xlabel('Iteration')
    plt.ylabel('Gradient norm')
    plt.title('Gradient Norm over Time')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    plt.tight_layout()
    plt.show()
    
    # Gradient clipping in PyTorch
    print("\n📐 Gradient clipping in PyTorch:")
    print("""
    # Clip the global gradient norm of the whole model
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
    
    # Clip individual gradient values element-wise
    torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=1.0)
    
    # Apply after loss.backward() and before optimizer.step()
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
    optimizer.step()
    """)
    
    print("\n💡 梯度裁剪的作用:")
    print("   1. 防止梯度爆炸导致训练不稳定")
    print("   2. 允许使用更大的学习率")
    print("   3. 提高训练的鲁棒性")

gradient_clipping_demo()
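
To make the norm-clipping mechanics concrete, here is a small runnable sketch (the two-layer model and the artificially inflated loss are illustrative, chosen only to force a large gradient). clip_grad_norm_ returns the total norm measured before clipping, so printing it alongside a recomputed norm shows the rescaling:

python
# Runnable sketch: gradient norm before and after clip_grad_norm_.
# The model and the x1000 loss are artificial, only to force large gradients.
model = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 1))
x = torch.randn(8, 10)
loss = 1000 * model(x).pow(2).mean()   # inflated loss -> large gradients
loss.backward()

# clip_grad_norm_ rescales in place and returns the norm measured BEFORE clipping
norm_before = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)

# Recompute the global 2-norm to confirm it is now at most max_norm
norm_after = torch.norm(torch.stack(
    [p.grad.norm() for p in model.parameters() if p.grad is not None]))
print(f"norm before clipping: {norm_before.item():.2f}, after: {norm_after.item():.2f}")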

4. Mixed Precision Training (AMP)

4.1 How AMP Works and Its Benefits

python
def amp_demo():
    """Mixed precision training demo"""
    
    print("\n" + "=" * 60)
    print("混合精度训练 (AMP)")
    print("=" * 60)
    
    # Precision comparison
    precisions = {
        'FP32 (single precision)': 32,
        'FP16 (half precision)': 16,
    }
    
    print("\n📊 Precision comparison:")
    for name, bits in precisions.items():
        print(f"   {name}: {bits} bits")
    
    print("\n📐 How AMP works:")
    print("   1. The forward pass runs in FP16 for faster compute")
    print("   2. Loss scaling prevents gradient underflow")
    print("   3. Master weights are kept in FP32")
    
    # Illustrate the memory savings (for a model with 1 billion parameters)
    params_billion = 1.0
    fp32_memory = params_billion * 4  # 4 bytes per parameter -> GB
    fp16_memory = params_billion * 2  # 2 bytes per parameter -> GB
    
    plt.figure(figsize=(10, 5))
    
    models = ['FP32 model', 'FP16 model', 'FP32 + grads', 'FP16 + grads']
    memory = [fp32_memory, fp16_memory, fp32_memory * 2, fp16_memory * 2]
    colors = ['lightcoral', 'lightgreen', 'lightcoral', 'lightgreen']
    
    bars = plt.bar(models, memory, color=colors)
    plt.ylabel('GPU memory (GB)')
    plt.title('Memory Savings from Mixed Precision Training')
    
    for bar, mem in zip(bars, memory):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.05,
                f'{mem:.1f}GB', ha='center', va='bottom')
    
    plt.tight_layout()
    plt.show()
    
    # Using AMP in PyTorch
    print("\n📐 Using AMP in PyTorch:")
    print("""
    from torch.cuda.amp import autocast, GradScaler
    
    # Initialize
    scaler = GradScaler()
    model = MyModel().cuda()
    optimizer = optim.Adam(model.parameters())
    
    # Training loop
    for data, target in dataloader:
        data, target = data.cuda(), target.cuda()
        
        # Forward pass (automatic mixed precision)
        with autocast():
            output = model(data)
            loss = criterion(output, target)
        
        # Backward pass (with gradient scaling)
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
    """)
    
    print("\n💡 AMP优势:")
    print("   1. 节省显存(约50%)")
    print("   2. 加速训练(约2-3倍)")
    print("   3. 几乎不损失精度")
    print("   4. 易于集成")

amp_demo()
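
Point 2 above, loss scaling, can be demonstrated numerically: FP16 cannot represent magnitudes much below about 6e-8, so small gradient values silently flush to zero. A minimal CPU-only sketch (the 2**16 scale factor mirrors a typical GradScaler initial scale and is chosen here purely for illustration):

python
# Runnable sketch (CPU-only): why AMP needs loss scaling.
small_grad = 1e-8  # a gradient magnitude FP32 handles without trouble

print(f"FP32: {torch.tensor(small_grad, dtype=torch.float32).item():.2e}")  # ~1e-08
print(f"FP16: {torch.tensor(small_grad, dtype=torch.float16).item():.2e}")  # 0 -- underflow!

# Loss scaling: multiply before casting down, divide after returning to FP32
scale = 2.0 ** 16  # illustrative; GradScaler adjusts its scale dynamically
scaled = torch.tensor(small_grad * scale, dtype=torch.float16)
recovered = scaled.float() / scale
print(f"Scaled to FP16, unscaled in FP32: {recovered.item():.2e}")  # ~1e-08 again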

5. TensorBoard Visualization

5.1 TensorBoard Basics

python
def tensorboard_demo():
    """TensorBoard visualization demo"""
    
    print("\n" + "=" * 60)
    print("TensorBoard可视化")
    print("=" * 60)
    
    # Simulated training data
    np.random.seed(42)
    epochs = 100
    
    # Simulated training metrics
    train_loss = 1.0 / (1 + np.arange(epochs) * 0.05) + np.random.randn(epochs) * 0.02
    val_loss = train_loss + np.random.randn(epochs) * 0.01
    train_acc = 0.5 + 0.4 * (1 - np.exp(-np.arange(epochs) / 30)) + np.random.randn(epochs) * 0.02
    val_acc = train_acc - 0.03 + np.random.randn(epochs) * 0.02
    
    # Mock up what the TensorBoard dashboards would show
    fig, axes = plt.subplots(2, 2, figsize=(12, 8))
    
    axes[0, 0].plot(train_loss, 'b-', label='Train loss', linewidth=2)
    axes[0, 0].plot(val_loss, 'r-', label='Val loss', linewidth=2)
    axes[0, 0].set_xlabel('Epoch')
    axes[0, 0].set_ylabel('Loss')
    axes[0, 0].set_title('Loss Curves')
    axes[0, 0].legend()
    axes[0, 0].grid(True, alpha=0.3)
    
    axes[0, 1].plot(train_acc, 'b-', label='Train accuracy', linewidth=2)
    axes[0, 1].plot(val_acc, 'r-', label='Val accuracy', linewidth=2)
    axes[0, 1].set_xlabel('Epoch')
    axes[0, 1].set_ylabel('Accuracy')
    axes[0, 1].set_title('Accuracy Curves')
    axes[0, 1].legend()
    axes[0, 1].grid(True, alpha=0.3)
    
    # Mock histogram (weight distribution)
    axes[1, 0].hist(np.random.randn(1000), bins=50, alpha=0.7, color='blue')
    axes[1, 0].set_title('Weight Distribution Histogram')
    axes[1, 0].set_xlabel('Weight value')
    axes[1, 0].set_ylabel('Count')
    axes[1, 0].grid(True, alpha=0.3)
    
    # Mock learning-rate schedule
    lr_schedule = 0.01 * (0.95 ** np.arange(epochs))
    axes[1, 1].plot(lr_schedule, 'g-', linewidth=2)
    axes[1, 1].set_xlabel('Epoch')
    axes[1, 1].set_ylabel('Learning Rate')
    axes[1, 1].set_title('Learning Rate Schedule')
    axes[1, 1].grid(True, alpha=0.3)
    
    plt.suptitle('Mock-up of TensorBoard Visualizations', fontsize=14)
    plt.tight_layout()
    plt.show()
    
    # Using TensorBoard with PyTorch
    print("\n📐 Using TensorBoard with PyTorch:")
    print("""
    from torch.utils.tensorboard import SummaryWriter
    
    # Create a writer
    writer = SummaryWriter('runs/experiment_1')
    
    # Log scalars
    writer.add_scalar('Loss/train', train_loss, epoch)
    writer.add_scalar('Loss/val', val_loss, epoch)
    writer.add_scalar('Accuracy/train', train_acc, epoch)
    
    # Log histograms
    writer.add_histogram('weights/fc1', model.fc1.weight, epoch)
    
    # Log images
    writer.add_image('images', img_grid, epoch)
    
    # Log the model graph
    writer.add_graph(model, input_tensor)
    
    # Close the writer
    writer.close()
    
    # Launch TensorBoard
    # tensorboard --logdir=runs
    """)
    
    print("\n💡 TensorBoard可记录内容:")
    print("   1. 标量(损失、准确率、学习率)")
    print("   2. 直方图(权重、梯度分布)")
    print("   3. 图像(输入、输出、特征图)")
    print("   4. 模型结构图")
    print("   5. 嵌入向量可视化")

tensorboard_demo()
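
If the tensorboard package is installed, the logging side can be tried end to end in a few lines; a minimal sketch (the runs/minimal_demo log directory is an arbitrary choice):

python
# Runnable sketch: log a few scalars and a histogram, then inspect them
# with `tensorboard --logdir=runs`. Assumes the tensorboard package is installed.
writer = SummaryWriter('runs/minimal_demo')  # arbitrary log directory

for step in range(100):
    fake_loss = 1.0 / (1 + 0.05 * step)                # synthetic decreasing loss
    writer.add_scalar('Loss/train', fake_loss, step)
    writer.add_histogram('weights/demo', torch.randn(1000) * (1 - step / 200), step)

writer.close()
print("Logged to runs/minimal_demo -- run: tensorboard --logdir=runs")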

5.2 An Alternative: Wandb

python
def wandb_alternative():
    """Introduce wandb as an alternative to TensorBoard"""
    
    print("\n" + "=" * 60)
    print("Wandb (Weights & Biases)")
    print("=" * 60)
    
    print("\n📊 Wandb vs TensorBoard对比:")
    
    comparison = """
    ╔══════════════════╦══════════════════════════════════════════════════════════════╗
    ║     特性         ║                    说明                                      ║
    ╠══════════════════╬══════════════════════════════════════════════════════════════╣
    ║ 云端存储         ║ Wandb自动上传,可远程查看                                    ║
    ║ 本地存储         ║ TensorBoard需要本地运行                                      ║
    ║ 超参数搜索       ║ Wandb内置超参数搜索和对比                                    ║
    ║ 协作分享         ║ Wandb可生成分享链接                                          ║
    ║ 开源             ║ TensorBoard完全开源                                         ║
    ║ 易用性           ║ Wandb配置更简单                                             ║
    ║ 免费额度         ║ Wandb个人版免费                                              ║
    ╚══════════════════╩══════════════════════════════════════════════════════════════╝
    """
    
    print(comparison)
    
    print("\n📐 Wandb使用示例:")
    print("""
    import wandb
    
    # 初始化
    wandb.init(project="my-project", name="experiment-1")
    
    # 记录超参数
    wandb.config.update({
        "learning_rate": 0.001,
        "batch_size": 32,
        "epochs": 100
    })
    
    # 训练循环
    for epoch in range(epochs):
        train_loss = train_epoch()
        val_acc = validate()
        
        # 记录指标
        wandb.log({
            "train_loss": train_loss,
            "val_accuracy": val_acc,
            "epoch": epoch
        })
    
    # 记录模型
    wandb.save('model.pth')
    """)
    
    print("\n💡 选择建议:")
    print("   个人/小团队实验 → Wandb(便捷)")
    print("   内网/隐私要求 → TensorBoard(本地)")
    print("   团队协作 → Wandb(分享方便)")

wandb_alternative()
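
Because wandb normally asks for an account, offline mode is handy for trying the API locally. A hedged sketch (assumes the wandb package is installed; mode="offline" writes to a local ./wandb directory instead of uploading, and the project/run names are arbitrary placeholders):

python
# Offline sketch of the wandb API (requires `pip install wandb`).
import wandb

run = wandb.init(project="training-tricks-demo", name="offline-test", mode="offline")
wandb.config.update({"learning_rate": 0.001, "batch_size": 32})

for epoch in range(10):
    fake_loss = 1.0 / (1 + epoch)  # synthetic metric
    wandb.log({"train_loss": fake_loss, "epoch": epoch})

run.finish()  # flush the local run; `wandb sync` can upload it later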

6. A Complete Training Template

6.1 A Trainer That Integrates All the Techniques

python
class Trainer:
    """A trainer that integrates all the training techniques above"""
    
    def __init__(self, model, train_loader, val_loader, 
                 learning_rate=0.001, device='cuda'):
        self.model = model.to(device)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.device = device
        
        # Optimizer
        self.optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        
        # Learning rate scheduler
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode='min', 
                                           factor=0.5, patience=10)
        
        # Loss function
        self.criterion = nn.CrossEntropyLoss()
        
        # Mixed precision training (only on CUDA)
        self.scaler = torch.cuda.amp.GradScaler() if device == 'cuda' else None
        
        # TensorBoard
        self.writer = SummaryWriter('runs/experiment')
        
        # Training history
        self.train_losses = []
        self.val_losses = []
        self.val_accs = []
    
    def train_epoch(self, epoch):
        """Train for one epoch (epoch is needed for the global batch-step index)"""
        self.model.train()
        total_loss = 0
        
        for batch_idx, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)
            
            self.optimizer.zero_grad()
            
            # Mixed precision path
            if self.scaler:
                with torch.cuda.amp.autocast():
                    output = self.model(data)
                    loss = self.criterion(output, target)
                
                self.scaler.scale(loss).backward()
                # Gradient clipping (unscale first so the norm is computed on real gradients)
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                output = self.model(data)
                loss = self.criterion(output, target)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
                self.optimizer.step()
            
            total_loss += loss.item()
            
            # Log the batch loss
            if batch_idx % 50 == 0:
                self.writer.add_scalar('Batch/TrainLoss', loss.item(), 
                                       len(self.train_loader) * epoch + batch_idx)
        
        avg_loss = total_loss / len(self.train_loader)
        return avg_loss
    
    def validate(self):
        """Evaluate on the validation set"""
        self.model.eval()
        correct = 0
        total = 0
        val_loss = 0
        
        with torch.no_grad():
            for data, target in self.val_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                val_loss += loss.item()
                
                _, predicted = torch.max(output, 1)
                total += target.size(0)
                correct += (predicted == target).sum().item()
        
        avg_loss = val_loss / len(self.val_loader)
        accuracy = 100 * correct / total
        
        return avg_loss, accuracy
    
    def train(self, epochs):
        """Full training loop"""
        print(f"Device: {self.device}")
        print(f"Mixed precision: {self.scaler is not None}")
        print("Starting training...\n")
        
        best_val_acc = 0
        
        for epoch in range(epochs):
            # Train
            train_loss = self.train_epoch(epoch)
            self.train_losses.append(train_loss)
            
            # Validate
            val_loss, val_acc = self.validate()
            self.val_losses.append(val_loss)
            self.val_accs.append(val_acc)
            
            # LR scheduling (driven by the validation loss)
            self.scheduler.step(val_loss)
            current_lr = self.optimizer.param_groups[0]['lr']
            
            # Log epoch metrics to TensorBoard
            self.writer.add_scalar('Epoch/TrainLoss', train_loss, epoch)
            self.writer.add_scalar('Epoch/ValLoss', val_loss, epoch)
            self.writer.add_scalar('Epoch/ValAcc', val_acc, epoch)
            self.writer.add_scalar('Epoch/LearningRate', current_lr, epoch)
            
            # Log gradient and weight histograms
            for name, param in self.model.named_parameters():
                if param.grad is not None:
                    self.writer.add_histogram(f'Gradients/{name}', param.grad, epoch)
                    self.writer.add_histogram(f'Weights/{name}', param.data, epoch)
            
            # Save the best model
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                torch.save(self.model.state_dict(), 'best_model.pth')
            
            # Print progress
            if (epoch + 1) % 10 == 0:
                print(f'Epoch [{epoch+1}/{epochs}]')
                print(f'  Train Loss: {train_loss:.4f}')
                print(f'  Val Loss: {val_loss:.4f}')
                print(f'  Val Acc: {val_acc:.2f}%')
                print(f'  LR: {current_lr:.6f}')
        
        print(f"\n训练完成!最佳验证准确率: {best_val_acc:.2f}%")
        self.writer.close()
        
        return self.train_losses, self.val_losses, self.val_accs
    
    def visualize_results(self):
        """Visualize the training results"""
        fig, axes = plt.subplots(1, 3, figsize=(15, 4))
        
        # Loss curves
        axes[0].plot(self.train_losses, 'b-', label='Train loss', linewidth=2)
        axes[0].plot(self.val_losses, 'r-', label='Val loss', linewidth=2)
        axes[0].set_xlabel('Epoch')
        axes[0].set_ylabel('Loss')
        axes[0].set_title('Loss Curves')
        axes[0].legend()
        axes[0].grid(True, alpha=0.3)
        
        # Accuracy curve
        axes[1].plot(self.val_accs, 'g-', linewidth=2)
        axes[1].set_xlabel('Epoch')
        axes[1].set_ylabel('Accuracy (%)')
        axes[1].set_title('Validation Accuracy')
        axes[1].grid(True, alpha=0.3)
        
        # Summary panel (plotting the LR history would require logging it during training)
        axes[2].text(0.5, 0.5, 'Training techniques used:\n' +
                    '✓ LR scheduling (ReduceLROnPlateau)\n' +
                    '✓ Gradient clipping (max_norm=1.0)\n' +
                    '✓ Mixed precision (AMP)\n' +
                    '✓ TensorBoard logging',
                    ha='center', va='center', fontsize=10,
                    transform=axes[2].transAxes)
        axes[2].set_title('Training Setup')
        axes[2].axis('off')
        
        plt.tight_layout()
        plt.show()

# Usage example (requires real data)
print("\n" + "=" * 60)
print("Trainer Usage Example")
print("=" * 60)

print("""
# 创建数据加载器
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False)

# 创建模型
model = MyModel()

# 创建训练器
trainer = Trainer(model, train_loader, val_loader, 
                  learning_rate=0.001, device='cuda')

# 训练
train_losses, val_losses, val_accs = trainer.train(epochs=100)

# 可视化
trainer.visualize_results()
""")

7. Summary

Technique                  Purpose                          Implementation
Learning rate scheduling   Adjust the step size over time   StepLR, ReduceLROnPlateau
Gradient clipping          Prevent exploding gradients      clip_grad_norm_
Mixed precision            Save memory, speed up training   autocast, GradScaler
Visualization              Monitor training                 TensorBoard, Wandb

Training checklist:

  • Set a suitable learning rate and scheduler
  • Add gradient clipping to prevent explosions
  • Use mixed precision to speed up training
  • Monitor training live with TensorBoard
  • Save a checkpoint of the best model
  • Record the hyperparameter configuration (see the sketch below)
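
For the last item on the checklist, TensorBoard's add_hparams is one way to record a configuration next to its final metrics so runs can be compared later; a minimal sketch (the hyperparameter values, the metric, and the runs/hparam_demo directory are all placeholders):

python
# Sketch: record a hyperparameter configuration with TensorBoard.
writer = SummaryWriter('runs/hparam_demo')             # placeholder log directory
writer.add_hparams(
    {'lr': 0.001, 'batch_size': 64, 'max_norm': 1.0},  # config (placeholders)
    {'hparam/best_val_acc': 87.5}                      # final metric (placeholder)
)
writer.close()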