第17章:错误处理与恢复

章节概述

本章全面介绍DRM Buddy分配器和AMDGPU VRAM管理中的错误处理机制。了解常见错误场景、检测方法、恢复策略以及调试技巧,对于开发稳定可靠的GPU驱动和应用程序至关重要。

难度级别 : 🔴 高级
预计阅读时间 : 50分钟
前置知识: Buddy算法、AMDGPU集成、内核调试基础


📋 本章学习目标

完成本章学习后,你将能够:

  • ✅ 识别常见的分配失败场景
  • ✅ 理解内存泄漏的检测和预防
  • ✅ 掌握状态一致性检查机制
  • ✅ 了解VRAM损坏的应对策略
  • ✅ 使用调试工具诊断问题

17.1 分配失败场景

17.1.1 ENOMEM vs ENOSPC

c 复制代码
/**
 * 两种主要的分配失败错误码
 */

// ENOMEM - 内存不足
error_enomem:
    errno = -ENOMEM;
    meaning = "系统内存不足,无法分配内核数据结构";
    
    typical_causes = {
        "slab分配器无法分配block结构",
        "vres结构分配失败",
        "临时列表分配失败",
    };
    
    example:
        block = kmem_cache_alloc(slab_blocks, GFP_KERNEL);
        if (!block)
            return -ENOMEM;  // 内核内存不足

// ENOSPC - 空间不足
error_enospc:
    errno = -ENOSPC;
    meaning = "VRAM空间不足,无法满足分配请求";
    
    typical_causes = {
        "VRAM已满",
        "找不到足够大的连续块",
        "碎片严重",
        "Visible VRAM耗尽",
    };
    
    example:
        if (mm->avail < size)
            return -ENOSPC;  // VRAM不足

/**
 * 错误码的使用
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
                          u64 start, u64 end, u64 size,
                          u64 min_page_size,
                          struct list_head *blocks,
                          unsigned long flags)
{
    // 检查基本条件
    if (size < mm->chunk_size)
        return -EINVAL;  // 参数错误
    
    if (size > mm->avail)
        return -ENOSPC;  // VRAM不足
    
    // 分配内部数据结构
    block = drm_block_alloc(mm, ...);
    if (!block)
        return -ENOMEM;  // 系统内存不足
    
    // 搜索合适的块
    block = __alloc_range_bias(mm, ...);
    if (!block)
        return -ENOSPC;  // 找不到合适的块
    
    return 0;
}

17.1.2 常见失败场景

c 复制代码
/**
 * 场景1: VRAM完全耗尽
 */
scenario_vram_full:
    vram_total = 8GB;
    vram_used = 7.9GB;
    vram_free = 100MB;
    
    request = allocate(256MB);
    result = -ENOSPC;
    
    // 解决方案
    solutions = {
        "1. Evict LRU对象到GTT",
        "2. 释放不必要的缓存",
        "3. 降低质量/分辨率",
        "4. 返回错误给应用层",
    };

/**
 * 场景2: 碎片导致的分配失败
 */
scenario_fragmentation:
    vram_total = 8GB;
    vram_used = 4GB;
    vram_free = 4GB;  // 但都是小块
    largest_block = 128MB;
    
    request = allocate_contiguous(512MB);
    result = -ENOSPC;  // 虽然总空闲4GB
    
    // 错误信息
    error_message = "Cannot allocate 512MB contiguous, "
                   "largest free block is 128MB";

/**
 * 场景3: Visible VRAM耗尽
 */
scenario_visible_exhausted:
    visible_vram = 256MB;
    visible_used = 256MB;
    invisible_free = 7.75GB;  // 大量空闲
    
    request = allocate_with_cpu_access(64MB);
    result = -ENOSPC;
    
    // 特殊情况:总VRAM充足但Visible不足
    error_message = "Visible VRAM exhausted (256MB/256MB), "
                   "cannot allocate CPU-accessible buffer";

/**
 * 场景4: 范围分配失败
 */
scenario_range_allocation:
    request = allocate_range(0, 256MB, 128MB);
    // 请求在[0, 256MB]范围内分配128MB
    
    // 该范围内可能没有足够的连续空间
    if (no_space_in_range)
        result = -ENOSPC;
    
    // 即使其他地方有空间也不行
    other_free_space = 7GB;  // 无济于事

/**
 * 场景5: 对齐要求无法满足
 */
scenario_alignment:
    request:
        size = 65MB;
        alignment = 128MB;
    
    // 需要找到128MB对齐且大小至少128MB的块
    required_block_size = 128MB;  // 向上取整到对齐
    
    if (no_aligned_block)
        result = -ENOSPC;

17.1.3 错误传播

c 复制代码
/**
 * 从Buddy到应用层的错误传播链
 */

// 层级1: Buddy分配器
int drm_buddy_alloc_blocks(...)
{
    if (size > mm->avail)
        return -ENOSPC;
    // ...
}

// 层级2: VRAM Manager
static int amdgpu_vram_mgr_new(...)
{
    r = drm_buddy_alloc_blocks(&mgr->mm, ...);
    if (r) {
        // 记录错误
        pr_debug("VRAM allocation failed: %d\n", r);
        
        // 尝试回退策略
        if (r == -ENOSPC && can_fallback) {
            // 尝试非连续分配
            flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
            r = drm_buddy_alloc_blocks(&mgr->mm, ...);
        }
        
        if (r)
            goto error_fini;
    }
    return 0;

error_fini:
    // 清理
    ttm_resource_fini(man, &vres->base);
    kfree(vres);
    return r;
}

// 层级3: TTM
int ttm_bo_validate(...)
{
    r = man->func->alloc(man, bo, place, &bo->resource);
    if (r) {
        // TTM处理错误
        if (r == -ENOSPC) {
            // 尝试eviction
            r = ttm_bo_evict(bo);
            if (!r) {
                // 重试分配
                r = man->func->alloc(man, bo, place, &bo->resource);
            }
        }
        return r;
    }
}

// 层级4: GEM
int amdgpu_gem_create_ioctl(...)
{
    r = amdgpu_bo_create(adev, &bp, &gobj);
    if (r) {
        // 转换为用户态错误
        switch (r) {
        case -ENOSPC:
            DRM_ERROR("Out of VRAM: cannot create %lluMB buffer\n",
                     size >> 20);
            break;
        case -ENOMEM:
            DRM_ERROR("Out of system memory\n");
            break;
        default:
            DRM_ERROR("BO creation failed: %d\n", r);
        }
        return r;
    }
}

// 层级5: 用户空间
userspace_application:
    ret = ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
    if (ret < 0) {
        switch (errno) {
        case ENOSPC:
            fprintf(stderr, "Out of GPU memory\n");
            // 降低质量
            reduce_texture_quality();
            retry();
            break;
        case ENOMEM:
            fprintf(stderr, "Out of system memory\n");
            exit(1);
            break;
        }
    }

17.2 内存泄漏检测

17.2.1 块泄漏

c 复制代码
/**
 * 块泄漏的表现
 */
block_leak_symptoms:
    // 现象1: 可用空间持续减少
    time_0:  avail = 8GB
    time_1h: avail = 7.5GB
    time_2h: avail = 7GB
    time_3h: avail = 6.5GB
    // 但应用声称已释放所有对象
    
    // 现象2: 空闲列表不正确
    expected_free = 2GB;
    actual_free = mm->avail = 1.5GB;
    missing = 512MB;  // 泄漏的块

/**
 * 块泄漏检测
 */
struct leak_detector {
    u64 total_allocated;
    u64 total_freed;
    u64 net_allocated;
    
    struct list_head allocated_blocks;
    spinlock_t lock;
};

// 追踪分配
/**
 * leak_detector_track_alloc() - record a newly allocated block
 * @ld:    detector whose counters and live list are updated
 * @block: block that was just handed out by the allocator
 *
 * Adds @block to the detector's live-allocation list and bumps the
 * running counters.  Uses an IRQ-safe lock so it may be called from
 * any context.
 */
void leak_detector_track_alloc(struct leak_detector *ld,
                               struct drm_buddy_block *block)
{
    unsigned long irq_flags;

    spin_lock_irqsave(&ld->lock, irq_flags);

    ld->total_allocated++;
    ld->net_allocated++;
    list_add(&block->leak_link, &ld->allocated_blocks);

    spin_unlock_irqrestore(&ld->lock, irq_flags);
}

// 追踪释放
/**
 * leak_detector_track_free() - record that a tracked block was freed
 * @ld:    detector whose counters and live list are updated
 * @block: block that is being returned to the allocator
 *
 * Removes @block from the live-allocation list and updates the running
 * counters under the detector's IRQ-safe lock.
 */
void leak_detector_track_free(struct leak_detector *ld,
                              struct drm_buddy_block *block)
{
    unsigned long irq_flags;

    spin_lock_irqsave(&ld->lock, irq_flags);

    ld->total_freed++;
    ld->net_allocated--;
    list_del(&block->leak_link);

    spin_unlock_irqrestore(&ld->lock, irq_flags);
}

// 检查泄漏
/**
 * leak_detector_check() - report any blocks that were never freed
 * @ld: detector instance to audit
 *
 * Walks the live-allocation list and prints one line per outstanding
 * block.
 *
 * Fixes over the original: the list is now traversed under @ld->lock
 * (it was walked unlocked, racing against concurrent track_alloc/
 * track_free calls), and the per-block report prints the block order
 * instead of the byte size — the original called
 * drm_buddy_block_size(mm, block) with an undeclared `mm`.
 */
void leak_detector_check(struct leak_detector *ld)
{
    struct drm_buddy_block *block;
    unsigned long flags;

    spin_lock_irqsave(&ld->lock, flags);

    if (ld->net_allocated == 0) {
        spin_unlock_irqrestore(&ld->lock, flags);
        pr_info("No leaks detected\n");
        return;
    }

    pr_err("Memory leak detected: %llu blocks leaked\n",
           ld->net_allocated);

    /* List every block that is still outstanding, with its call site. */
    list_for_each_entry(block, &ld->allocated_blocks, leak_link) {
        pr_err("  Leaked block: offset=0x%llx, order=%u, "
               "allocated_at=%ps\n",
               drm_buddy_block_offset(block),
               drm_buddy_block_order(block),
               block->alloc_caller);
    }

    spin_unlock_irqrestore(&ld->lock, flags);
}

/**
 * 启用泄漏检测
 */
// 在分配时记录调用栈
/**
 * drm_buddy_alloc_blocks_traced() - allocation wrapper with leak tracking
 *
 * Performs a normal buddy allocation and, on success, stamps every
 * returned block with the caller address and allocation timestamp
 * before registering it with the leak detector.
 *
 * Fix: the original was declared `void` yet executed `return r;` and
 * `return 0;` — it must return int so callers can observe the error
 * code.
 */
int drm_buddy_alloc_blocks_traced(...)
{
    // Normal allocation; propagate failures unchanged.
    r = drm_buddy_alloc_blocks(...);
    if (r)
        return r;
    
    // Record who allocated each block and when, then register it
    // with the detector for later leak reports.
    list_for_each_entry(block, blocks, link) {
        block->alloc_caller = __builtin_return_address(0);
        block->alloc_time = ktime_get();
        leak_detector_track_alloc(&mm->leak_detector, block);
    }
    
    return 0;
}

17.2.2 BO泄漏

c 复制代码
/**
 * Buffer Object泄漏检测
 */

// BO引用计数追踪
struct amdgpu_bo_tracker {
    int total_bos;
    int leaked_bos;
    struct list_head bo_list;
    struct mutex lock;
};

// 创建BO时注册
/**
 * amdgpu_bo_register() - add a freshly created BO to the device tracker
 * @bo: buffer object to start tracking
 *
 * Records creation metadata (owner pid/comm, timestamp, call site) under
 * the tracker lock so later leak reports can attribute each BO to a
 * process and code path.
 */
void amdgpu_bo_register(struct amdgpu_bo *bo)
{
    struct amdgpu_bo_tracker *tracker = &bo->adev->bo_tracker;
    
    mutex_lock(&tracker->lock);
    list_add(&bo->track_link, &tracker->bo_list);
    tracker->total_bos++;
    
    /* Record creation info for later leak attribution. */
    bo->creator_pid = current->pid;
    /* NOTE(review): this stores a pointer into current->comm, which is
     * rewritten if the task renames itself and dangles once the task
     * exits — should copy via get_task_comm() into an owned buffer;
     * confirm the declared type of creator_comm. */
    bo->creator_comm = current->comm;
    bo->create_time = ktime_get();
    bo->create_caller = __builtin_return_address(0);
    
    mutex_unlock(&tracker->lock);
}

// 销毁BO时注销
/**
 * amdgpu_bo_unregister() - drop a BO from the device tracker on destroy
 * @bo: buffer object being destroyed
 *
 * Counterpart of amdgpu_bo_register(); removes the BO from the tracked
 * list and decrements the live count under the tracker lock.
 */
void amdgpu_bo_unregister(struct amdgpu_bo *bo)
{
    struct amdgpu_bo_tracker *tracker = &bo->adev->bo_tracker;

    mutex_lock(&tracker->lock);
    tracker->total_bos--;
    list_del(&bo->track_link);
    mutex_unlock(&tracker->lock);
}

// 检查泄漏
/**
 * amdgpu_bo_check_leaks() - report buffer objects that were never freed
 * @adev: device whose BO tracker is audited
 *
 * Prints one line per still-registered BO with its size, age, owning
 * process and creation call site, and snapshots the count into
 * tracker->leaked_bos.  The whole walk runs under the tracker lock.
 */
void amdgpu_bo_check_leaks(struct amdgpu_device *adev)
{
    struct amdgpu_bo_tracker *tracker = &adev->bo_tracker;
    struct amdgpu_bo *bo;
    ktime_t now = ktime_get();
    
    mutex_lock(&tracker->lock);
    
    if (tracker->total_bos == 0) {
        pr_info("No BO leaks\n");
        goto unlock;
    }
    
    pr_err("BO leak detected: %d BOs not freed\n", tracker->total_bos);
    
    /* Dump every BO still on the list with its attribution data. */
    list_for_each_entry(bo, &tracker->bo_list, track_link) {
        u64 age_ms = ktime_ms_delta(now, bo->create_time);
        
        pr_err("  Leaked BO: size=%lluMB, age=%llums, "
               "pid=%d (%s), caller=%ps\n",
               amdgpu_bo_size(bo) >> 20,
               age_ms,
               bo->creator_pid,
               bo->creator_comm,
               bo->create_caller);
    }
    
    tracker->leaked_bos = tracker->total_bos;

unlock:
    mutex_unlock(&tracker->lock);
}

/**
 * Debugfs接口查看BO
 */
/**
 * amdgpu_debugfs_bo_list_show() - dump all live BOs through debugfs
 * @m:      seq_file the report is written to
 * @unused: unused debugfs callback argument
 *
 * Fix: the original printed tracker->total_bos *before* taking
 * tracker->lock, so the header count could disagree with the list that
 * is subsequently walked; the count is now read under the lock.
 *
 * Return: always 0 (seq_file convention).
 */
static int amdgpu_debugfs_bo_list_show(struct seq_file *m, void *unused)
{
    struct amdgpu_device *adev = m->private;
    struct amdgpu_bo_tracker *tracker = &adev->bo_tracker;
    struct amdgpu_bo *bo;
    ktime_t now = ktime_get();
    
    mutex_lock(&tracker->lock);
    
    seq_printf(m, "Total BOs: %d\n\n", tracker->total_bos);
    
    list_for_each_entry(bo, &tracker->bo_list, track_link) {
        u64 age_ms = ktime_ms_delta(now, bo->create_time);
        
        seq_printf(m, "BO: size=%lluMB, domain=0x%x, "
                     "age=%llums, pid=%d (%s)\n",
                   amdgpu_bo_size(bo) >> 20,
                   bo->preferred_domains,
                   age_ms,
                   bo->creator_pid,
                   bo->creator_comm);
    }
    
    mutex_unlock(&tracker->lock);
    return 0;
}

// 使用
$ cat /sys/kernel/debug/dri/0/amdgpu_bo_list
Total BOs: 3

BO: size=512MB, domain=0x4, age=1234ms, pid=5678 (glxgears)
BO: size=256MB, domain=0x4, age=5678ms, pid=5678 (glxgears)
BO: size=1024MB, domain=0x4, age=10000ms, pid=1234 (python3)

17.2.3 引用计数错误

c 复制代码
/**
 * 引用计数不匹配导致泄漏或UAF
 */

// 正确的引用计数使用
correct_refcount_usage:
    // 创建时refcount = 1
    bo = amdgpu_bo_create(...);
    // bo->tbo.base.refcount.refs = 1
    
    // 增加引用
    drm_gem_object_get(&bo->tbo.base);
    // refcount = 2
    
    // 使用BO
    use_bo(bo);
    
    // 释放引用
    drm_gem_object_put(&bo->tbo.base);
    // refcount = 1
    
    // 最终释放
    drm_gem_object_put(&bo->tbo.base);
    // refcount = 0 → 释放BO

// 错误1: 忘记put
bug_missing_put:
    bo = amdgpu_bo_create(...);
    drm_gem_object_get(&bo->tbo.base);
    use_bo(bo);
    // 忘记 drm_gem_object_put()
    // → 泄漏!refcount永远不会到0

// 错误2: 多余的put
bug_extra_put:
    bo = amdgpu_bo_create(...);
    use_bo(bo);
    drm_gem_object_put(&bo->tbo.base);
    drm_gem_object_put(&bo->tbo.base);  // 多余!
    // → refcount变负 → UAF或crash

/**
 * 引用计数调试
 */
/**
 * debug_refcount_get() - take a GEM reference and log the transition
 * @obj:    GEM object whose refcount is incremented
 * @caller: human-readable tag (typically __func__) for the log line
 */
void debug_refcount_get(struct drm_gem_object *obj, const char *caller)
{
    int before = kref_read(&obj->refcount);

    drm_gem_object_get(obj);

    pr_debug("refcount_get: %s: %d → %d\n",
            caller, before, kref_read(&obj->refcount));
}

/**
 * debug_refcount_put() - drop a GEM reference and log the transition
 * @obj:    GEM object whose refcount is decremented
 * @caller: human-readable tag (typically __func__) for the log line
 *
 * The new count is derived from the pre-put value rather than re-read:
 * once the put has run, @obj may already have been freed.
 */
void debug_refcount_put(struct drm_gem_object *obj, const char *caller)
{
    int before = kref_read(&obj->refcount);

    drm_gem_object_put(obj);

    pr_debug("refcount_put: %s: %d → %d\n",
            caller, before, before - 1);
}

// 使用宏简化
#define amdgpu_bo_get_debug(bo) \
    debug_refcount_get(&(bo)->tbo.base, __func__)

#define amdgpu_bo_put_debug(bo) \
    debug_refcount_put(&(bo)->tbo.base, __func__)

17.3 状态一致性检查

17.3.1 Buddy内部一致性

c 复制代码
/**
 * 验证Buddy分配器的内部状态
 */
/**
 * drm_buddy_verify_integrity() - audit the allocator's cached statistics
 * @mm: buddy allocator to check
 *
 * Recomputes the free and cleared-free byte totals by walking every
 * free list, validates per-block state (free flag, stored order), and
 * compares the recomputed totals against mm->avail / mm->clear_avail.
 *
 * NOTE(review): assumes the caller already holds whatever lock
 * serializes access to @mm's free lists — confirm at call sites.
 *
 * Return: 0 when the state is consistent, -EINVAL otherwise.
 */
int drm_buddy_verify_integrity(struct drm_buddy *mm)
{
    u64 computed_avail = 0;
    u64 computed_clear_avail = 0;
    int errors = 0;
    int order;
    
    /* 1. Walk every free list and recompute the totals. */
    for (order = 0; order <= mm->max_order; order++) {
        struct drm_buddy_block *block;
        
        list_for_each_entry(block, &mm->free_list[order], link) {
            u64 block_size;
            
            /* Every entry on a free list must be marked free. */
            if (!drm_buddy_block_is_free(block)) {
                pr_err("Block in free_list[%d] not marked free\n",
                      order);
                errors++;
            }
            
            /* The stored order must match the list it sits on. */
            if (drm_buddy_block_order(block) != order) {
                pr_err("Block in free_list[%d] has order %d\n",
                      order, drm_buddy_block_order(block));
                errors++;
            }
            
            /* Accumulate free (and cleared-free) space. */
            block_size = drm_buddy_block_size(mm, block);
            computed_avail += block_size;
            
            if (drm_buddy_block_is_clear(block))
                computed_clear_avail += block_size;
        }
    }
    
    /* 2. Cached free-space counter must match the recomputed value. */
    if (computed_avail != mm->avail) {
        pr_err("avail mismatch: computed=%llu, mm->avail=%llu\n",
              computed_avail, mm->avail);
        errors++;
    }
    
    /* 3. Same check for the cleared-space counter. */
    if (computed_clear_avail != mm->clear_avail) {
        pr_err("clear_avail mismatch: computed=%llu, "
               "mm->clear_avail=%llu\n",
               computed_clear_avail, mm->clear_avail);
        errors++;
    }
    
    /* 4. Free space can never exceed the pool size.  (The original
     * computed mm->size - mm->avail into a u64 and compared that
     * against mm->size, relying on unsigned wrap-around to detect the
     * bad case; test the invariant directly instead.) */
    if (mm->avail > mm->size) {
        pr_err("Impossible state: avail > size\n");
        errors++;
    }
    
    if (errors) {
        pr_err("Buddy integrity check FAILED: %d errors\n", errors);
        return -EINVAL;
    }
    
    pr_info("Buddy integrity check PASSED\n");
    return 0;
}

/**
 * 验证伙伴关系
 */
/**
 * verify_buddy_relationship() - validate a block against its buddy
 * @mm:    owning allocator (needed to compute block sizes)
 * @block: block whose buddy relationship is checked
 *
 * A valid buddy pair must be adjacent (in either order) and of equal
 * size; root blocks have no buddy and trivially pass.
 *
 * Return: 0 when valid, -EINVAL when the pair is inconsistent.
 */
int verify_buddy_relationship(struct drm_buddy *mm,
                             struct drm_buddy_block *block)
{
    struct drm_buddy_block *buddy;
    u64 block_offset, buddy_offset;
    u64 block_size;
    
    buddy = __get_buddy(block);
    if (!buddy)
        return 0;  /* root blocks have no buddy */
    
    block_offset = drm_buddy_block_offset(block);
    buddy_offset = drm_buddy_block_offset(buddy);
    block_size = drm_buddy_block_size(mm, block);
    
    /* Buddies must sit directly next to each other (either side). */
    if (buddy_offset != block_offset + block_size &&
        block_offset != buddy_offset + block_size) {
        pr_err("Buddy blocks not adjacent: "
               "block=0x%llx, buddy=0x%llx, size=0x%llx\n",
               block_offset, buddy_offset, block_size);
        return -EINVAL;
    }
    
    /* Buddies are split from the same parent, so sizes must match. */
    if (drm_buddy_block_size(mm, buddy) != block_size) {
        pr_err("Buddy blocks different size\n");
        return -EINVAL;
    }
    
    return 0;
}

/**
 * 运行时断言
 */
#define BUDDY_ASSERT(cond, fmt, ...) \
    do { \
        if (unlikely(!(cond))) { \
            pr_err("Buddy assertion failed: " fmt "\n", ##__VA_ARGS__); \
            dump_stack(); \
            if (panic_on_buddy_error) \
                panic("Buddy corruption detected"); \
        } \
    } while (0)

// 使用
/**
 * drm_buddy_free_block() - sanity-checked free path (assert variant)
 * @mm:    owning allocator
 * @block: block being returned to the pool
 *
 * Demonstrates BUDDY_ASSERT guarding the two invariants a free must
 * satisfy before any allocator state is mutated: the block is actually
 * allocated, and returning it cannot push the free total past the pool
 * size.
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                         struct drm_buddy_block *block)
{
    BUDDY_ASSERT(drm_buddy_block_is_allocated(block),
                "Freeing non-allocated block");
    
    BUDDY_ASSERT(mm->avail + drm_buddy_block_size(mm, block) <= mm->size,
                "Free would exceed total size");
    
    /* Actual release logic elided in this example. */
    // ...
}

17.3.2 WARN_ON和BUG_ON使用

c 复制代码
/**
 * 内核断言的使用
 */

// BUG_ON - 严重错误,触发kernel panic
/**
 * drm_buddy_free_block() - return an allocated block to the free pool
 * @mm:    owning buddy allocator
 * @block: block previously handed out by the allocator
 *
 * Freeing a block that is not marked allocated means the allocator
 * state is already corrupt, so that case is treated as fatal.
 *
 * Fix: the original also asserted BUG_ON(!drm_buddy_block_order(block)),
 * but order 0 is a perfectly valid order (a minimum-size chunk), so
 * that check would panic on legitimate frees; it has been removed.
 */
void drm_buddy_free_block(struct drm_buddy *mm,
                         struct drm_buddy_block *block)
{
    /* Double-free / free of a never-allocated block is unrecoverable. */
    BUG_ON(!drm_buddy_block_is_allocated(block));
    
    /* Return the bytes to the pool and merge up the buddy chain. */
    mm->avail += drm_buddy_block_size(mm, block);
    __drm_buddy_free(mm, block, false);
}

// WARN_ON - 警告,但继续执行
static int __drm_buddy_free(struct drm_buddy *mm,
                           struct drm_buddy_block *block,
                           bool force_merge)
{
    struct drm_buddy_block *buddy;
    
    // 检查clear状态不一致(非致命)
    buddy = __get_buddy(block);
    if (buddy && !force_merge) {
        WARN_ON(drm_buddy_block_is_clear(block) ==
                drm_buddy_block_is_clear(buddy));
        // 继续执行,但记录警告
    }
    
    // 合并
    // ...
}

/**
 * 何时使用BUG_ON vs WARN_ON
 */
guideline:
    BUG_ON:
        "数据结构损坏(无法继续)",
        "内存越界(安全问题)",
        "不可能的状态(逻辑错误)",
        "继续执行会导致更严重问题",
    
    WARN_ON:
        "性能问题(非致命)",
        "不推荐的使用方式",
        "状态不一致(可恢复)",
        "调试信息",

/**
 * 在生产环境中的处理
 */
// 开发版本:激进的检查
#ifdef CONFIG_DEBUG_KERNEL
    #define BUDDY_CHECK(cond) BUG_ON(!(cond))
#else
    // 生产版本:温和的处理
    #define BUDDY_CHECK(cond) \
        do { \
            if (unlikely(!(cond))) { \
                WARN_ONCE(1, "Buddy check failed: " #cond); \
                return -EINVAL; \
            } \
        } while (0)
#endif

17.3.3 运行时验证

c 复制代码
/**
 * 周期性健康检查
 */
struct buddy_health_monitor {
    struct delayed_work work;
    struct drm_buddy *mm;
    int check_interval_ms;
    
    // 统计
    u64 total_checks;
    u64 errors_found;
};

/**
 * buddy_health_check_work() - periodic integrity-check work item
 * @work: embedded delayed_work inside struct buddy_health_monitor
 *
 * Runs drm_buddy_verify_integrity() on the monitored allocator,
 * attempts recovery on failure, and unconditionally re-arms itself for
 * the next interval.
 */
static void buddy_health_check_work(struct work_struct *work)
{
    struct buddy_health_monitor *mon = 
        container_of(work, struct buddy_health_monitor, work.work);
    int r;
    
    mon->total_checks++;
    
    /* Full consistency audit of the allocator state. */
    r = drm_buddy_verify_integrity(mon->mm);
    if (r) {
        mon->errors_found++;
        pr_err("Health check #%llu FAILED\n", mon->total_checks);
        
        /* Try to repair the state or trigger a larger recovery. */
        attempt_recovery(mon->mm);
    }
    
    /* Re-arm for the next periodic check. */
    schedule_delayed_work(&mon->work,
                         msecs_to_jiffies(mon->check_interval_ms));
}

// 启动监控
/**
 * start_buddy_health_monitor() - kick off periodic integrity checks
 * @mm: buddy allocator to monitor
 *
 * Allocates a monitor context and schedules the first delayed health
 * check; the work item then re-arms itself.
 *
 * Fix: the original dereferenced the kzalloc() result without checking
 * for NULL; an allocation failure now just leaves monitoring disabled.
 */
void start_buddy_health_monitor(struct drm_buddy *mm)
{
    struct buddy_health_monitor *mon;
    
    mon = kzalloc(sizeof(*mon), GFP_KERNEL);
    if (!mon) {
        pr_warn("buddy health monitor disabled: out of memory\n");
        return;
    }
    
    mon->mm = mm;
    mon->check_interval_ms = 60000;  /* once a minute */
    
    INIT_DELAYED_WORK(&mon->work, buddy_health_check_work);
    schedule_delayed_work(&mon->work,
                         msecs_to_jiffies(mon->check_interval_ms));
}

17.4 VRAM损坏应对

17.4.1 VRAM错误检测

c 复制代码
/**
 * ECC错误处理
 */
struct vram_ecc_handler {
    // ECC统计
    atomic64_t correctable_errors;
    atomic64_t uncorrectable_errors;
    
    // 损坏的页面列表
    struct list_head bad_pages;
    spinlock_t lock;
};

// ECC错误中断处理
/**
 * amdgpu_vram_ecc_irq_handler() - IRQ handler for VRAM ECC events
 * @irq:  interrupt number (unused here)
 * @data: the struct amdgpu_device that raised the interrupt
 *
 * Correctable errors are only counted; uncorrectable ones additionally
 * get the affected page quarantined via mark_vram_page_bad().
 *
 * Return: IRQ_HANDLED in all cases.
 */
irqreturn_t amdgpu_vram_ecc_irq_handler(int irq, void *data)
{
    struct amdgpu_device *adev = data;
    u64 error_addr;
    bool correctable;
    
    /* Pull the fault address and severity from the hardware. */
    error_addr = read_ecc_error_address(adev);
    correctable = is_correctable_error(adev);
    
    if (correctable) {
        atomic64_inc(&adev->ecc.correctable_errors);
        pr_info("ECC correctable error at 0x%llx\n", error_addr);
    } else {
        atomic64_inc(&adev->ecc.uncorrectable_errors);
        pr_err("ECC uncorrectable error at 0x%llx\n", error_addr);
        
        /* Quarantine the page so it is never allocated again. */
        mark_vram_page_bad(adev, error_addr);
    }
    
    return IRQ_HANDLED;
}

/**
 * 损坏页面管理
 */
struct vram_bad_page {
    u64 offset;              // 损坏的偏移
    u64 size;                // 页面大小
    ktime_t detected_time;   // 检测时间
    struct list_head link;
};

/**
 * mark_vram_page_bad() - record an uncorrectable page and fence it off
 * @adev:   device that reported the ECC error
 * @offset: faulting VRAM address (any offset within the page)
 *
 * Called from IRQ context, hence GFP_ATOMIC.
 *
 * Fixes: the original dereferenced the kzalloc() result without a NULL
 * check, and it reserved the raw (unaligned) fault address while the
 * list entry recorded the page-aligned base; both now consistently use
 * the aligned page base.
 */
void mark_vram_page_bad(struct amdgpu_device *adev, u64 offset)
{
    struct vram_bad_page *bad_page;
    u64 page_base = offset & PAGE_MASK;
    unsigned long flags;
    
    bad_page = kzalloc(sizeof(*bad_page), GFP_ATOMIC);
    if (!bad_page) {
        /* Can't remember the page, but still keep it out of use. */
        pr_err("No memory to record bad VRAM page 0x%llx\n", offset);
        reserve_vram_range(adev, page_base, PAGE_SIZE);
        return;
    }
    
    bad_page->offset = page_base;
    bad_page->size = PAGE_SIZE;
    bad_page->detected_time = ktime_get();
    
    spin_lock_irqsave(&adev->ecc.lock, flags);
    list_add(&bad_page->link, &adev->ecc.bad_pages);
    spin_unlock_irqrestore(&adev->ecc.lock, flags);
    
    pr_err("Marked VRAM page 0x%llx as bad\n", offset);
    
    /* Keep the allocator from ever handing this page out again. */
    reserve_vram_range(adev, page_base, PAGE_SIZE);
}

/**
 * 分配时避开损坏区域
 */
/**
 * is_vram_range_bad() - does [offset, offset+size) overlap a bad page?
 * @adev:   device whose bad-page list is consulted
 * @offset: start of the candidate VRAM range
 * @size:   length of the candidate range in bytes
 *
 * Fix: the original walked adev->ecc.bad_pages without holding
 * adev->ecc.lock, racing against mark_vram_page_bad() which inserts
 * entries from IRQ context; the walk now runs under the IRQ-safe lock.
 *
 * Return: true when the range touches any recorded bad page.
 */
bool is_vram_range_bad(struct amdgpu_device *adev, u64 offset, u64 size)
{
    struct vram_bad_page *bad_page;
    u64 end = offset + size;
    unsigned long flags;
    bool bad = false;
    
    spin_lock_irqsave(&adev->ecc.lock, flags);
    
    list_for_each_entry(bad_page, &adev->ecc.bad_pages, link) {
        u64 bad_start = bad_page->offset;
        u64 bad_end = bad_start + bad_page->size;
        
        /* Standard half-open interval overlap test. */
        if (offset < bad_end && end > bad_start) {
            bad = true;
            break;
        }
    }
    
    spin_unlock_irqrestore(&adev->ecc.lock, flags);
    return bad;
}

17.4.2 数据恢复

c 复制代码
/**
 * VRAM内容备份
 */
struct vram_backup {
    void *shadow_memory;     // GTT中的备份
    u64 vram_offset;
    u64 size;
    bool valid;
};

// 创建备份
/**
 * create_vram_backup() - snapshot a VRAM block into system memory
 * @adev:  owning device
 * @block: buddy block whose contents are backed up
 *
 * Allocates a shadow buffer and copies the block's VRAM contents into
 * it via SDMA.
 *
 * Fixes: the original dereferenced the kzalloc() result without a NULL
 * check and leaked the backup struct when the vmalloc() failed; both
 * error paths are now handled.
 *
 * Return: 0 on success, -ENOMEM when either allocation fails.
 */
int create_vram_backup(struct amdgpu_device *adev,
                      struct drm_buddy_block *block)
{
    struct vram_backup *backup;
    u64 offset = drm_buddy_block_offset(block);
    u64 size = drm_buddy_block_size(&adev->mman.vram_mgr.mm, block);
    
    backup = kzalloc(sizeof(*backup), GFP_KERNEL);
    if (!backup)
        return -ENOMEM;
    
    /* Shadow copy lives in kernel virtual memory. */
    backup->shadow_memory = vmalloc(size);
    if (!backup->shadow_memory) {
        kfree(backup);  /* was leaked on this path before */
        return -ENOMEM;
    }
    
    /* Blit the VRAM contents into the shadow buffer. */
    amdgpu_sdma_copy_vram_to_gtt(adev, offset, 
                                 backup->shadow_memory, size);
    
    backup->vram_offset = offset;
    backup->size = size;
    backup->valid = true;
    
    /* TODO(review): @backup is never published or returned, so it and
     * its shadow buffer leak even on success — the API needs an out
     * parameter or a per-device backup list. */
    return 0;
}

// 恢复数据
/**
 * restore_from_backup() - write a shadow copy back into VRAM
 * @adev:   owning device
 * @backup: previously created backup descriptor
 *
 * Return: 0 on success, -EINVAL when the backup was never populated.
 */
int restore_from_backup(struct amdgpu_device *adev,
                       struct vram_backup *backup)
{
    int ret = -EINVAL;

    if (backup->valid) {
        /* Blit the shadow copy back over the original VRAM range. */
        amdgpu_sdma_copy_gtt_to_vram(adev, backup->shadow_memory,
                                    backup->vram_offset, backup->size);

        pr_info("Restored %lluMB VRAM from backup\n", backup->size >> 20);
        ret = 0;
    }

    return ret;
}

/**
 * 关键数据的冗余存储
 */
critical_data_protection:
    // 对于重要的BO(如Framebuffer),保持GTT副本
    if (bo->flags & AMDGPU_GEM_CREATE_SHADOW) {
        // 分配shadow BO in GTT
        shadow_bo = allocate_shadow_in_gtt(bo->size);
        
        // 同步更新
        on_bo_write:
            update_vram(bo);
            update_gtt(shadow_bo);
        
        // 检测到VRAM损坏时
        on_vram_error:
            restore_from_shadow(bo, shadow_bo);
    }

17.4.3 灾难恢复

c 复制代码
/**
 * GPU重置流程
 */
/**
 * amdgpu_gpu_reset_vram_recovery() - rebuild VRAM management after reset
 * @adev: device that was just reset
 *
 * Evicts everything out of VRAM, tears down and re-creates the Buddy
 * allocator, optionally sanity-tests the memory, then re-enables the
 * resource manager so buffers can migrate back.
 *
 * Return: 0 on success, or a negative errno from a failed step.
 */
int amdgpu_gpu_reset_vram_recovery(struct amdgpu_device *adev)
{
    struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
    int r;
    
    pr_info("Starting VRAM recovery after GPU reset\n");
    
    /* Step 1: move every VRAM-resident object out to GTT. */
    r = ttm_resource_manager_evict_all(&adev->mman.bdev,
                                      &mgr->manager);
    if (r) {
        pr_err("Failed to evict VRAM: %d\n", r);
        return r;
    }
    
    /* Step 2: tear down the (possibly corrupted) Buddy state. */
    drm_buddy_fini(&mgr->mm);
    
    /* Step 3: rebuild the allocator from scratch. */
    r = drm_buddy_init(&mgr->mm, 
                      adev->gmc.real_vram_size,
                      PAGE_SIZE);
    if (r) {
        pr_err("Failed to reinit Buddy: %d\n", r);
        return r;
    }
    
    /* Step 4: optional end-to-end write/readback test of the VRAM. */
    r = test_vram_integrity(adev);
    if (r) {
        pr_err("VRAM integrity test failed: %d\n", r);
        /* Persistent failures here likely indicate faulty hardware
         * (RMA candidate); recovery continues regardless. */
    }
    
    /* Step 5: let TTM migrate objects back into VRAM again. */
    ttm_resource_manager_set_used(&mgr->manager, true);
    
    pr_info("VRAM recovery complete\n");
    return 0;
}

/**
 * 测试VRAM完整性
 */
int test_vram_integrity(struct amdgpu_device *adev)
{
    u64 vram_size = adev->gmc.real_vram_size;
    u64 test_size = min(vram_size, 256ULL << 20);  // 测试256MB
    void *test_pattern;
    void *readback;
    int errors = 0;
    
    test_pattern = vmalloc(test_size);
    readback = vmalloc(test_size);
    
    // 生成测试模式
    for (int i = 0; i < test_size / 4; i++)
        ((u32 *)test_pattern)[i] = 0xDEADBEEF ^ i;
    
    // 写入VRAM
    amdgpu_sdma_copy_gtt_to_vram(adev, test_pattern, 0, test_size);
    
    // 读回
    amdgpu_sdma_copy_vram_to_gtt(adev, 0, readback, test_size);
    
    // 比较
    for (int i = 0; i < test_size / 4; i++) {
        if (((u32 *)readback)[i] != ((u32 *)test_pattern)[i]) {
            pr_err("VRAM error at offset 0x%x: "
                   "expected 0x%x, got 0x%x\n",
                   i * 4,
                   ((u32 *)test_pattern)[i],
                   ((u32 *)readback)[i]);
            errors++;
            
            if (errors > 100) {
                pr_err("Too many errors, stopping test\n");
                break;
            }
        }
    }
    
    vfree(test_pattern);
    vfree(readback);
    
    if (errors)
        return -EIO;
    
    return 0;
}

17.5 调试技巧

17.5.1 调试选项

c 复制代码
/**
 * 编译时调试选项
 */

// Kconfig
config DRM_BUDDY_DEBUG
    bool "Enable DRM Buddy allocator debugging"
    help
      Enable extensive debugging for the DRM Buddy allocator.
      This includes runtime checks, leak detection, and verbose
      logging. Only enable for development.

config DRM_BUDDY_DEBUG_VERBOSE
    bool "Verbose DRM Buddy debugging"
    depends on DRM_BUDDY_DEBUG
    help
      Enable very verbose logging for every allocation and free.
      Warning: This will generate large amounts of kernel log output.

// 使用
#ifdef CONFIG_DRM_BUDDY_DEBUG
    #define buddy_debug(fmt, ...) \
        pr_debug("drm_buddy: " fmt, ##__VA_ARGS__)
    
    static inline void buddy_verify_on_free(struct drm_buddy *mm,
                                           struct drm_buddy_block *block)
    {
        // 检查块状态
        BUG_ON(!drm_buddy_block_is_allocated(block));
        
        // 验证整体一致性
        drm_buddy_verify_integrity(mm);
    }
#else
    #define buddy_debug(fmt, ...) do { } while (0)
    #define buddy_verify_on_free(mm, block) do { } while (0)
#endif

/**
 * 运行时调试选项
 */
// 模块参数
static bool buddy_debug_enabled = false;
module_param_named(buddy_debug, buddy_debug_enabled, bool, 0644);
MODULE_PARM_DESC(buddy_debug, "Enable Buddy allocator debugging");

static int buddy_debug_level = 0;
module_param_named(buddy_debug_level, buddy_debug_level, int, 0644);
MODULE_PARM_DESC(buddy_debug_level, 
                "Debug level: 0=off, 1=errors, 2=warnings, 3=info, 4=verbose");

// 使用
void drm_buddy_alloc_blocks_debug(...)
{
    if (buddy_debug_level >= 3) {
        pr_info("alloc: size=%llu, flags=0x%lx\n", size, flags);
    }
    
    // 正常分配
    r = drm_buddy_alloc_blocks(...);
    
    if (r && buddy_debug_level >= 1) {
        pr_err("alloc failed: %d, avail=%llu\n", r, mm->avail);
        if (buddy_debug_level >= 4) {
            drm_buddy_print(mm, &p);
        }
    }
}

17.5.2 Ftrace和Trace Events

c 复制代码
/**
 * Trace events for Buddy allocator
 */

// 定义trace events
TRACE_EVENT(drm_buddy_alloc,
    TP_PROTO(struct drm_buddy *mm, u64 size, unsigned long flags, int ret),
    TP_ARGS(mm, size, flags, ret),
    
    TP_STRUCT__entry(
        __field(u64, size)
        __field(unsigned long, flags)
        __field(u64, avail_before)
        __field(u64, avail_after)
        __field(int, ret)
    ),
    
    TP_fast_assign(
        __entry->size = size;
        __entry->flags = flags;
        __entry->avail_before = mm->avail + (ret ? 0 : size);
        __entry->avail_after = mm->avail;
        __entry->ret = ret;
    ),
    
    TP_printk("size=%llu flags=0x%lx avail=%llu→%llu ret=%d",
              __entry->size, __entry->flags,
              __entry->avail_before, __entry->avail_after,
              __entry->ret)
);

TRACE_EVENT(drm_buddy_free,
    TP_PROTO(struct drm_buddy *mm, struct drm_buddy_block *block),
    TP_ARGS(mm, block),
    
    TP_STRUCT__entry(
        __field(u64, offset)
        __field(u64, size)
        __field(u64, avail_before)
        __field(u64, avail_after)
    ),
    
    TP_fast_assign(
        __entry->offset = drm_buddy_block_offset(block);
        __entry->size = drm_buddy_block_size(mm, block);
        __entry->avail_before = mm->avail;
        __entry->avail_after = mm->avail + __entry->size;
    ),
    
    TP_printk("offset=0x%llx size=%llu avail=%llu→%llu",
              __entry->offset, __entry->size,
              __entry->avail_before, __entry->avail_after)
);

// 使用trace events
int drm_buddy_alloc_blocks(...)
{
    int r;
    
    r = __drm_buddy_alloc_blocks(...);
    
    trace_drm_buddy_alloc(mm, size, flags, r);
    
    return r;
}

/**
 * 启用trace
 */
// 启用所有drm_buddy events
$ echo 1 > /sys/kernel/debug/tracing/events/drm_buddy/enable

// 查看trace
$ cat /sys/kernel/debug/tracing/trace

# tracer: nop
#
#           TASK-PID   CPU#    TIMESTAMP  FUNCTION
#              | |       |          |         |
   glxgears-5678  [001]  1234.567890: drm_buddy_alloc: size=4194304 flags=0x4 avail=8589934592→8585740288 ret=0
   glxgears-5678  [001]  1234.567901: drm_buddy_alloc: size=2097152 flags=0x4 avail=8585740288→8583643136 ret=0
   glxgears-5678  [001]  1234.890123: drm_buddy_free: offset=0x100000000 size=4194304 avail=8583643136→8587837440

17.5.3 常用调试命令

bash 复制代码
#!/bin/bash
# VRAM debugging tool-kit: interactive helpers built on the amdgpu
# sysfs/debugfs interfaces for inspecting VRAM usage and failures.

# 1. Show current VRAM state (totals plus the head of the Buddy dump).
vram_status() {
    echo "=== VRAM Status ==="
    cat /sys/class/drm/card0/device/mem_info_vram_total
    cat /sys/class/drm/card0/device/mem_info_vram_used
    cat /sys/kernel/debug/dri/0/amdgpu_vram_mm | head -20
}

# 2. Leak check: compare used VRAM before and after a manual test run.
check_leaks() {
    echo "=== Checking for leaks ==="
    
    # Record the starting usage.
    initial=$(cat /sys/class/drm/card0/device/mem_info_vram_used)
    
    # Let the operator run the workload under test.
    echo "Run your test now, press Enter when done"
    read
    
    # Record the final usage.
    final=$(cat /sys/class/drm/card0/device/mem_info_vram_used)
    
    # Anything still allocated after the workload exits is suspect.
    diff=$((final - initial))
    if [ $diff -gt 0 ]; then
        echo "LEAK DETECTED: $diff bytes not freed"
    else
        echo "No leaks detected"
    fi
}

# 3. Follow the kernel log for allocation-failure messages.
monitor_failures() {
    echo "=== Monitoring allocation failures ==="
    dmesg -w | grep -i "vram.*fail\|out of.*vram\|enospc"
}

# 4. Dump the full Buddy allocator state from debugfs.
dump_buddy() {
    echo "=== Buddy Allocator State ==="
    cat /sys/kernel/debug/dri/0/amdgpu_vram_mm
}

# 5. List currently allocated buffer objects.
list_bos() {
    echo "=== Buffer Objects ==="
    cat /sys/kernel/debug/dri/0/amdgpu_gem_info | head -50
}

# 6. Stress test: allocate lots of VRAM, then re-check the status.
stress_test() {
    echo "=== Running VRAM stress test ==="
    
    # Allocate large amounts of VRAM (illustrative placeholder — swap
    # in a real OpenCL/CUDA allocation workload here).
    for i in {1..100}; do
        # Allocate via OpenCL or CUDA.
        clBuildProgram ...
    done
    
    # Inspect the resulting state.
    vram_status
}

# Main interactive menu loop.
while true; do
    echo
    echo "VRAM Debug Menu:"
    echo "1. Check VRAM status"
    echo "2. Check for leaks"
    echo "3. Monitor failures"
    echo "4. Dump Buddy state"
    echo "5. List BOs"
    echo "6. Stress test"
    echo "0. Exit"
    echo
    read -p "Choice: " choice
    
    case $choice in
        1) vram_status ;;
        2) check_leaks ;;
        3) monitor_failures ;;
        4) dump_buddy ;;
        5) list_bos ;;
        6) stress_test ;;
        0) exit 0 ;;
    esac
done

🎯 最佳实践

✅ 应该做的

  1. 检查返回值

    c 复制代码
    // ✓ 总是检查
    r = drm_buddy_alloc_blocks(...);
    if (r) {
        handle_error(r);
        return r;
    }
  2. 正确的引用计数

    c 复制代码
    // ✓ get/put配对
    drm_gem_object_get(&bo->tbo.base);
    use_bo(bo);
    drm_gem_object_put(&bo->tbo.base);
  3. 错误路径清理

    c 复制代码
    // ✓ 使用goto清理
    r = alloc_resource_a();
    if (r)
        goto error_a;
    
    r = alloc_resource_b();
    if (r)
        goto error_b;
    
    return 0;

error_b:

free_resource_a();

error_a:

return r;

复制代码
4. **启用调试选项**
```c
// ✓ 开发时启用
CONFIG_DRM_BUDDY_DEBUG=y
buddy_debug=1

❌ 不应该做的

  1. 忽略错误

    c 复制代码
    // ❌ 不检查返回值
    drm_buddy_alloc_blocks(...);
    
    // ✓ 总是检查
    if (drm_buddy_alloc_blocks(...))
        return -ENOMEM;
  2. 过度使用BUG_ON

    c 复制代码
    // ❌ 非致命情况用BUG_ON
    BUG_ON(size > mm->avail);
    
    // ✓ 使用WARN_ON
    if (WARN_ON(size > mm->avail))
        return -ENOSPC;
  3. 泄漏资源

    c 复制代码
    // ❌ 错误路径不清理
    bo = allocate();
    if (init_failed)
        return -EIO;  // bo泄漏!
    
    // ✓ 清理
    if (init_failed) {
        free(bo);
        return -EIO;
    }

💡 实践练习

练习1: 实现泄漏检测器

c 复制代码
/**
 * 为Buddy实现完整的泄漏检测系统
 */
struct leak_detector {
    // TODO: 实现数据结构和函数
};

练习2: 错误注入测试

c 复制代码
/**
 * 实现错误注入框架用于测试
 */
void inject_allocation_failure(void);
void inject_vram_error(u64 offset);

📚 本章总结

核心要点

  1. 分配失败

    • ENOMEM:系统内存不足
    • ENOSPC:VRAM空间不足
    • 正确传播和处理错误码
  2. 内存泄漏

    • 块泄漏检测
    • BO泄漏追踪
    • 引用计数管理
  3. 状态一致性

    • Buddy完整性验证
    • WARN_ON vs BUG_ON
    • 运行时检查
  4. VRAM损坏

    • ECC错误处理
    • 损坏页面管理
    • 数据恢复机制
  5. 调试工具

    • Debugfs接口
    • Ftrace events
    • 调试脚本

错误处理流程图

复制代码
分配请求
    ↓
参数检查 → 失败 → 返回-EINVAL
    ↓ 成功
尝试分配
    ↓
成功?
    ├─ 是 → 返回0
    └─ 否 → ENOSPC?
              ├─ 是 → 尝试回退策略
              │        ├─ 非连续分配
              │        ├─ Eviction
              │        └─ 碎片整理
              │             ↓
              │        成功?
              │            ├─ 是 → 返回0
              │            └─ 否 → 返回-ENOSPC
              └─ 否 → 返回具体错误

🔗 导航

相关推荐
DeeplyMind6 天前
DRM Buddy Allocator 技术学习文档系列目录
drm_buddy
DeeplyMind8 天前
第11章:AMDGPU-VRAM分配流程
drm_buddy
DeeplyMind23 天前
06 - Buddy分配算法
drm_buddy
DeeplyMind24 天前
07 - Buddy释放与合并算法
drm_buddy
DeeplyMind1 个月前
05 - AMDGPU中的VRAM管理器
drm·amdgpu·drm_buddy·ttm
DeeplyMind1 个月前
04 - 核心数据结构详解
drm·drm_buddy·vram
DeeplyMind1 个月前
03 - DRM子系统与AMDGPU架构
drm·drm_buddy·vram分配
DeeplyMind4 个月前
Linux DRM 内存管理子系统的概念关系理解:gem、ttm、drm_buddy
drm·tm·drm_buddy