Linux Physical Memory Management - The Boot Memory Allocator

1 Introduction

This article introduces memblock, the framework Linux uses to manage physical memory early in boot. It analyzes the framework's design and source code in depth, and explains how the memory it manages is eventually released to the buddy system.

2 Framework

Before starting this chapter, it helps to know where memblock sits within Linux memory management. The diagram below shows the overall Linux memory management architecture.

memblock is the boot memory allocator. It manages physical memory early in Linux boot: before the buddy system is initialized it acts as the primary memory management framework, and other modules obtain the memory they need through the allocation interfaces memblock provides.

3 Early Linux Physical Memory Management - The Boot Memory Allocator

3.1 Overview

The boot memory allocator is the early manager of physical memory, active before the buddy system is initialized. Memory already has to be allocated during early kernel startup, so the kernel provides this temporary physical memory manager for that phase. Once the page allocator and the block (slab) allocator have been initialized, the boot memory allocator hands its physical pages over to the page allocator, releasing them into the buddy system, and is then discarded. The Linux kernel has carried two boot memory allocators, bootmem and memblock; memblock is replacing bootmem, and on arm64 in particular bootmem can be switched off completely through a configuration macro.

This article focuses on the principles behind memblock and how they are implemented in the source code.

3.2 How the Boot Memory Allocator Works

The boot memory allocator maintains two collections of regions: the memory array and the reserved array. memory describes the total physical memory and is populated during early Linux initialization from the device tree memory node. reserved describes memory that is already in use, including the kernel code segment, regions the user explicitly reserves through reserved-memory nodes in the device tree, CMA reserved memory, and so on. Early in boot, when other modules request memory through memblock, a free range is found within the memory array and recorded in the reserved array; when the caller later releases it through the corresponding free interface, the range is removed from reserved again.

The figure below roughly illustrates this logic:
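As a minimal usage sketch of this flow (a hypothetical early-boot caller; memblock_alloc() and memblock_free() themselves are analyzed in section 3.4.3):

/* Hypothetical early-boot caller: obtain a buffer before the buddy system
 * exists. The range is found inside memblock.memory and recorded in
 * memblock.reserved. */
static void __init example_early_buffer(void)
{
	void *buf;

	buf = memblock_alloc(SZ_1M, SMP_CACHE_BYTES);
	if (!buf)
		panic("example: failed to allocate early buffer\n");

	/* ... use buf during early initialization ... */

	/* If the buffer is only needed temporarily, hand it back: the range is
	 * removed from memblock.reserved again. (In the kernel version quoted
	 * in this article, memblock_free() takes a physical address.) */
	memblock_free(__pa(buf), SZ_1M);
}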

3.3 memblock Data Structures

The data structure used by the boot allocator memblock is as follows:

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

bottom_up selects the allocation direction: true means bottom-up allocation (from low to high addresses), false means top-down (from high to low).

current_limit is the highest physical address that allocations may hand out.

memory and reserved are the two region collections described in section 3.2. memory is the "memory" type (the total physical memory range, covering both allocated and free memory). Its extent depends on several factors: the kernel command line may carry mem=nn[KMG], which states the usable address range; if that range is smaller than the memory read from the device tree memory node during initialization, the kernel uses only the range given by mem=, otherwise (larger or not specified) the device-tree-defined physical memory range is used. The range is also adjusted during later initialization steps, for example the region occupied by the kernel image (from _text to _end) is marked reserved. reserved is the "reserved" type, i.e. memory that has already been allocated.
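As a concrete example of the mem= handling: on arm64 the parameter is parsed by an early_param handler in arch/arm64/mm/init.c that sets memory_limit, which arm64_memblock_init() (analyzed in section 3.4.2) later applies. The handler looks roughly like this (simplified sketch; details vary between kernel versions):

static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	/* Cap usable physical memory at the amount given by mem=nn[KMG] */
	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);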

The definition of the memory type structure:

struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

regions points to the array of memory block regions.

cnt is the current number of regions in the array.

max is the maximum number of elements the regions array can hold.

total_size is the total size of all regions.

name is the name of this memory type.

The definition of a memory region:

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

base: the base address of the region.

size: the length of the region.

flags: flag bits, taking the following enum values:

enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	/* A hot-pluggable region, i.e. physical memory that may be inserted or
	 * removed while the system is running */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	/* A mirrored region. Memory mirroring is a redundancy technique, similar
	 * in spirit to disk hot-sparing: the data is kept in two copies, one in
	 * main memory and one in the mirror memory. */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	/* Do not add this region to the kernel direct (linear) mapping */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

The relationship between these data structures:
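As a textual complement to the diagram, the relationship can be made concrete by walking the two arrays directly. A small debug-style sketch (a hypothetical helper using only the fields defined above; the kernel provides a similar dump itself via memblock_dump_all() when memblock=debug is set):

/* Hypothetical debug helper: print every region of one memblock type. */
static void __init dump_memblock_type(const struct memblock_type *type)
{
	unsigned long i;

	pr_info("%s: cnt=%lu max=%lu total_size=%pa\n",
		type->name, type->cnt, type->max, &type->total_size);

	for (i = 0; i < type->cnt; i++) {
		const struct memblock_region *r = &type->regions[i];

		pr_info("  region %lu: base=%pa size=%pa flags=%#x\n",
			i, &r->base, &r->size, (unsigned int)r->flags);
	}
}

static void __init dump_memblock(void)
{
	dump_memblock_type(&memblock.memory);	/* total physical memory */
	dump_memblock_type(&memblock.reserved);	/* already-allocated memory */
}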

3.4 memblock Source Code Analysis

As mentioned earlier, memblock is gradually replacing bootmem; this requires the following kernel configuration option to be enabled:

CONFIG_NO_BOOTMEM=y

The memblock sources consist mainly of the header include/linux/memblock.h and the implementation file mm/memblock.c. In memblock.c, global variables maintain the memory regions:

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

The managed memory (memblock_memory_init_regions) and the reserved memory (memblock_reserved_init_regions) are each kept as a static array, and these structures are all placed in the __initdata_memblock section.
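The array sizes come from compile-time constants defined near the top of mm/memblock.c; in the kernel version these snippets were taken from they look roughly like this (values may differ between versions and configurations):

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif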

3.4.1 Reading the Physical Memory Range

The memory type is populated by reading the memory node from the device tree. The call path is:

start_kernel() -> setup_arch() -> setup_machine_fdt() -> early_init_dt_scan() -> early_init_dt_scan_nodes()
drivers/of/fdt.c


void __init early_init_dt_scan_nodes(void)
{
	int rc = 0;

	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);

	/* Retrieve various information from the /chosen node */
 /* The /chosen scan also copies the bootargs into boot_command_line */
	rc = of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	if (!rc)
		pr_warn("No chosen node found, continuing without\n");

	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Handle linux,usable-memory-range property */
	early_init_dt_check_for_usable_mem_range();
}
int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
				     int depth, void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *reg, *endp;
	int l;
	bool hotpluggable;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;
/* Read the range from linux,usable-memory or reg; either property may describe system RAM, with linux,usable-memory taking precedence */
	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(__be32));
	hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

	pr_debug("memory scan node %s, reg size %d,\n", uname, l);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		u64 base, size;
//Take the next (base, size) pair from the reg property
		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		pr_debug(" - %llx, %llx\n", base, size);
//Add this physical memory range to memblock
		early_init_dt_add_memory_arch(base, size);

		if (!hotpluggable)
			continue;

		if (memblock_mark_hotplug(base, size))
			pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
				base, base + size);
	}

	return 0;
}
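As a concrete example of the cell parsing (hypothetical values): with #address-cells = <2> and #size-cells = <2> in the device tree root node, a memory node whose reg property is <0x0 0x80000000 0x0 0x40000000> is decoded by dt_mem_next_cell() into base = 0x80000000 and size = 0x40000000 (1 GiB). Each such (base, size) pair is then passed to early_init_dt_add_memory_arch(), shown next, which range-checks and page-aligns it before calling memblock_add().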
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;
//Boundary and alignment checks on the range
	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (!PAGE_ALIGNED(base)) {
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	if (base + size < phys_offset) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}
	if (base < phys_offset) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
 //Hand the range over to the memblock boot memory allocator
	memblock_add(base, size);
}

// mm/memblock.c

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);
//Add the [base, base + size) range to the memblock.memory region array
	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
/* If the region array is still empty, store the range directly in slot 0 */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;	/* base address */
		type->regions[0].size = size;	/* size */
		type->regions[0].flags = flags;	/* flags */
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;	/* total size */
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;
//Walk the existing regions in the array
	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;		/* base of the current region */
		phys_addr_t rend = rbase + rgn->size;	/* end of the current region */
//Regions are sorted by base, so once rbase >= end nothing later can overlap
		if (rbase >= end)
			break;
//The current region ends before the new range begins, so it cannot overlap; skip it
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);	/* everything below rend is handled; continue from there */
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
//Insert the remaining, non-overlapping tail of the new range
		if (insert)	/* second pass: actually insert it before the current index */
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {	/* first pass: we only counted; grow the array if needed, then repeat */
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);	/* merge adjacent, contiguous regions */
		return 0;
	}
}

If the given memblock_type collection is an empty array, the range is added directly and the function returns. Otherwise the repeat logic runs twice: the first pass computes nr_new, the number of regions that need to be added; only the second pass calls memblock_insert_region() to perform the actual insertions. If the array turns out to be too small during this process, memblock_double_array() is called to double its capacity. After the insertions, memblock_merge_regions() merges adjacent regions.
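A short worked example with hypothetical addresses illustrates the behaviour:

/* Hypothetical call sequence and the resulting memblock.memory layout: */
memblock_add(0x80000000, 0x40000000);	/* regions: [0x80000000, 0xc0000000) */
memblock_add(0xb0000000, 0x20000000);	/* overlaps: only [0xc0000000, 0xd0000000) is
					 * inserted, then merged with its neighbour ->
					 * regions: [0x80000000, 0xd0000000) */
memblock_add(0x100000000ULL, 0x40000000); /* disjoint: a second region is inserted,
					 * kept sorted by base address */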

Through the initialization above, the physical memory range described by the memory node has been read into memblock.memory.

3.4.2 The memblock Initialization Process

The steps above are in fact already part of memblock initialization: they establish the physical memory range available to the system. The initialization that follows processes that range further, moving already-used memory (for example the memory occupied by the kernel image), explicitly configured reserved memory (for example device tree reservations), and memory described by boot parameters into the reserved array.

start_kernel() -> setup_arch() -> arm64_memblock_init()


void __init arm64_memblock_init(void)
{
//Size of the kernel linear (direct) mapping region. At this point the kernel
    //still runs on the temporary block mappings set up by the early boot code;
    //the final linear mapping must cover the kernel code, stacks and so on.
	s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);

	/*
	 * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
	 * be limited in their ability to support a linear map that exceeds 51
	 * bits of VA space, depending on the placement of the ID map. Given
	 * that the placement of the ID map may be randomized, let's simply
	 * limit the kernel's linear map to 51 bits as well if we detect this
	 * configuration.
	 */
	if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
	    is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
		linear_region_size = min_t(u64, linear_region_size, BIT(51));
	}
//Remove physical memory above the supported physical address size
    //(PHYS_MASK_SHIFT); see the analysis of memblock_remove() later on.
	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
//Take the start of usable physical memory, rounded down to ARM64_MEMSTART_ALIGN
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);
//If the usable physical range is larger than the kernel's linear mapping
    //region, warn that VA_BITS is too small; memory that the linear mapping
    //cannot cover is removed just below.
	if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
		pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
//Remove everything from max(memstart_addr + linear_region_size, _end) upwards
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
  //If usable memory still exceeds the linear-map size, drop the low end so that at most linear_region_size bytes remain, keeping memstart_addr aligned
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
  //memory_limit comes from the mem=nn[KMG] boot parameter and limits how much
    //physical memory is used; if the parameter was given, memory_limit no
    //longer equals its initial value PHYS_ADDR_MAX
	if (memory_limit != PHYS_ADDR_MAX) {
//Trim usable memory down to memory_limit
		memblock_mem_limit_remove_map(memory_limit);
 //Add the kernel image back so that the trimming above cannot make it inaccessible
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd to become inaccessible via the linear mapping.
		 * Otherwise, this is a no-op
		 */
      
		u64 base = phys_initrd_start & PAGE_MASK;
  
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
//Reserve the physical memory occupied by the kernel image (_stext to _end)
	memblock_reserve(__pa_symbol(_stext), _end - _stext);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
//Process reserved memory described in the device tree (/memreserve/ and reserved-memory)
	early_init_fdt_scan_reserved_mem();
   //Reserve the crash kernel (kdump) memory; normal allocations must not touch it
	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();
 //Record high_memory, the virtual address just past the end of usable DRAM
	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

The implementation of memblock_remove:

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);
//Remove the [base, base + size) range from the memblock.memory array
	return memblock_remove_range(&memblock.memory, base, size);
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;
//Split regions at base and base + size, returning the start and end indices of the isolated range
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);	/* remove the isolated regions one by one, highest index first */
	return 0;
}




static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
 //Shift the following entries down over the removed slot and decrement the region count
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;
   //Special case: the array became empty; keep one dummy entry
	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}
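For intuition (hypothetical addresses): removing the range [0x88000000, 0x90000000) from a type that holds a single region [0x80000000, 0xc0000000) first has memblock_isolate_range() split that region into three pieces — [0x80000000, 0x88000000), [0x88000000, 0x90000000) and [0x90000000, 0xc0000000) — and return the indices of the middle piece; memblock_remove_region() then deletes just that piece, leaving the two outer regions in the array.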

The implementation of memblock_reserve:

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);
//Add the [base, base + size) range to the reserved array memblock.reserved;
//memblock_add_range() was analyzed earlier
	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

The implementation that parses the reserved-memory configuration from the device tree:

void __init early_init_fdt_scan_reserved_mem(void)
{
	int n;
	u64 base, size;

	if (!initial_boot_params)
		return;

	/* Process header /memreserve/ fields */
//Parse the /memreserve/ entries in the FDT header
	for (n = 0; ; n++) {
		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
		if (!size)
			break;
		early_init_dt_reserve_memory_arch(base, size, false);
	}
//Parse the reserved-memory node
	of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
 //Initialize the bookkeeping for the reserved regions collected above, i.e. the
    //global table static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]
    //in drivers/of/of_reserved_mem.c; the ranges themselves were already recorded
    //in memblock.reserved by the preceding steps.
	fdt_init_reserved_mem();
//Reserve memory for the ELF core header described in the device tree, used by a dump-capture kernel to access the crashed kernel's memory
	fdt_reserve_elfcorehdr();
}
static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
					  int depth, void *data)
{
	static int found;
	int err;

	if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
		if (__reserved_mem_check_root(node) != 0) {
			pr_err("Reserved memory: unsupported node format, ignoring\n");
			/* break scan */
			return 1;
		}
		found = 1;
		/* scan next node */
		return 0;
	} else if (!found) {
		/* scan next node */
		return 0;
	} else if (found && depth < 2) {
		/* scanning of /reserved-memory has been finished */
		return 1;
	}

	if (!of_fdt_device_is_available(initial_boot_params, node))
		return 0;
//This is where the reserved region is actually handed to memblock
	err = __reserved_mem_reserve_reg(node, uname);
	if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
		fdt_reserved_mem_save_node(node, uname, 0, 0);	/* dynamic reservation (size only): record the node, memory is allocated later */

	/* scan next node */
	return 0;
}


static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	int first = 1;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);
//Calls through to memblock_reserve() to add the range to reserved
		if (size &&
		    early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
				uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
//Record this reserved-memory node in the reserved_mem[] table
		if (first) {
			fdt_reserved_mem_save_node(node, uname, base, size);
			first = 0;
		}
	}
	return 0;
}

3.4.3 Interfaces Provided by memblock

The source analysis above already covered memblock_remove, memblock_add and memblock_reserve. In addition, memblock provides memblock_alloc and memblock_free.
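For reference, the prototypes of these interfaces as used in the kernel version quoted here (simplified; in more recent kernels memblock_free() takes a virtual pointer instead of a physical address):

int memblock_add(phys_addr_t base, phys_addr_t size);      /* add a range to memblock.memory */
int memblock_remove(phys_addr_t base, phys_addr_t size);   /* remove a range from memblock.memory */
int memblock_reserve(phys_addr_t base, phys_addr_t size);  /* add a range to memblock.reserved */
int memblock_free(phys_addr_t base, phys_addr_t size);     /* remove a range from memblock.reserved */
void *memblock_alloc(phys_addr_t size, phys_addr_t align); /* find a free range, reserve it, return its virtual address */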

The implementation of memblock_alloc:

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;
...
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
...
//Allocate the physical range
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						exact_nid);
...
//Convert the physical address to a virtual address
	return phys_to_virt(alloc);
}




phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
//Search memory for a suitable free range of the requested size; found is its base address
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
   //If a range was found, record it in reserved
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
 //Retry the search without a NUMA node constraint
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

The implementation of memblock_free, which removes a block from the reserved array:

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);	/* drop the corresponding kmemleak object */
//Remove the range from the reserved array
	return memblock_remove_range(&memblock.reserved, base, size);
}


static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;
//Isolate the range into its own region(s)
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;
    //Remove each isolated region
	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

3.5 memblock Debug Logging

memblock debug logging can be enabled through a boot parameter in the chosen node's bootargs:

chosen {
...
    bootargs = "rdinit=/init earlycon console=ttyAMA0 loglevel=7 memblock=debug";
}
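Inside mm/memblock.c this parameter is handled by an early_param hook that switches on the memblock_dbg() output used throughout the functions above; roughly (simplified sketch, details vary by version):

static int memblock_debug __initdata_memblock;

/* memblock=debug on the command line turns on memblock_dbg() logging, so every
 * memblock_add/remove/reserve/free call is printed with its caller. */
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);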

3.6 Releasing memblock Memory to the Buddy System

Once the buddy system has been initialized, the kernel releases the memory tracked by memblock into the buddy system; memblock is no longer used after this point.

The kernel call chain for this release is as follows:

The entry point where memblock releases its memory to the buddy system is memblock_free_all():

void __init memblock_free_all(void)
{
	unsigned long pages;
 /* Free the parts of the memmap (the struct page array) that cover holes
    between memory regions, so that no struct page is left describing memory
    that does not exist (only relevant without SPARSEMEM_VMEMMAP). */
	free_unused_memmap();
//Reset the managed-page counters of every zone
	reset_all_zones_managed_pages();
//Release the free memory ranges to the buddy system
	pages = free_low_memory_core_early();
//Account the released pages in _totalram_pages
	totalram_pages_add(pages);
}

Before the actual release, the unused memmap is freed and the per-zone page counters are reset; free_low_memory_core_early() then does the real work:

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);	/* clear the hotplug flag on all memory regions */
//Mark the struct pages of all reserved regions as reserved; they must not enter the buddy system
	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 *  because in some case like Node0 doesn't have RAM installed
	 *  low ram will be on Node1
	 */
/* Walk every free range (in memory but not in reserved) and release it to the buddy system */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

free_low_memory_core_early() uses for_each_free_mem_range() to walk every range that is in memblock.memory but not in memblock.reserved, calling __free_memory_core() on each to hand it to the buddy system.

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);	/* round the start address up to a page frame number */
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);	/* round the end address down to a PFN, capped at max_low_pfn */

	if (start_pfn >= end_pfn)
		return 0;
//Free the range given by the start and end page frame numbers
	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

__free_memory_core() first converts the given range into start and end physical page frame numbers:

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;
//Release the range in naturally aligned chunks
	while (start < end) {
//Pick the largest order for which start is aligned (lowest set bit of start), capped at MAX_ORDER - 1
		order = min(MAX_ORDER - 1UL, __ffs(start));
//Shrink the order while the block would run past end
		while (start + (1UL << order) > end)
			order--;
 /*
        * Release this block of pages to the buddy system:
        * pfn_to_page(start): struct page of the first page in the block
        * start: the starting page frame number
        * order: the order of the block (index into the free_area[] lists)
        */
		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);	/* advance by 2^order pages and continue */
	}
}

__free_pages_memory() starts from the first page frame number and, at each step, computes the largest possible order before handing the block to the buddy system; in other words, pages are always released as the largest naturally aligned blocks possible.
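For example (hypothetical PFNs), with start_pfn = 0x103 and end_pfn = 0x400: the loop first frees a single page at 0x103 (order 0, since __ffs(0x103) = 0), then an order-2 block at 0x104, order 3 at 0x108, order 4 at 0x110, and so on, until an entire order-9 block is freed at 0x200; each step picks the largest naturally aligned block that still fits below end_pfn, capped at MAX_ORDER - 1.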

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))	/* the node/section this page belongs to is not initialized yet; do not free it now */
		return;
	__free_pages_core(page, order);	/* hand the pages to the buddy allocator core */
}

memblock_free_pages() checks that the page can legitimately be freed at this point and then calls the buddy system interface __free_pages_core() to release it. From here on, the pages belong to the buddy system.
