第十七篇:实现堆内存管理
malloc底层原理
-
在内存管理系统中,arena为任意大小的内存分配提供了统一的接口,但arena本身只是一个内存仓库,并不直接对外提供内存分配
-
内存块描述符将同类arena中的空闲内存汇聚到一起,作为某一规格内存块的入口
malloc的实现
底层初始化
相关结构定义如下:
c
//A free memory block. It lives inside an arena page; while unallocated
//it is linked into its size class descriptor's free_list via this node.
struct mem_block {
    struct list_node free_block;
};
//Memory block descriptor: the entry point for one block size class,
//gathering the free blocks of that size from all arenas of the class.
//There are 7 descriptors, for sizes 16, 32, 64, 128, 256, 512, 1024 bytes.
struct mem_block_desc {
    uint32_t block_size;        //size in bytes of each block in this class
    uint32_t blocks_per_arena;  //blocks that fit in one arena page
    struct list free_list;      //currently free blocks of this class
};
//Arena: allocation header placed at the start of a page (or page run).
struct arena {
    //Owning mem_block_desc; NULL when this is a large (whole-page) allocation.
    struct mem_block_desc* desc;
    //When large is true, cnt is the number of page frames.
    //When large is false, cnt is the number of free mem_blocks remaining.
    uint32_t cnt;
    bool large;
};
初始化如下:
c
//Prepare for malloc: initialize the array of memory block descriptors,
//one per size class, doubling the block size from 16 bytes upward.
void block_desc_init(struct mem_block_desc* desc_array) {
    uint16_t idx;
    uint16_t cur_size = 16;
    for (idx = 0; idx < MEM_BLOCK_DESC_CNT; idx++, cur_size *= 2) {
        struct mem_block_desc* d = &desc_array[idx];
        d->block_size = cur_size;
        //Usable space in an arena page is the page minus the arena header.
        d->blocks_per_arena = (PG_SIZE - sizeof(struct arena)) / cur_size;
        list_init(&d->free_list);
    }
}
内存块和arena的转换函数
c
//Return the address of the idx-th memory block inside arena a.
//Blocks start right after the arena header and are block_size apart.
struct mem_block* arena2block(struct arena* a, uint32_t idx) {
    uint32_t first_block = (uint32_t)a + sizeof(struct arena);
    uint32_t offset = idx * a->desc->block_size;
    return (struct mem_block*)(first_block + offset);
}
//Return the arena that memory block b belongs to.
//The arena header sits at the start of b's page, so masking off the
//low 12 bits of b's address yields the arena address.
struct arena* block2arena(struct mem_block* b) {
    uint32_t page_base = (uint32_t)b & 0xfffff000;
    return (struct arena*)page_base;
}
实现sys_malloc
c
//Allocate size bytes from the heap of the current thread/process.
//Returns the start of the usable memory, or NULL on failure
//(size of 0, size larger than the pool, or out of pages).
void* sys_malloc(uint32_t size) {
    enum pool_flags PF;
    struct mem_pool* mem_pool;
    uint32_t pool_size;
    struct mem_block_desc* descs;
    struct task_struct* cur_thread = running_thread();

    //Select the pool: kernel threads have no private page directory.
    if (cur_thread->pgdir == NULL) {
        PF = PF_KERNEL;
        pool_size = kernel_mem_pool.pool_size;
        mem_pool = &kernel_mem_pool;
        descs = k_block_descs;
    } else {
        //User process: use its own block descriptors and the user pool.
        PF = PF_USER;
        pool_size = user_mem_pool.pool_size;
        mem_pool = &user_mem_pool;
        descs = cur_thread->u_block_descs;
    }

    //Reject zero-sized or oversized requests.
    //BUG FIX: size is unsigned, so the former "size < 0" test was
    //always false (dead code); check for 0 explicitly instead.
    if (size == 0 || size > pool_size) return NULL;

    struct arena* a;
    struct mem_block* b;
    lock_acquire(&mem_pool->lock);

    //Requests above the largest block class (1024B) get whole page frames.
    if (size > 1024) {
        //Page frames needed, including room for the arena header.
        uint32_t pg_cnt = DIV_ROUND_UP(size + sizeof(struct arena), PG_SIZE);
        a = malloc_page(PF, pg_cnt);
        if (a == NULL) {
            lock_release(&mem_pool->lock);
            return NULL;
        }
        memset(a, 0, pg_cnt * PG_SIZE);
        //Large arenas have no descriptor; cnt holds the page frame count.
        a->desc = NULL;
        a->cnt = pg_cnt;
        a->large = true;
        lock_release(&mem_pool->lock);
        //Skip over the arena header and return the usable memory.
        return (void*)(a + 1);
    } else {
        //Small request: find the smallest block class that fits.
        uint8_t desc_idx;
        for (desc_idx = 0; desc_idx < MEM_BLOCK_DESC_CNT; desc_idx++) {
            if (size <= descs[desc_idx].block_size) break;
        }
        //No free block of this class: carve a fresh one-page arena.
        if (list_empty(&descs[desc_idx].free_list)) {
            a = malloc_page(PF, 1);
            if (a == NULL) {
                lock_release(&mem_pool->lock);
                return NULL;
            }
            memset(a, 0, PG_SIZE);
            //Initialize the arena header for this size class.
            a->desc = &descs[desc_idx];
            a->cnt = descs[desc_idx].blocks_per_arena;
            a->large = false;
            //Link every block of the new arena into the class free_list.
            uint32_t block_idx;
            enum intr_status old_status = intr_disable();
            for (block_idx = 0; block_idx < a->cnt; block_idx++) {
                b = arena2block(a, block_idx);
                ASSERT(!node_find(&a->desc->free_list, &b->free_block));
                list_append(&a->desc->free_list, &b->free_block);
            }
            set_intr_status(old_status);
        }
        //Pop one free block and account for it in its arena.
        b = elem2entry(struct mem_block, free_block, list_pop(&(descs[desc_idx].free_list)));
        a = block2arena(b);
        a->cnt--;
        lock_release(&mem_pool->lock);
        return (void*)b;
    }
}
free的实现
内存回收工作分为三大步骤
- 先调用pfree清空物理地址位图中的相应位
- 再调用page_table_pte_remove删除页表中此地址的pte
- 最后调用vaddr_remove清除虚拟地址位图中的相应位
回收物理内存
c
//Return the physical page at pg_phy_addr to its physical memory pool
//by clearing the page's bit in that pool's bitmap.
void pfree(uint32_t pg_phy_addr) {
    //Addresses at or above the user pool's start belong to the user pool.
    struct mem_pool* pool = (pg_phy_addr >= user_mem_pool.phy_addr_start)
                                ? &user_mem_pool
                                : &kernel_mem_pool;
    //Clear the bit for this page frame.
    uint32_t bit_idx = (pg_phy_addr - pool->phy_addr_start) / PG_SIZE;
    bitmap_set(&pool->mem_pool_bitmap, bit_idx, 0);
}
更新页表映射
c
//Remove the page-table mapping of virtual address vaddr: only the PTE
//for vaddr is affected, the page directory entry is left alone.
void page_table_pte_remove(uint32_t vaddr) {
    //Clear the present bit in the PTE.
    uint32_t* pte = pte_ptr(vaddr);
    *pte &= ~PG_P_1;
    //Invalidate the TLB entry for this page so the change takes effect.
    asm volatile ("invlpg %0": : "m" (vaddr): "memory");
}
释放虚拟地址
c
//Release pg_cnt consecutive virtual pages starting at _vaddr by
//clearing their bits in the owning virtual-address pool's bitmap.
void vaddr_remove(enum pool_flags pf, void* _vaddr, uint32_t pg_cnt) {
    //Kernel allocations use the kernel pool; otherwise the pool of the
    //current user process.
    struct virtual_addr_pool* vaddr_pool =
        (pf == PF_KERNEL) ? &kernel_vaddr_pool
                          : &(running_thread()->userprog_vaddr_pool);
    //Clear one bitmap bit per page in the range.
    uint32_t start_bit = ((uint32_t)_vaddr - vaddr_pool->vaddr_start) / PG_SIZE;
    uint32_t cnt = 0;
    while (cnt < pg_cnt) {
        bitmap_set(&vaddr_pool->vaddr_bitmap, start_bit + cnt, 0);
        cnt++;
    }
}
上述三个步骤的汇总
c
//Free pg_cnt physical pages starting at virtual address _vaddr, in
//three steps per page: return the physical frame to its pool (pfree),
//remove the PTE (page_table_pte_remove), then — once for the whole
//range — clear the virtual-address bitmap (vaddr_remove).
void mfree_page(enum pool_flags pf, void* _vaddr, uint32_t pg_cnt) {
    uint32_t vaddr = (uint32_t)_vaddr;
    uint32_t page_cnt = pg_cnt;
    ASSERT(pg_cnt >= 1 && vaddr % PG_SIZE == 0);
    for (; page_cnt > 0; page_cnt--) {
        //Translate while the mapping still exists, then free the frame.
        //(The former redundant addr_v2p(_vaddr) call before the loop —
        //recomputed here anyway, and with an inconsistent void* argument —
        //has been removed.)
        uint32_t pg_phy_addr = addr_v2p(vaddr);
        pfree(pg_phy_addr);
        //Unmap this page in the page table (flushes its TLB entry too).
        page_table_pte_remove(vaddr);
        //Move on to the next virtual page.
        vaddr += PG_SIZE;
    }
    //Finally release the whole virtual range in the bitmap.
    vaddr_remove(pf, _vaddr, pg_cnt);
}
实现sys_free
c
//Free the memory at ptr previously returned by sys_malloc.
//Large allocations (desc == NULL, large == true) return their whole
//page run; small blocks go back on their class free_list, and an arena
//whose blocks are all free again is returned to the page allocator.
void sys_free(void* ptr) {
    ASSERT(ptr != NULL);
    if (ptr == NULL) return;
    enum pool_flags PF;
    struct mem_pool* mem_pool;
    //Kernel thread or user process? (kernel threads have no pgdir)
    if (running_thread()->pgdir == NULL) {
        ASSERT((uint32_t)ptr >= K_HEAP_START);
        PF = PF_KERNEL;
        mem_pool = &kernel_mem_pool;
    } else {
        PF = PF_USER;
        mem_pool = &user_mem_pool;
    }
    lock_acquire(&mem_pool->lock);
    //Recover the arena header (at the start of ptr's page) for metadata.
    struct mem_block* b = ptr;
    struct arena* a = block2arena(b);
    //Distinguish large (whole-page) from small (block) allocations.
    if (a->desc == NULL && a->large == true) {
        //Large allocation: release all a->cnt pages at once.
        mfree_page(PF, a, a->cnt);
    } else {
        //Small block: first put it back on its descriptor's free_list.
        list_append(&a->desc->free_list, &b->free_block);
        a->cnt++;
        ASSERT(a->large == 0 || a->large == 1);
        //If every block in this arena is free again, release the page.
        if (a->cnt == a->desc->blocks_per_arena) {
            uint32_t block_idx;
            //Unlink all of this arena's blocks from the free_list so no
            //stale nodes survive after the page is freed.
            for (block_idx = 0; block_idx < a->desc->blocks_per_arena; block_idx++) {
                struct mem_block* b = arena2block(a, block_idx);
                ASSERT(node_find(&a->desc->free_list, &b->free_block));
                list_remove(&b->free_block);
            }
            mfree_page(PF, a, 1);
        }
    }
    lock_release(&mem_pool->lock);
}