The Memory World of Kernel 2.X

1. The two primordial page tables

Before paging is enabled, everything we touch is physical memory. The job to do before turning paging on is to make the page directory and page tables point at the right physical memory.

The page directory and page tables themselves also live in physical memory, so their locations here are hard-coded by hand; there is no deeper reason they must sit at exactly these addresses.

This part is laid out in arch/i386/kernel/head.S; paging is not enabled yet at this point:

/* Page directory entries. Each entry points to one page table; note the two
   identical pairs below (four entries in total). */
/* Each pair points at pg0 and pg1, defined further down. */
/* 0x00102007 means: page table at physical 0x00102000, flags 0x007 = PRESENT | RW | USER. */
/* The same pair is installed in both the user half and the kernel half of the
   directory, because EIP is still executing at its low physical address
   (around 0x100000), so an identity mapping of low memory is needed as well. */
.org 0x1000
ENTRY(swapper_pg_dir)
  .long 0x00102007
  .long 0x00103007
  .fill BOOT_USER_PGD_PTRS-2,4,0
  /* default: 766 entries */
  .long 0x00102007
  .long 0x00103007
  /* default: 254 entries */
  .fill BOOT_KERNEL_PGD_PTRS-2,4,0
​
/*
 * The page tables are initialized to only 8MB here - the final page
 * tables are set up later depending on memory size.
 */
 
/* The first page table, placed at offset 0x2000. */
/* 0x2000 is an offset within the kernel image; the image is loaded at 0x100000,
   so the final physical address is 0x102000. */
​
.org 0x2000
ENTRY(pg0)
​
/* Same as above: the second page table, at 0x103000. */
.org 0x3000
ENTRY(pg1)
​
/*
 * empty_zero_page must immediately follow the page tables ! (The
 * initialization loop counts until empty_zero_page)
 */
 
/* This is NOT a page table! It is an ordinary page holding boot-time
   information (the boot parameters saved by the setup code, including the e820 map). */
.org 0x4000
ENTRY(empty_zero_page)

At this point the page directory points at its two page tables, but the page tables themselves are still empty: every page table entry still has to be filled with a concrete physical address.

The loop below does exactly that: starting at pg0 (offset 0x2000), it fills entries all the way up to empty_zero_page (offset 0x4000).

  movl $pg0-__PAGE_OFFSET,%edi /* initialize page tables */
  movl $007,%eax    /* "007" doesn't mean with right to kill, but
           PRESENT+RW+USER */
2:  stosl
  add $0x1000,%eax
  cmp $empty_zero_page-__PAGE_OFFSET,%edi
  jne 2b
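
For readers who prefer C, here is a minimal sketch of what this loop computes (using the physical addresses 0x102000 and 0x104000 for pg0 and empty_zero_page from the layout above):

/* Fill pg0 and pg1 (2048 PTEs) so that they linearly map physical 0..8MB. */
unsigned long *pte   = (unsigned long *) 0x102000;  /* pg0, physical address */
unsigned long  entry = 0x007;                       /* frame 0 | PRESENT | RW | USER */

while ((unsigned long) pte < 0x104000) {            /* stop at empty_zero_page */
        *pte++ = entry;
        entry += 0x1000;                            /* advance to the next 4 KB frame */
}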

At this point we have a page directory and its page tables are filled in, so paging can be enabled.

/*
 * Enable paging
 */
 /* cr3 holds the physical address of the page directory, so load it with swapper_pg_dir. */
 /* Bit 31 (PG) of cr0 controls paging; setting it to 1 turns paging on. */
 
3:
  movl $swapper_pg_dir-__PAGE_OFFSET,%eax
  movl %eax,%cr3    /* set the page table pointer.. */
  movl %cr0,%eax
  orl $0x80000000,%eax
  movl %eax,%cr0    /* ..and set paging (PG) bit */
  jmp 1f      /* flush the prefetch-queue */
  /* The indirect jump below moves EIP from the low, identity-mapped addresses up into the kernel's high address space. */
1:
  movl $1f,%eax
  jmp *%eax   /* make sure eip is relocated */
1:
  /* Set up the stack pointer */
  lss stack_start,%esp

The mapping we now have is from virtual [3G, 3G + 8M] to physical [0, 8M] (plus the temporary identity mapping of the same 8 MB at virtual [0, 8M]).
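
Within this linear mapping, converting between kernel virtual and physical addresses is just an offset of PAGE_OFFSET (0xC0000000 with the default i386 split); a quick sketch of the relation behind __pa()/__va():

#define PAGE_OFFSET 0xC0000000UL                  /* default i386 user/kernel split */

unsigned long pa = 0xC0102000UL - PAGE_OFFSET;    /* __pa(0xC0102000) == 0x00102000 (pg0) */
unsigned long va = 0x00102000UL + PAGE_OFFSET;    /* __va(0x00102000) == 0xC0102000 */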

With that done, what comes next? The machine obviously has more than 8 MB of memory, so the next step is to bring all of physical memory under management.

Before moving on, make sure you understand what lives inside these 8 MB: everything the kernel currently needs, including the two page tables we just saw (at 0x102000, just past the 1 MB mark).

After some further setup the kernel reaches the code below. So far we have only done the most basic physical memory bookkeeping; from here on it becomes more elaborate.

asmlinkage void __init start_kernel(void)
{
  char * command_line;
  unsigned long mempages;
  extern char saved_command_line[];
/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
  lock_kernel();
  printk(linux_banner);
  // Physical memory initialization happens in here.
  // The kernel tree contains many implementations of this function because it is
  // strongly architecture-dependent; we follow the i386 version.
  setup_arch(&command_line);

2. The bootmem phase (early physical memory management)

arch/i386/kernel/setup.c

void __init setup_arch(char **cmdline_p)
{
  unsigned long bootmap_size, low_mem_size;
  unsigned long start_pfn, max_pfn, max_low_pfn;
  int i;
...
...
/* Convert an address to the page frame number (PFN) it falls in, and back. */
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
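/* For example, with PAGE_SHIFT == 12 (4 KB pages):
 *   PFN_UP(0x1001)   == 2       (a partially used page rounds up)
 *   PFN_DOWN(0x1fff) == 1       (rounds down)
 *   PFN_PHYS(2)      == 0x2000
 */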
​
/*
 * 128MB for vmalloc and initrd
 */
/* 4G - (3G + 896M) = 128M */
#define VMALLOC_RESERVE (unsigned long)(128 << 20)
/* So MAXMEM works out to 1 GB - 128 MB = 896 MB */
#define MAXMEM    (unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE)
#define MAXMEM_PFN  PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN  (1 << 20)
​
  /*
   * partially used pages are not usable - thus
   * we are rounding upwards:
   */
  /* Allocation naturally starts from the first page after the end of the kernel image. */
  start_pfn = PFN_UP(__pa(&_end));
​
  /*
   * Find the highest page frame number we have available
   */
  /* The e820 map here was read out of empty_zero_page (the boot-parameter page mentioned earlier). */
  /* Between enabling paging and reaching start_kernel there is a further stretch of assembly that is not covered in detail here. */
  max_pfn = 0;
  for (i = 0; i < e820.nr_map; i++) {
    unsigned long start, end;
    /* RAM? */
    /* Only regions whose type is RAM matter here. */
    if (e820.map[i].type != E820_RAM)
      continue;
    start = PFN_UP(e820.map[i].addr);
    end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
    if (start >= end)
      continue;
    /* The goal is to find the highest usable page frame, i.e. how far the physical memory we must manage extends. */
    if (end > max_pfn)
      max_pfn = end;
  }
​
  /*
   * Determine low and high memory ranges:
   */
  max_low_pfn = max_pfn;
  if (max_low_pfn > MAXMEM_PFN) {
    max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
    /* Maximum memory usable is what is directly addressable */
    printk(KERN_WARNING "Warning only %ldMB will be used.\n",
          MAXMEM>>20);
    if (max_pfn > MAX_NONPAE_PFN)
      printk(KERN_WARNING "Use a PAE enabled kernel.\n");
    else
      printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_X86_PAE
    if (max_pfn > MAX_NONPAE_PFN) {
      max_pfn = MAX_NONPAE_PFN;
      printk(KERN_WARNING "Warning only 4GB will be used.\n");
      printk(KERN_WARNING "Use a PAE enabled kernel.\n");
    }
#endif /* !CONFIG_X86_PAE */
#endif /* !CONFIG_HIGHMEM */
  }
​
#ifdef CONFIG_HIGHMEM
  highstart_pfn = highend_pfn = max_pfn;
  if (max_pfn > MAXMEM_PFN) {
    highstart_pfn = MAXMEM_PFN;
    printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
      pages_to_mb(highend_pfn - highstart_pfn));
  }
#endif
  /*
   * Initialize the boot-time allocator (with low memory only):
   */
  /* Two arguments: 1. the first free page frame after the kernel image, 2. the highest low-memory page frame. */
  /* Only low memory is managed here; high memory (>= 896 MB) is not considered yet. */
  bootmap_size = init_bootmem(start_pfn, max_low_pfn);
​
  /*
   * Register fully available low RAM pages with the bootmem allocator.
   */
  /* Everything below is there to fill in the bootmem bitmap. */
  for (i = 0; i < e820.nr_map; i++) {
    unsigned long curr_pfn, last_pfn, size;
    /*
     * Reserve usable low memory
     */
    if (e820.map[i].type != E820_RAM)
      continue;
    /*
     * We are rounding up the start address of usable memory:
     */
    curr_pfn = PFN_UP(e820.map[i].addr);
    if (curr_pfn >= max_low_pfn)
      continue;
    /*
     * ... and at the end of the usable range downwards:
     */
    last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
​
    if (last_pfn > max_low_pfn)
      last_pfn = max_low_pfn;
​
    /*
     * .. finally, did all the rounding and playing
     * around just make the area go away?
     */
    if (last_pfn <= curr_pfn)
      continue;
​
    size = last_pfn - curr_pfn;
    free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
  }
  /*
   * Reserve the bootmem bitmap itself as well. We do this in two
   * steps (first step was init_bootmem()) because this catches
   * the (very unlikely) case of us accidentally initializing the
   * bootmem allocator with an invalid RAM area.
   */
  reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
       bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
​
  /*
   * reserve physical page 0 - it's a special BIOS page on many boxes,
   * enabling clean reboots, SMP operation, laptop functions.
   */
  reserve_bootmem(0, PAGE_SIZE);
#ifdef CONFIG_SMP
  /*
   * But first pinch a few for the stack/trampoline stuff
   * FIXME: Don't need the extra page at 4K, but need to fix
   * trampoline before removing it. (see the GDT stuff)
   */
  reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
  /*
   * Find and reserve possible boot-time SMP configuration:
   */
  find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
  if (LOADER_TYPE && INITRD_START) {
    if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
      reserve_bootmem(INITRD_START, INITRD_SIZE);
      initrd_start =
        INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
      initrd_end = initrd_start+INITRD_SIZE;
    }
    else {
      printk(KERN_ERR "initrd extends beyond end of memory "
          "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
          INITRD_START + INITRD_SIZE,
          max_low_pfn << PAGE_SHIFT);
      initrd_start = 0;
    }
  }
#endif
/* At this point the bitmap is fully populated. */
/* Now we can go on to build the real page tables -> paging_init() */
  /*
   * NOTE: before this point _nobody_ is allowed to allocate
   * any memory using the bootmem allocator.
   */
​
#ifdef CONFIG_SMP
  smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
#endif
  paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
  /*
   * get boot-time SMP configuration:
   */
  if (smp_found_config)
    get_smp_config();
  init_apic_mappings();
#endif
​
​
  /*
   * Request address space for all standard RAM and ROM resources
   * and also for regions reported as reserved by the e820.
   */
  probe_roms();
  for (i = 0; i < e820.nr_map; i++) {
    struct resource *res;
    if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
      continue;
    res = alloc_bootmem_low(sizeof(struct resource));
    switch (e820.map[i].type) {
    case E820_RAM:  res->name = "System RAM"; break;
    case E820_ACPI: res->name = "ACPI Tables"; break;
    case E820_NVS:  res->name = "ACPI Non-volatile Storage"; break;
    default:  res->name = "reserved";
    }
    res->start = e820.map[i].addr;
    res->end = res->start + e820.map[i].size - 1;
    res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
    request_resource(&iomem_resource, res);
    if (e820.map[i].type == E820_RAM) {
      /*
       *  We dont't know which RAM region contains kernel data,
       *  so we try it repeatedly and let the resource manager
       *  test it.
       */
      request_resource(res, &code_resource);
      request_resource(res, &data_resource);
    }
  }
  request_resource(&iomem_resource, &vram_resource);
​
  /* request I/O space for devices used on all i[345]86 PCs */
  for (i = 0; i < STANDARD_IO_RESOURCES; i++)
    request_resource(&ioport_resource, standard_io_resources+i);
​
  /* Tell the PCI layer not to allocate too close to the RAM area.. */
  low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
  if (low_mem_size > pci_mem_start)
    pci_mem_start = low_mem_size;
​
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
  conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
  conswitchp = &dummy_con;
#endif
#endif
}
​

The init_bootmem implementation:

static unsigned long __init init_bootmem_core (pg_data_t *pgdat,
  unsigned long mapstart, unsigned long start, unsigned long end)
{
  // On a NUMA machine each node has its own pgdat; here there is only one.
  bootmem_data_t *bdata = pgdat->bdata;
  unsigned long mapsize = ((end - start)+7)/8;
​
  pgdat->node_next = pgdat_list;
  pgdat_list = pgdat;
​
  // Round the bitmap size up to a multiple of sizeof(long).
  mapsize = (mapsize + (sizeof(long) - 1UL)) & ~(sizeof(long) - 1UL);
  bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT);
  bdata->node_boot_start = (start << PAGE_SHIFT);
  bdata->node_low_pfn = end;
​
  /*
   * Initially all pages are reserved - setup_arch() has to
   * register free RAM areas explicitly.
   */
  // Mark everything as reserved; the usable RAM ranges are freed afterwards by setup_arch().
  memset(bdata->node_bootmem_map, 0xff, mapsize);
​
  return mapsize;
}
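
For a rough sense of scale: with 896 MB of usable low memory there are 229376 page frames, so mapsize = (229376 + 7) / 8 = 28672 bytes, i.e. the whole bootmem bitmap fits in 7 pages placed right after the kernel image.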

The free_bootmem implementation:

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
  unsigned long i;
  unsigned long start;
  /*
   * round down end of usable mem, partially free pages are
   * considered reserved.
   */
  unsigned long sidx;
  // Exclusive end index (relative to node_boot_start) of the pages to free; a partial page at the end is rounded down.
  unsigned long eidx = (addr + size - bdata->node_boot_start)/PAGE_SIZE;
  // end is the absolute PFN just past the range; it is only used for the sanity check below.
  unsigned long end = (addr + size)/PAGE_SIZE;
​
  if (!size) BUG();
  if (end > bdata->node_low_pfn)
    BUG();
​
  /*
   * Round up the beginning of the address.
   */
  start = (addr + PAGE_SIZE-1) / PAGE_SIZE;
  sidx = start - (bdata->node_boot_start/PAGE_SIZE);
​
  for (i = sidx; i < eidx; i++) {
    // Clear the corresponding bit in the bitmap (it must have been set).
    if (!test_and_clear_bit(i, bdata->node_bootmem_map))
      BUG();
  }
}

The reserve_bootmem implementation:

// The opposite of the function above: mark the range as reserved (bits set).
static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr, unsigned long size)
{
  unsigned long i;
  /*
   * round up, partially reserved pages are considered
   * fully reserved.
   */
  unsigned long sidx = (addr - bdata->node_boot_start)/PAGE_SIZE;
  unsigned long eidx = (addr + size - bdata->node_boot_start + 
              PAGE_SIZE-1)/PAGE_SIZE;
  unsigned long end = (addr + size + PAGE_SIZE-1)/PAGE_SIZE;
​
  if (!size) BUG();
​
  if (sidx < 0)
    BUG();
  if (eidx < 0)
    BUG();
  if (sidx >= eidx)
    BUG();
  if ((addr >> PAGE_SHIFT) >= bdata->node_low_pfn)
    BUG();
  if (end > bdata->node_low_pfn)
    BUG();
  for (i = sidx; i < eidx; i++)
    if (test_and_set_bit(i, bdata->node_bootmem_map))
      printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
}

So, taken as a whole: the bitmap starts out fully reserved, the RAM ranges from the e820 map are then marked free, and finally the regions already in use are reserved again.

At this point the memory picture is:

8 MB of virtual memory is mapped onto physical [0, 8M], and that mapped memory now contains a bitmap describing the state of every low-memory page frame. With it we can already do sensible physical allocation: free pages can be found by scanning the bitmap.
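
To make "find a free page from the bitmap" concrete, here is a simplified sketch in the spirit of the bootmem allocator. The function name is made up for illustration; the real __alloc_bootmem_core additionally handles alignment, a preferred goal address and merging with a partially used previous allocation:

/* Sketch only: scan the bootmem bitmap for one free page, mark it reserved
 * and return its physical address. */
static unsigned long __init grab_one_bootmem_page(bootmem_data_t *bdata)
{
  unsigned long i;
  unsigned long pages = bdata->node_low_pfn -
                        (bdata->node_boot_start >> PAGE_SHIFT);

  for (i = 0; i < pages; i++) {
    if (!test_bit(i, bdata->node_bootmem_map)) {  /* 0 == free */
      set_bit(i, bdata->node_bootmem_map);        /* now reserved */
      return bdata->node_boot_start + (i << PAGE_SHIFT);
    }
  }
  return 0;   /* low memory exhausted */
}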

The kernel then calls paging_init() to finish setting up the kernel's page tables and to prepare to retire the bitmap, which is simple but not a very efficient way to manage physical memory.

3. Initializing the kernel page tables & the buddy system

void __init paging_init(void)
{
  // The main work evidently happens in here.
  pagetable_init();
​
  // This may look odd: cr3 was already loaded with swapper_pg_dir during boot, and here it is loaded again.
  // The reason is that pagetable_init() has just rewritten the page directory entries; reloading cr3 flushes the TLB so the new entries take effect.
  __asm__( "movl %%ecx,%%cr3\n" ::"c"(__pa(swapper_pg_dir)));
​
#if CONFIG_X86_PAE
  /*
   * We will bail out later - printk doesnt work right now so
   * the user would just see a hanging kernel.
   */
  if (cpu_has_pae)
    set_in_cr4(X86_CR4_PAE);
#endif
​
  __flush_tlb_all();
​
#ifdef CONFIG_HIGHMEM
  kmap_init();
#endif
  {
    // Initialize the buddy system (the real physical memory manager).
    unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
    unsigned int max_dma, high, low;
​
    max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
    low = max_low_pfn;
    high = highend_pfn;
​
    if (low < max_dma)
      zones_size[ZONE_DMA] = low;
    else {
      zones_size[ZONE_DMA] = max_dma;
      zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
      zones_size[ZONE_HIGHMEM] = high - low;
#endif
    }
    free_area_init(zones_size);
  }
  return;
}

The pagetable_init implementation:

// Before starting, some vocabulary: pgd_t is a page directory entry, the top level of
// the mapping; the whole directory covers the full 4 GB address space.
// pmd_t is a page middle directory entry. Without PAE the pmd is folded into the pgd
// (PTRS_PER_PMD == 1); with PAE the pgd has 4 entries and each pmd has 512.
// Each pmd entry points to a page table of pte_t entries.
// Each pte maps one 4 KB page.
static void __init pagetable_init (void)
{
  unsigned long vaddr, end;
  pgd_t *pgd, *pgd_base;
  int i, j, k;
  pmd_t *pmd;
  pte_t *pte, *pte_base;
​
  /*
   * This can be zero as well - no problem, in that case we exit
   * the loops anyway due to the PTRS_PER_* conditions.
   */
  end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);
​
  // Remember swapper_pg_dir? It is the directory that held the two primordial
  // page-table entries; this single page directory is all the system needs early on.
  pgd_base = swapper_pg_dir;
// This #if distinguishes the two layouts:
// with PAE the walk really is pgd -> pmd -> pte;
// plain x86 (no PAE) effectively walks pgd -> pte (the pmd level is folded into the pgd).
#if CONFIG_X86_PAE
  for (i = 0; i < PTRS_PER_PGD; i++)
    set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page)));
#endif
  // Kernel addresses are high, so the kernel mapping does not start at the first
  // entries of swapper_pg_dir: the directory covers the address space from virtual 0,
  // and the kernel part begins at __pgd_offset(PAGE_OFFSET).
  i = __pgd_offset(PAGE_OFFSET);
  pgd = pgd_base + i;
​
  // The loop below fills the page tables; the #if again separates the PAE (three-level) and non-PAE (two-level) cases.
  for (; i < PTRS_PER_PGD; pgd++, i++) {
    vaddr = i*PGDIR_SIZE;
    if (end && (vaddr >= end))
      break;
#if CONFIG_X86_PAE
    // Three-level (PAE) case:
    // a page middle directory occupies one full page (PAGE_SIZE).
    pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
    // Point the pgd entry at the physical address of the pmd we just allocated.
    set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
#else
    pmd = (pmd_t *)pgd;
#endif
    if (pmd != pmd_offset(pgd, 0))
      BUG();
    // Now fill in the pmd entries.
    for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
      vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
      if (end && (vaddr >= end))
        break;
      // Large-page (PSE, 4 MB pages) path; it can be skipped on a first read.
      if (cpu_has_pse) {
        unsigned long __pe;
​
        set_in_cr4(X86_CR4_PSE);
        boot_cpu_data.wp_works_ok = 1;
        __pe = _KERNPG_TABLE + _PAGE_PSE + __pa(vaddr);
        /* Make it "global" too if supported */
        if (cpu_has_pge) {
          set_in_cr4(X86_CR4_PGE);
          __pe += _PAGE_GLOBAL;
        }
        set_pmd(pmd, __pmd(__pe));
        continue;
      }
      // Allocate the page table itself (one page) and remember where it starts.
      pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
      // Fill the page table.
      for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
        vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
        if (end && (vaddr >= end))
          break;
        *pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
      }
      // With the page table filled, point this pmd entry at it.
      set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
      if (pte_base != pte_offset(pmd, 0))
        BUG();
​
    }
  }
​
  /*
   * Fixed mappings, only the page table structure has to be
   * created - mappings will be set by set_fixmap():
   */
  vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
  // fixrange_init: build the page-table structure for this virtual range; no actual
  // mappings (and no data page frames) are installed yet.
  fixrange_init(vaddr, 0, pgd_base);
​
#if CONFIG_HIGHMEM
  /*
   * Permanent kmaps:
   */
  vaddr = PKMAP_BASE;
  fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
​
  pgd = swapper_pg_dir + __pgd_offset(vaddr);
  pmd = pmd_offset(pgd, vaddr);
  pte = pte_offset(pmd, vaddr);
  pkmap_page_table = pte;
#endif
​
#if CONFIG_X86_PAE
  /*
   * Add low memory identity-mappings - SMP needs it when
   * starting up on an AP from real-mode. In the non-PAE
   * case we already have these mappings through head.S.
   * All user-space mappings are explicitly cleared after
   * SMP startup.
   */
  pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
#endif
}

fixrange_init:

static void __init fixrange_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
{
  pgd_t *pgd;
  pmd_t *pmd;
  pte_t *pte;
  int i, j;
  unsigned long vaddr;
​
  vaddr = start;
  i = __pgd_offset(vaddr);
  j = __pmd_offset(vaddr);
  pgd = pgd_base + i;
​
  for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
    if (pgd_none(*pgd)) {
      // Allocate a pmd (one page).
      pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
      set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
      if (pmd != pmd_offset(pgd, 0))
        printk("PAE BUG #02!\n");
    }
    pmd = pmd_offset(pgd, vaddr);
#else
    pmd = (pmd_t *)pgd;
#endif
    for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
      if (pmd_none(*pmd)) {
        // Allocate the page table itself.
        // Note: no physical page frames are mapped here!
        // The real mappings are installed later, e.g. by set_fixmap() or kmap(), when these addresses are actually used.
        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
        set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
        if (pte != pte_offset(pmd, 0))
          BUG();
      }
      vaddr += PMD_SIZE;
    }
    j = 0;
  }
}
​

Then we execute

  __asm__( "movl %%ecx,%%cr3\n" ::"c"(__pa(swapper_pg_dir)));

to reload cr3 with the page directory and flush the TLB. With that, the kernel's virtual memory initialization is done and the initial mappings are fully in place.
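
To see what these mappings mean for address translation, here is a small sketch of how a kernel virtual address decomposes on plain i386 (no PAE, 4 KB pages), assuming the default PAGE_OFFSET of 0xC0000000:

unsigned long vaddr   = 0xC0123456UL;
unsigned long pgd_idx = vaddr >> 22;             /* 768: entry 768 of swapper_pg_dir */
unsigned long pte_idx = (vaddr >> 12) & 0x3ff;   /* 0x123: entry in that page table */
unsigned long offset  = vaddr & 0xfff;           /* 0x456: byte within the 4 KB page */
/* Through the linear kernel mapping this resolves to physical 0x00123456. */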

Before digging into the buddy system, recall how physical memory is allocated at this point: through alloc_bootmem_low_pages, a very crude bitmap-based allocator. Once the work below is finished we stop using it.

The code that follows is what takes over fine-grained physical memory management:

  {
    // Zone-based memory management: at the top level, physical memory is split
    // into three zones, DMA, NORMAL and HIGHMEM.

    // Size (in pages) of each zone.
    unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
    unsigned int max_dma, high, low;
​
    max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
    low = max_low_pfn;
    high = highend_pfn;
​
    if (low < max_dma)
      zones_size[ZONE_DMA] = low;
    else {
      zones_size[ZONE_DMA] = max_dma;
      zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
      zones_size[ZONE_HIGHMEM] = high - low;
#endif
    }
    // The key call; a thin wrapper around free_area_init_core.
    free_area_init(zones_size);
  }
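
As an illustration (a hypothetical 2 GB machine with CONFIG_HIGHMEM): max_dma is 16 MB = 4096 pages, max_low_pfn is 896 MB = 229376 pages and highend_pfn is 2 GB = 524288 pages, so ZONE_DMA gets 4096 pages (16 MB), ZONE_NORMAL gets 225280 pages (880 MB) and ZONE_HIGHMEM gets 294912 pages (1152 MB).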

free_area_init:

void __init free_area_init(unsigned long *zones_size)
{
  // contig_page_data is the one and only node here (a NUMA concept: each node manages its physical memory separately).
  // mem_map is an output parameter.
  free_area_init_core(0, &contig_page_data, &mem_map, zones_size, 0, 0, 0);
}

free_area_init_core:

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 */
void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
  unsigned long *zones_size, unsigned long zone_start_paddr, 
  unsigned long *zholes_size, struct page *lmem_map)
{
  struct page *p;
  unsigned long i, j;
  unsigned long map_size;
  unsigned long totalpages, offset, realtotalpages;
  const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
​
  if (zone_start_paddr & ~PAGE_MASK)
    BUG();
​
  totalpages = 0;
  for (i = 0; i < MAX_NR_ZONES; i++) {
    unsigned long size = zones_size[i];
    totalpages += size;
  }
  realtotalpages = totalpages;
  
  // zholes_size is NULL in our call, so this branch is not taken.
  if (zholes_size)
    for (i = 0; i < MAX_NR_ZONES; i++)
      realtotalpages -= zholes_size[i];
      
  printk("On node %d totalpages: %lu\n", nid, realtotalpages);
​
  INIT_LIST_HEAD(&active_list);
  INIT_LIST_HEAD(&inactive_list);
​
  /*
   * Some architectures (with lots of mem and discontinous memory
   * maps) have to search for a good mem_map area:
   * For discontigmem, the conceptual mem map array starts from 
   * PAGE_OFFSET, we need to align the actual array onto a mem map 
   * boundary, so that MAP_NR works.
   */
  // mem_map is the array of struct page the kernel uses to describe every page frame;
  // the kernel indexes it by PFN to reach the struct page of a given frame.
  map_size = (totalpages + 1)*sizeof(struct page);
  if (lmem_map == (struct page *)0) {
    lmem_map = (struct page *) alloc_bootmem_node(pgdat, map_size);
    lmem_map = (struct page *)(PAGE_OFFSET + 
      MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
  }
  *gmap = pgdat->node_mem_map = lmem_map;
  pgdat->node_size = totalpages;
  pgdat->node_start_paddr = zone_start_paddr;
  pgdat->node_start_mapnr = (lmem_map - mem_map);
  pgdat->nr_zones = 0;
​
  /*
   * Initially all pages are reserved - free ones are freed
   * up by free_all_bootmem() once the early boot process is
   * done.
   */
  // Initialize the state of every struct page.
  for (p = lmem_map; p < lmem_map + totalpages; p++) {
    set_page_count(p, 0);
    SetPageReserved(p);
    init_waitqueue_head(&p->wait);
    memlist_init(&p->list);
  }
​
  offset = lmem_map - mem_map;
  
  // This loop walks the three zones: DMA, NORMAL, HIGHMEM.
  for (j = 0; j < MAX_NR_ZONES; j++) {
    zone_t *zone = pgdat->node_zones + j;
    unsigned long mask;
    unsigned long size, realsize;
​
    realsize = size = zones_size[j];
    if (zholes_size)
      realsize -= zholes_size[j];
​
    printk("zone(%lu): %lu pages.\n", j, size);
    zone->size = size;
    zone->name = zone_names[j];
    zone->lock = SPIN_LOCK_UNLOCKED;
    zone->zone_pgdat = pgdat;
    zone->free_pages = 0;
    zone->need_balance = 0;
    if (!size)
      continue;
​
    pgdat->nr_zones = j+1;
​
    mask = (realsize / zone_balance_ratio[j]);
    if (mask < zone_balance_min[j])
      mask = zone_balance_min[j];
    else if (mask > zone_balance_max[j])
      mask = zone_balance_max[j];
    zone->pages_min = mask;
    zone->pages_low = mask*2;
    zone->pages_high = mask*3;
​
    zone->zone_mem_map = mem_map + offset;
    zone->zone_start_mapnr = offset;
    zone->zone_start_paddr = zone_start_paddr;
​
    if ((zone_start_paddr >> PAGE_SHIFT) & (zone_required_alignment-1))
      printk("BUG: wrong zone alignment, it will crash\n");
​
    for (i = 0; i < size; i++) {
      struct page *page = mem_map + offset + i;
      page->zone = zone;
      if (j != ZONE_HIGHMEM)
        page->virtual = __va(zone_start_paddr);
      zone_start_paddr += PAGE_SIZE;
    }
​
    offset += size;
    
    // The heart of it: the buddy system, where the actual allocation algorithm lives.
    for (i = 0; ; i++) {
      // free_area is an array of MAX_ORDER entries;
      // entry i holds the free list of blocks of 2^i contiguous pages.
      unsigned long bitmap_size;
​
      memlist_init(&zone->free_area[i].free_list);
      if (i == MAX_ORDER-1) {
        zone->free_area[i].map = NULL;
        break;
      }
​
      /*
       * Page buddy system uses "index >> (i+1)",
       * where "index" is at most "size-1".
       *
       * The extra "+3" is to round down to byte
       * size (8 bits per byte assumption). Thus
       * we get "(size-1) >> (i+4)" as the last byte
       * we can access.
       *
       * The "+1" is because we want to round the
       * byte allocation up rather than down. So
       * we should have had a "+7" before we shifted
       * down by three. Also, we have to add one as
       * we actually _use_ the last bit (it's [0,n]
       * inclusive, not [0,n[).
       *
       * So we actually had +7+1 before we shift
       * down by 3. But (n+8) >> 3 == (n >> 3) + 1
       * (modulo overflows, which we do not have).
       *
       * Finally, we LONG_ALIGN because all bitmap
       * operations are on longs.
       */
      // Compute the size of the order-i buddy bitmap. size is counted in pages, and each
      // bit of the order-i map covers one pair of order-i buddies (hence the
      // "index >> (i+1)" mentioned in the comment above).
      // The shift is (i+4) rather than (i+1) because a byte holds 8 bits,
      // so the bit count is shifted down by a further 3 to turn it into bytes.
      bitmap_size = (size-1) >> (i+4);
      bitmap_size = LONG_ALIGN(bitmap_size+1);
      zone->free_area[i].map = 
        (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
    }
  }
  build_zonelists(pgdat);
}
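
A quick worked example of those bitmap sizes: for a zone of 16384 pages (64 MB), order 0 needs (16383 >> 4) + 1 = 1024 bytes (one bit per pair of single pages), order 1 needs 512 bytes, and so on, halving at each order; the top order MAX_ORDER-1 keeps no bitmap at all.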

build_zonelists:

// This function builds the zone allocation priority lists,
// i.e. it answers: should an allocation try DMA, NORMAL or HIGHMEM first?
static inline void build_zonelists(pg_data_t *pgdat)
{
  int i, j, k;
​
  // Build one zonelist for every possible GFP_ZONEMASK value.
  // Later, when the kernel allocates pages with some gfp_mask, the zone bits of that mask
  // select one of these lists, and the zones are tried in exactly the order stored here.
  for (i = 0; i <= GFP_ZONEMASK; i++) {
    zonelist_t *zonelist;
    zone_t *zone;
​
    zonelist = pgdat->node_zonelists + i;
    memset(zonelist, 0, sizeof(*zonelist));
​
    j = 0;
    k = ZONE_NORMAL;
    if (i & __GFP_HIGHMEM)
      k = ZONE_HIGHMEM;
    if (i & __GFP_DMA)
      k = ZONE_DMA;
​
    switch (k) {
      default:
        BUG();
      /*
       * fallthrough:
       */
      case ZONE_HIGHMEM:
        zone = pgdat->node_zones + ZONE_HIGHMEM;
        if (zone->size) {
#ifndef CONFIG_HIGHMEM
          BUG();
#endif
          zonelist->zones[j++] = zone;
        }
      case ZONE_NORMAL:
        zone = pgdat->node_zones + ZONE_NORMAL;
        if (zone->size)
          zonelist->zones[j++] = zone;
      case ZONE_DMA:
        zone = pgdat->node_zones + ZONE_DMA;
        if (zone->size)
          zonelist->zones[j++] = zone;
    }
    zonelist->zones[j++] = NULL;
  } 
}
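
For example, on a CONFIG_HIGHMEM kernel the fallthroughs above produce: an allocation with __GFP_HIGHMEM tries HIGHMEM, then NORMAL, then DMA; a plain kernel allocation tries NORMAL, then DMA; and an allocation with __GFP_DMA can only be satisfied from ZONE_DMA.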

This concludes our walk through how kernel 2.4.16 sets up its virtual memory mapping and initializes the buddy system. These functions run once, early in boot; the next step is to analyze the functions that use and modify this machinery dynamically while the kernel runs.
