// call the entry point from the ELF header
// note: does not return
((void (*)(void))(ELFHDR->e_entry & 0xFFFFFF))();
1 - Preparing to enter the kernel code
In boot/bootasm.S, the GDT is set up for the first time:
gdt:
    SEG_NULLASM                                     # null seg
    SEG_ASM(STA_X|STA_R, 0x0, 0xffffffff)           # code seg for bootloader and kernel
    SEG_ASM(STA_W, 0x0, 0xffffffff)                 # data seg for bootloader and kernel
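For reference, SEG_NULLASM and SEG_ASM are macros from boot/asm.h (shared by ucore and xv6) that emit the raw 8-byte descriptors; they expand roughly as follows. Both the code and data segments above use base 0 and a 4 GiB limit with 4 KiB granularity, so segmentation is effectively an identity map at this point:

#define SEG_NULLASM                                             \
    .word 0, 0;                                                 \
    .byte 0, 0, 0, 0

// 0x90 sets the present and S bits; 0xC0 selects 4 KiB granularity and 32-bit mode
#define SEG_ASM(type,base,lim)                                  \
    .word (((lim) >> 12) & 0xffff), ((base) & 0xffff);          \
    .byte (((base) >> 16) & 0xff), (0x90 | (type)),             \
        (0xC0 | (((lim) >> 28) & 0xf)), (((base) >> 24) & 0xff)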
// map all physical memory to linear memory with base linear addr KERNBASE
// linear_addr KERNBASE ~ KERNBASE + KMEMSIZE = phy_addr 0 ~ KMEMSIZE
// But shouldn't use this map until enable_paging() & gdt_init() finished.
boot_map_segment(boot_pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);
static void
boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm) {
    assert(PGOFF(la) == PGOFF(pa));
    size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
    la = ROUNDDOWN(la, PGSIZE);
    pa = ROUNDDOWN(pa, PGSIZE);
    for (; n > 0; n --, la += PGSIZE, pa += PGSIZE) {
        pte_t *ptep = get_pte(pgdir, la, 1);
        assert(ptep != NULL);
        *ptep = pa | PTE_P | perm;
    }
}
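boot_map_segment relies on get_pte to walk the two-level page table and, because create is passed as 1, to allocate a missing page table on the way. A minimal sketch of what get_pte does in lab2, assuming the usual PDX/PTX/PDE_ADDR/KADDR helpers from pmm.h and mmu.h (the exact implementation is part of the exercise):

// find the page table entry for linear address la, creating the page table if asked to
pte_t *
get_pte(pde_t *pgdir, uintptr_t la, bool create) {
    pde_t *pdep = &pgdir[PDX(la)];                  // page directory entry covering la
    if (!(*pdep & PTE_P)) {                         // no page table here yet
        struct Page *page;
        if (!create || (page = alloc_page()) == NULL) {
            return NULL;
        }
        set_page_ref(page, 1);
        uintptr_t pa = page2pa(page);
        memset(KADDR(pa), 0, PGSIZE);               // new page table starts empty
        *pdep = pa | PTE_U | PTE_W | PTE_P;         // install it in the directory
    }
    // index the page table with the middle 10 bits of la
    return &((pte_t *)KADDR(PDE_ADDR(*pdep)))[PTX(la)];
}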
/* free_area_t - maintains a doubly linked list to record free (unused) pages */
typedef struct {
    list_entry_t free_list;         // the list header
    unsigned int nr_free;           // # of free pages in this free list
} free_area_t;
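In kern/mm/default_pmm.c the allocator keeps a single global free_area_t and refers to its two fields through the free_list and nr_free shorthands that appear throughout the code below; default_init just starts from an empty list:

free_area_t free_area;

#define free_list (free_area.free_list)
#define nr_free (free_area.nr_free)

// called once by pmm_init before any memory block is registered
static void
default_init(void) {
    list_init(&free_list);
    nr_free = 0;
}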
struct Page {
    int ref;                        // page frame's reference counter
    uint32_t flags;                 // array of flags that describe the status of the page frame
    unsigned int property;          // number of free pages in the block headed by this page
                                    // (used by the first-fit pm manager)
    list_entry_t page_link;         // free list link
};
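page_link is a list node embedded inside the Page itself; the allocator gets from a node back to its containing Page with le2page, the usual container-of trick (le2page lives in memlayout.h, to_struct and offsetof in libs/defs.h):

// convert a list entry embedded in a struct Page back to the Page itself
#define le2page(le, member)             \
    to_struct((le), struct Page, member)

#define offsetof(type, member)          \
    ((size_t)(&((type *)0)->member))

#define to_struct(ptr, type, member)    \
    ((type *)((char *)(ptr) - offsetof(type, member)))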
/* Flags describing the status of a page frame */
#define PG_reserved     0   // if this bit = 1: the Page is reserved for the kernel and cannot
                            // be used in alloc/free_pages; otherwise this bit = 0
#define PG_property     1   // if this bit = 1: the Page is the head page of a free memory block
                            // (some pages with continuous addresses) and can be used in alloc_pages;
                            // if this bit = 0: either the Page heads a block that has already been
                            // allocated, or it is not a head page at all
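These bit numbers are only manipulated through the helper macros in memlayout.h, which wrap the atomic bit operations; the allocator code below uses them as PageReserved, SetPageProperty, and so on:

#define SetPageReserved(page)       set_bit(PG_reserved, &((page)->flags))
#define ClearPageReserved(page)     clear_bit(PG_reserved, &((page)->flags))
#define PageReserved(page)          test_bit(PG_reserved, &((page)->flags))
#define SetPageProperty(page)       set_bit(PG_property, &((page)->flags))
#define ClearPageProperty(page)     clear_bit(PG_property, &((page)->flags))
#define PageProperty(page)          test_bit(PG_property, &((page)->flags))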
for (i = 0; i < memmap->nr_map; i ++) {
    uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
    // Memory block isn't reserved
    if (memmap->map[i].type == E820_ARM) {
        if (begin < freemem) {
            begin = freemem;
        }
        if (end > KMEMSIZE) {
            end = KMEMSIZE;
        }
        if (begin < end) {
            begin = ROUNDUP(begin, PGSIZE);
            end = ROUNDDOWN(end, PGSIZE);
            if (begin < end) {
                init_memmap(pa2page(begin), (end - begin) / PGSIZE);
            }
        }
    }
}
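init_memmap expects a struct Page pointer rather than a physical address; pa2page (in pmm.h) performs that translation by indexing the global pages array with the physical page number, roughly:

// translate a physical address into its struct Page descriptor
static inline struct Page *
pa2page(uintptr_t pa) {
    if (PPN(pa) >= npage) {                 // npage = total number of managed page frames
        panic("pa2page called with invalid pa");
    }
    return &pages[PPN(pa)];
}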
static void
default_init_memmap(struct Page *base, size_t n) {
    // n must be a positive number
    assert(n > 0);
    struct Page *ptr = base;
    for (; ptr < base + n; ptr++) {
        assert(PageReserved(ptr));
        // clear flags
        ClearPageProperty(ptr);
        ClearPageReserved(ptr);
        // no reference
        set_page_ref(ptr, 0);
        ptr->property = 0;
    }
    // set bit and property of the first page
    SetPageProperty(base);
    base->property = n;
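    /* The tail of default_init_memmap is not reproduced above; a sketch of the
     * remaining lines, assuming the free list is kept sorted by address (the
     * invariant default_free_pages below maintains), is simply to account for
     * the pages and append the new block: */
    nr_free += n;
    list_add_before(&free_list, &(base->page_link));
}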
static struct Page *
default_alloc_pages(size_t n) {
    // check if the size n is legal
    assert(n > 0);
    if (n > nr_free) {
        return NULL;
    }
    struct Page *p = NULL;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        p = le2page(le, page_link);
        // find the first block that can allocate n pages
        if (p->property >= n) {
            goto can_alloc;
        }
    }
    return NULL;

can_alloc:
    if (p != NULL) {
        list_entry_t *tmp = list_next(&(p->page_link));
        // adjust the free block list
        list_del(&(p->page_link));
        if (p->property > n) {
            // set head page of the new free block
            SetPageProperty(p + n);
            (p + n)->property = p->property - n;
            list_add_before(tmp, &((p + n)->page_link));
        }
        // set bits of the allocated pages
        ClearPageProperty(p);
        p->property -= n;
        nr_free -= n;
    }
    return p;
}
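Both the allocation and free paths lean on ucore's intrusive circular doubly linked list from libs/list.h. The operations used here are tiny inline functions; their semantics are essentially the following (sketched without the __list_add/__list_del helpers the real file uses):

struct list_entry {
    struct list_entry *prev, *next;
};
typedef struct list_entry list_entry_t;

// neighbours of a node in the circular list
static inline list_entry_t *
list_next(list_entry_t *listelm) {
    return listelm->next;
}

static inline list_entry_t *
list_prev(list_entry_t *listelm) {
    return listelm->prev;
}

// insert elm immediately before listelm; passing the list head as listelm
// therefore appends elm at the tail
static inline void
list_add_before(list_entry_t *listelm, list_entry_t *elm) {
    elm->prev = listelm->prev;
    elm->next = listelm;
    listelm->prev->next = elm;
    listelm->prev = elm;
}

// unlink listelm from the list it is currently on
static inline void
list_del(list_entry_t *listelm) {
    listelm->prev->next = listelm->next;
    listelm->next->prev = listelm->prev;
}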
static void
default_free_pages(struct Page *base, size_t n) {
    assert(n > 0);
    struct Page *ptr = base, *next = NULL;
    for (; ptr < base + n; ptr++) {
        // reset all pages that need to be freed
        assert(!PageReserved(ptr) && !PageProperty(ptr));
        ClearPageProperty(ptr);
        ClearPageReserved(ptr);
        set_page_ref(ptr, 0);
    }
    // reset head page of the block
    base->property = n;
    SetPageProperty(base);
    // check if this block can be merged with another block
    list_entry_t *le = &free_list, *tmp = NULL;
    while ((le = list_next(le)) != &free_list) {
        ptr = le2page(le, page_link);
        if (ptr + ptr->property == base) {
            // merge after this block
            ptr->property += base->property;
            ClearPageProperty(base);
            // check if next block can also be merged
            tmp = list_next(&(ptr->page_link));
            next = le2page(tmp, page_link);
            if (tmp != &free_list && base + base->property == next) {
                ptr->property += next->property;
                ClearPageProperty(next);
                list_del(tmp);
            }
            goto done;
        }
        else if (base + base->property == ptr) {
            // merge before this block
            base->property += ptr->property;
            ClearPageProperty(ptr);
            // need to set up free_list
            tmp = list_next(&(ptr->page_link));
            list_del(&(ptr->page_link));
            list_add_before(tmp, &(base->page_link));
            goto done;
        }
        else if (ptr > base) {
            tmp = list_prev(&(ptr->page_link));
            // addr boundary check: the previous entry is the head or lies below ptr
            if (tmp == &free_list || (void *)tmp < (void *)ptr) {
                // independent block, nothing to merge, just insert before ptr
                list_add_before(&(ptr->page_link), &(base->page_link));
                goto done;
            }
        }
    }
    // this block cannot be merged with any free block and has the highest address,
    // so insert it at the end of the list
    list_add_before(&free_list, &(base->page_link));

done:
    nr_free += n;
}
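None of these functions is called directly by the rest of the kernel; default_pmm.c exports them through a struct pmm_manager function table, which pmm_init installs as the active physical memory manager so that alloc_pages/free_pages dispatch through it. Roughly:

const struct pmm_manager default_pmm_manager = {
    .name = "default_pmm_manager",
    .init = default_init,                       // set up the empty free list
    .init_memmap = default_init_memmap,         // register a range of free page frames
    .alloc_pages = default_alloc_pages,         // first-fit allocation
    .free_pages = default_free_pages,           // free and coalesce with neighbours
    .nr_free_pages = default_nr_free_pages,     // report nr_free
    .check = default_check,                     // built-in self test
};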