From 108e04a8f0ddebeea196625cf5b14d0efcac027f Mon Sep 17 00:00:00 2001
From: Nathan Giddings
Date: Tue, 7 Nov 2023 12:44:23 -0600
Subject: [PATCH] Page fault handler now allocates new page tables

Rather than constantly checking if a page table exists before access, the
fault handler will automatically allocate them as needed.
---
 include/mmgr.h   | 25 +++++++++++++++++++++++--
 src/mmgr.c       | 44 ++++++++++++++++++++++++++------------------
 src/x86/isr.c    | 40 ++++++++++++++++++++++++++++++++++++++--
 src/x86/paging.c | 22 +++++++++++++++++++---
 4 files changed, 106 insertions(+), 25 deletions(-)

diff --git a/include/mmgr.h b/include/mmgr.h
index 7e0b5d8..aaca95d 100644
--- a/include/mmgr.h
+++ b/include/mmgr.h
@@ -23,7 +23,7 @@ physaddr_t reserve_pages(size_t size);
  * @param size
  * @return int
  */
-int free_pages(physaddr_t location, size_t size);
+size_t free_pages(physaddr_t location);
 
 /**
  * @brief Reserves a single page and returns its physical address.
@@ -92,6 +92,27 @@ physaddr_t create_address_space();
  */
 physaddr_t current_address_space();
 
+/**
+ * @brief Maps a region in virtual memory with the specified flags.
+ *
+ * @param page
+ * @param frame
+ * @param size
+ * @param flags
+ */
+error_t map_region(void *page, physaddr_t frame, size_t size, int flags);
+
+/**
+ * @brief Unmaps a region in virtual memory. All pages which contain some part
+ * of the region specified will be unmapped.
+ *
+ * @param page
+ * @param size
+ *
+ * @returns The physical address of the first page that was unmapped
+ */
+physaddr_t unmap_region(void *page, size_t size);
+
 /**
  * @brief Maps a single page with the specified flags.
  *
@@ -100,7 +121,7 @@ physaddr_t current_address_space();
  * @param flags
  * @return int
  */
-int map_page(void *page, physaddr_t frame, int flags);
+error_t map_page(void *page, physaddr_t frame, int flags);
 
 /**
  * @brief Unmaps a single page, returning the physical address of the frame it
diff --git a/src/mmgr.c b/src/mmgr.c
index 35c814e..d974a3a 100644
--- a/src/mmgr.c
+++ b/src/mmgr.c
@@ -26,10 +26,9 @@ physaddr_t reserve_pages(size_t size)
     }
 }
 
-int free_pages(physaddr_t location, size_t size)
+size_t free_pages(physaddr_t location)
 {
-    buddy_free_size(&page_alloc, location, size);
-    return ENONE;
+    return buddy_free(&page_alloc, location);
 }
 
 physaddr_t reserve_page()
@@ -151,26 +150,35 @@ physaddr_t current_address_space()
 {
     return paging_current_address_space();
 }
-int map_page(void *page, physaddr_t frame, int flags)
+error_t map_region(void *page, physaddr_t frame, size_t size, int flags)
+{
+    if(frame % page_size != 0)
+    {
+        return EINVALIDARG;
+    }
+    for(size_t p = 0; p < size; p += page_size)
+    {
+        set_pte(page + p, page_table_levels - 1, PAGE_PRESENT | flags, frame + p);
+    }
+    return ENONE;
+}
+
+physaddr_t unmap_region(void *page, size_t size)
+{
+    physaddr_t frame = get_pte_address(page, page_table_levels - 1);
+    for(size_t p = 0; p < size; p += page_size)
+    {
+        set_pte(page + p, page_table_levels - 1, 0, 0);
+    }
+    return frame;
+}
+
+error_t map_page(void *page, physaddr_t frame, int flags)
 {
     if (frame % page_size != 0)
     {
         return EINVALIDARG;
     }
-    for(int level = 0; level < page_table_levels - 1; level++)
-    {
-        int present = get_pte_type(page, level) & PAGE_PRESENT;
-        if(present == 0)
-        {
-            physaddr_t new_table = reserve_page();
-            if(new_table == ENOMEM)
-            {
-                return ENOMEM;
-            }
-            set_pte(page, level, PAGE_PRESENT | PAGE_USERMODE | PAGE_RW, new_table);
-            wipe_page_table(page, level + 1);
-        }
-    }
     set_pte(page, page_table_levels - 1, PAGE_PRESENT | flags, frame);
     return ENONE;
 }
diff --git a/src/x86/isr.c b/src/x86/isr.c
index 9371383..5dc1def 100644
--- a/src/x86/isr.c
+++ b/src/x86/isr.c
@@ -2,6 +2,9 @@
 #include "stdio.h"
 #include "x86/apic.h"
 #include "platform/interrupts.h"
+#include "mmgr.h"
+#include "string.h"
+#include "kernel.h"
 
 #include 
 
@@ -14,6 +17,18 @@ struct interrupt_frame_t
     uint32_t ss;
 };
 
+typedef struct page_fault_code_t
+{
+    uint32_t present : 1;
+    uint32_t write : 1;
+    uint32_t usermode : 1;
+    uint32_t reserved : 1;
+    uint32_t inst_fetch : 1;
+    uint32_t pk : 1;
+    uint32_t shadow_stack : 1;
+    uint32_t padding : 25;
+} page_fault_code_t;
+
 void isr_generic(struct interrupt_frame_t *frame)
 {
     printf("Generic interrupt.\n");
@@ -46,6 +61,7 @@ void isr_gp_fault(struct interrupt_frame_t *frame, unsigned int error)
 
 void isr_page_fault(struct interrupt_frame_t *frame, unsigned int error)
 {
+    page_fault_code_t *code = &error;
     uint32_t addr;
     asm("mov %%cr2, %0" : "=r"(addr));
 
@@ -55,8 +71,28 @@ void isr_page_fault(struct interrupt_frame_t *frame, unsigned int error)
         "mov %%ax, %%fs; "
        "mov %%ax, %%gs; "
         :: : "ax");
-    printf("Exception: Page fault, code %08x, linear address %08x\n", error, addr);
-    asm("hlt");
+    if(code->usermode == 0
+        && code->present == 0
+        && addr >= 0xFFC00000
+        && addr < 0xFFFFF000)
+    {
+        printf("Allocating new page table %08x within fault handler.\n", addr);
+        physaddr_t new_table = reserve_page();
+        if(new_table == ENOMEM)
+        {
+            kernel_panic("Out of memory while allocating page table.\n");
+        }
+        set_pte((void*)addr, 1, PAGE_PRESENT | PAGE_USERMODE | PAGE_RW, new_table);
+        asm volatile("mov %%cr3, %%eax;"
+            "mov %%eax, %%cr3" ::
+            : "eax", "memory");
+        memset((void*)(addr & ~0xFFF), 0, page_size);
+    }
+    else
+    {
+        printf("Exception: Page fault, code %08x, linear address %08x\n", error, addr);
+        kernel_panic("Unhandled page fault.\n");
+    }
 }
 
 void isr_double_fault(struct interrupt_frame_t *frame, unsigned int error)
diff --git a/src/x86/paging.c b/src/x86/paging.c
index 8fcc632..45f160b 100644
--- a/src/x86/paging.c
+++ b/src/x86/paging.c
@@ -32,7 +32,7 @@ struct page_table_entry_t *page_tables = (struct page_table_entry_t *)0xFFC00000
 
 struct page_table_entry_t *page_directory = (struct page_table_entry_t *)0xFFFFF000;
 
-struct page_table_entry_t *get_pte_pointer(void *page, int level)
+struct page_table_entry_t *get_pte_pointer_chk(void *page, int level)
 {
     unsigned int directory_index = (unsigned int)page >> 22;
     struct page_table_entry_t *entry = NULL;
@@ -48,6 +48,22 @@
     return entry;
 }
 
+struct page_table_entry_t *get_pte_pointer(void *page, int level)
+{
+    if(level == 0)
+    {
+        return &page_directory[(unsigned int)page >> 22];
+    }
+    else if(level == 1)
+    {
+        return &page_tables[(unsigned int)page >> page_bits];
+    }
+    else
+    {
+        return NULL;
+    }
+}
+
 int start_paging(void *linear_addr, physaddr_t start, physaddr_t end, uint32_t *directory, uint32_t *table, uint32_t *identity_table)
 {
     unsigned int directory_index = (unsigned int) linear_addr >> 22;
@@ -114,7 +130,7 @@ void paging_load_address_space(physaddr_t table)
 
 int get_pte_type(void *page, int level)
 {
-    struct page_table_entry_t *entry = get_pte_pointer(page, level);
+    struct page_table_entry_t *entry = get_pte_pointer_chk(page, level);
     if(entry != NULL)
     {
         int flags = (entry->present ? PAGE_PRESENT | PAGE_EXECUTABLE : 0)
@@ -146,7 +162,7 @@ int set_pte_type(void *page, int level, int flags)
 
 physaddr_t get_pte_address(void *page, int level)
 {
-    struct page_table_entry_t *entry = get_pte_pointer(page, level);
+    struct page_table_entry_t *entry = get_pte_pointer_chk(page, level);
     if(entry != NULL)
     {
         return entry->physical_address << page_bits | ((size_t)page & 0xFFF);
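
Below is a minimal usage sketch (not part of the patch) of how a caller is expected to drive the new map_region()/unmap_region() pair now that missing page tables are faulted in on demand rather than pre-allocated inside map_page(). It assumes the mmgr.h declarations above plus the page_size, ENOMEM and PAGE_RW symbols already used in this patch; the buffer address, size and function names are hypothetical.

#include "mmgr.h"

/* Hypothetical fixed virtual address and size for a driver buffer. */
#define DRIVER_BUFFER_BASE ((void *)0xD0000000)
#define DRIVER_BUFFER_SIZE (16 * page_size)

error_t driver_buffer_init(void)
{
    /* Back the whole region with physical frames in one call. No page
     * tables are set up here; the first set_pte() store into the
     * recursive mapping window (0xFFC00000-0xFFFFF000) faults, and
     * isr_page_fault() allocates and wipes the missing table. */
    physaddr_t frames = reserve_pages(DRIVER_BUFFER_SIZE);
    if(frames == ENOMEM)
    {
        return ENOMEM;
    }
    return map_region(DRIVER_BUFFER_BASE, frames, DRIVER_BUFFER_SIZE, PAGE_RW);
}

void driver_buffer_teardown(void)
{
    /* unmap_region() hands back the first frame of the region, and
     * free_pages() no longer needs a size argument because the buddy
     * allocator recovers the block size from its own bookkeeping. */
    physaddr_t frames = unmap_region(DRIVER_BUFFER_BASE, DRIVER_BUFFER_SIZE);
    free_pages(frames);
}

Because the handler only auto-allocates on supervisor, not-present faults inside the 0xFFC00000-0xFFFFF000 window, ordinary faults (user mode, protection violations, or addresses outside the page-table region) still fall through to the panic path.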