Kernel page allocator and heap use new algorithms

2023-08-31 14:37:38 -05:00
parent e49bd0ca9c
commit 75b7e08e96
4 changed files with 68 additions and 62 deletions

View File

@@ -8,13 +8,11 @@
  * This function contructs the heap's internal tables, allocating pages as needed
  * to do so.
  *
- * @param page_stack Pointer to the page stack descriptor
  * @param base Base location of the heap to contruct
  * @param heap_size Total size in bytes of the heap
- * @param block_size Size in bytes of a single unit of allocation
  * @return a status code
  */
-int kminit(void *base, void* start, size_t heap_size, size_t block_size);
+int kminit(void *base, size_t heap_size);
 /**
  * @brief Allocates a block of memory containing at least `size` bytes.

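The header change above pairs with the kernel_initialize() call-site change further down: the heap interface is now just a base address and a size. The snippet below is only a caller-side sketch built from the signatures and the NULL-on-failure behavior visible in this commit; the setup_heap wrapper and its arguments are placeholders, not code from the repository.

#include <stddef.h>
#include "heap.h"

/* Bring the heap up over one contiguous region and exercise it once. */
static int setup_heap(void *heap_base, size_t heap_size)
{
    if(kminit(heap_base, heap_size))   /* nonzero status code means failure */
    {
        return -1;
    }
    void *buf = kmalloc(128);          /* at least 128 bytes, or NULL on failure */
    if(buf == NULL)
    {
        return -1;
    }
    kfree(buf);                        /* hand the block back to the heap */
    return 0;
}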
View File

@@ -1,34 +1,24 @@
 #include <stdbool.h>
-#include <libmalloc/bitmap_alloc.h>
+#include <libmalloc/list_alloc.h>
 #include "heap.h"
 #include "mmgr.h"
 #include "math.h"
 #include "stdio.h"
 #include "types/status.h"
-bitmap_heap_descriptor_t system_heap;
+list_alloc_descriptor_t system_heap;
-static int map_block(void *location, unsigned long size)
-{
-    for(int n = 0; n < size; n += page_size)
-    {
-        if(!(page_type(location + n) & PAGE_PRESENT))
-        {
-            int status = map_page(location + n, reserve_page(), PAGE_RW);
-            if(status)
-            {
-                return status;
-            }
-        }
-    }
-    return ENONE;
-}
 static int mmap_callback(void *location, unsigned long size)
 {
+    size += (unsigned long)location % page_size;
+    location -= (unsigned long)location % page_size;
     int status = ENONE;
     for(unsigned long i = 0; i < size; i += page_size)
     {
+        if((page_type(location + i) & PAGE_PRESENT))
+        {
+            continue;
+        }
         physaddr_t frame = reserve_page();
         if(frame == ENOMEM)
         {
@@ -42,44 +32,44 @@ static int mmap_callback(void *location, unsigned long size)
         return status;
     }
 }
-int kminit(void *base, void* start, size_t heap_size, size_t block_size)
+int kminit(void *base, size_t heap_size)
 {
-    static unsigned long heap_cache[16];
-    system_heap.bitmap = NULL;
-    system_heap.cache = heap_cache;
-    system_heap.cache_capacity = 16;
-    system_heap.block_bits = 4;
-    system_heap.block_size = block_size;
-    system_heap.offset = (unsigned long)base;
     memory_region_t map_array[8];
     memory_map_t map = {
         .array = map_array,
         .capacity = 8,
         .size = 0
     };
-    memmap_insert_region(&map, 0, heap_size, M_AVAILABLE);
-    memmap_insert_region(&map, 0, start - base, M_UNAVAILABLE);
-    return initialize_heap(&system_heap, &map, mmap_callback);
+    memmap_insert_region(&map, base, heap_size, M_AVAILABLE);
+    for(void *p = base; p < (base + heap_size); p += page_size)
+    {
+        if((page_type(p) & PAGE_PRESENT))
+        {
+            continue;
+        }
+        physaddr_t frame = reserve_page();
+        if(frame == ENOMEM || map_page(p, frame, PAGE_RW))
+        {
+            return ENOMEM;
+        }
+    }
+    return list_alloc_init(&system_heap, &map);
 }
 void *kmalloc(size_t size)
 {
-    unsigned long loc = reserve_region(&system_heap, size);
-    if(loc == NOMEM)
-    {
-        return NULL;
-    }
-    else if(map_block((void*)loc, size))
+    void *p = list_alloc_reserve(&system_heap, size);
+    if(p == NOMEM)
     {
         return NULL;
     }
     else
     {
-        return (void*)loc;
+        return p;
     }
 }
 void kfree(void *ptr)
 {
-    free_region(&system_heap, (unsigned long)ptr, 0);
+    list_alloc_free(&system_heap, (unsigned long)ptr);
 }

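kmalloc() and kfree() now defer all bookkeeping to libmalloc's list allocator, whose internals are not part of this diff. Purely as an illustration of the general first-fit free-list technique (the free_node_t type and flist_reserve() below are hypothetical and are not libmalloc's API), a reservation walks a list of free spans and splits the first one that fits:

#include <stddef.h>

/* Hypothetical free-list node: one per free span, linked in address order. */
typedef struct free_node
{
    size_t size;            /* bytes in this span, including the header */
    struct free_node *next;
} free_node_t;

/* First fit: take the first span that can hold `size` bytes, splitting
 * off the remainder as a new, smaller free node when it is big enough. */
static void *flist_reserve(free_node_t **head, size_t size)
{
    size += sizeof(free_node_t);                      /* keep room for a header */
    for(free_node_t **link = head; *link != NULL; link = &(*link)->next)
    {
        free_node_t *node = *link;
        if(node->size < size)
        {
            continue;
        }
        if(node->size - size >= sizeof(free_node_t) + 16)
        {
            /* Split: carve the allocation off the front, keep the tail free. */
            free_node_t *tail = (free_node_t*)((char*)node + size);
            tail->size = node->size - size;
            tail->next = node->next;
            node->size = size;
            *link = tail;
        }
        else
        {
            *link = node->next;                       /* use the whole span */
        }
        return (char*)node + sizeof(free_node_t);     /* usable memory after the header */
    }
    return NULL;                                      /* no span large enough */
}

Coalescing of neighbouring free spans and growing the heap through the mmap callback are left out of this sketch.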
View File

@@ -41,7 +41,9 @@ void kernel_initialize(struct boot_info_t *boot_info)
         panic("Failed to initialize page allocator.");
     }
-    if(kminit(&_kernel_start, page_map_end(), 0xFFC00000 - (size_t)&_kernel_start, 64))
+    printf("End of page map: %08x\n", page_map_end);
+    if(kminit(page_map_end(), 0xFFC00000 - (size_t)page_map_end()))
     {
         panic("Failed to initialize heap.");
     }

View File

@@ -4,17 +4,17 @@
 #include "platform/paging.h"
 #include "types/status.h"
 #include "stdio.h"
-#include <libmalloc/bitmap_alloc.h>
+#include <libmalloc/buddy_alloc.h>
 #include <stdint.h>
 #include <stdbool.h>
-#define MAX_CACHE_SIZE 32
+#define AVAIL_LIST_SIZE 20
-bitmap_heap_descriptor_t page_map;
+buddy_descriptor_t page_alloc;
 physaddr_t reserve_pages(size_t size)
 {
-    unsigned long location = reserve_region(&page_map, size);
+    unsigned long location = buddy_reserve(&page_alloc, size);
     if(location != NOMEM)
     {
         return location;
@@ -27,61 +27,60 @@ physaddr_t reserve_pages(size_t size)
 int free_pages(physaddr_t location, size_t size)
 {
-    free_region(&page_map, location, size);
+    buddy_free_size(&page_alloc, location, size);
     return ENONE;
 }
 physaddr_t reserve_page()
 {
-    unsigned long loc = reserve_region(&page_map, page_size);
+    unsigned long loc = buddy_reserve(&page_alloc, page_size);
+    printf("Reserved %08x\n", loc);
     if(loc == NOMEM)
     {
         return ENOMEM;
     }
     else
     {
-        printf("Reserved %08x\n", loc);
         return loc;
     }
 }
 int free_page(physaddr_t location)
 {
-    free_region(&page_map, location, page_size);
+    buddy_free_size(&page_alloc, location, page_size);
     return ENONE;
 }
 size_t free_page_count()
 {
-    return page_map.free_block_count;
+    return page_alloc.free_block_count;
 }
 void *page_map_base()
 {
-    return (void*)page_map.bitmap;
+    return (void*)page_alloc.block_map;
 }
 void *page_map_end()
 {
-    return (void*)page_map.bitmap + page_map.bitmap_size;
+    return (void*)page_alloc.block_map + page_alloc.block_map_size;
 }
 error_t initialize_page_map(memory_map_t *map, void *base, size_t memory_size, unsigned long block_size)
 {
-    static unsigned long page_map_cache[MAX_CACHE_SIZE];
+    static unsigned long avail_list[AVAIL_LIST_SIZE];
     // Round memory_size up to nearest power of 2
     memory_size = 1 << llog2(memory_size);
-    page_map.block_size = block_size;
-    page_map.block_bits = 1;
-    page_map.offset = 0;
-    page_map.cache = page_map_cache;
-    page_map.cache_capacity = MAX_CACHE_SIZE;
-    page_map.bitmap = (unsigned long*) base;
+    page_alloc.avail = avail_list;
+    page_alloc.block_map = (buddy_block_t*) base;
+    page_alloc.block_size = block_size;
+    page_alloc.mmap = NULL;
+    page_alloc.offset = 0;
     /* Allocate pages for bitmap */
     int pages_mapped = 0;
-    int pages_needed = (bitmap_size(map, block_size, 1) + page_size - 1) / page_size;
+    int pages_needed = (buddy_map_size(map, block_size) + page_size - 1) / page_size;
     for(int i = 0; i < map->size && (pages_mapped < pages_needed); i++)
     {
         if(map->array[i].type != M_AVAILABLE)
@@ -92,7 +91,7 @@ error_t initialize_page_map(memory_map_t *map, void *base, size_t memory_size, unsigned long block_size)
         physaddr_t region_end = map->array[i].location + map->array[i].size;
         while(location + page_size <= region_end && (pages_mapped < pages_needed))
         {
-            void *page = (void*)page_map.bitmap + pages_mapped * page_size;
+            void *page = (void*)page_alloc.block_map + pages_mapped * page_size;
             for(int level = 0; level < page_table_levels; level++)
             {
                 if(!(get_pte_type(page, level) & PAGE_PRESENT))
@@ -118,12 +117,29 @@ error_t initialize_page_map(memory_map_t *map, void *base, size_t memory_size, unsigned long block_size)
         }
     }
-    if(initialize_heap(&page_map, map, NULL))
+    printf("Initializing page allocator...\n");
+    if(buddy_alloc_init(&page_alloc, map))
     {
         return ENOMEM;
     }
     else
     {
+        printf("page_alloc = {\n\t" \
+               "avail = %08x\n\t" \
+               "block_map = %08x\n\t" \
+               "block_map_size = %08x\n\t" \
+               "max_kval = %i\n\t" \
+               "block_size = %i\n\t" \
+               "offset = %08x\n\t" \
+               "free_block_count = %08x\n}",
+               page_alloc.avail,
+               page_alloc.block_map,
+               page_alloc.block_map_size,
+               page_alloc.max_kval,
+               page_alloc.block_size,
+               page_alloc.offset,
+               page_alloc.free_block_count);
         return ENONE;
     }
 }
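
The physical page allocator now goes through libmalloc's buddy allocator (buddy_reserve, buddy_free_size, and buddy_alloc_init above). The library itself is not part of this commit, so the following is only a generic illustration of the buddy sizing rule: a request is served from the smallest order k whose block size (block_size << k) covers it. The buddy_order() helper is hypothetical, and block_size is assumed to be a power of two such as page_size.

#include <stddef.h>

/* Order k of a request: the smallest k such that (block_size << k) >= size.
 * A buddy allocator serves the request from the avail list for order k,
 * splitting a larger block in half repeatedly when that list is empty. */
static unsigned int buddy_order(size_t size, size_t block_size)
{
    unsigned int k = 0;
    size_t block = block_size;
    while(block < size)
    {
        block <<= 1;    /* doubling the block raises the order by one */
        k++;
    }
    return k;
}

/* Example: with 4 KiB blocks, a 20 KiB request rounds up to 32 KiB, order 3. */

Splitting blocks when an avail list is empty and merging freed buddies back together are handled inside buddy_alloc and are not reproduced here.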