author    Brett Weiland <brett_weiland@bpcspace.com>  2021-08-24 14:09:29 -0500
committer Brett Weiland <brett_weiland@bpcspace.com>  2021-08-24 14:09:29 -0500
commit    9b22a6965579ea1867aea291d910c96f386b518b (patch)
tree      d06dbb9c4708f1cc713bcb115b32ff9bce4cf9b9 /src/kernel/page.c
parent    bad4b0e9bdfee336bfc1c23761408279eaec1558 (diff)

    major backup 8.24.21

Diffstat (limited to 'src/kernel/page.c'):
 src/kernel/page.c | 573
 1 file changed, 364 insertions(+), 209 deletions(-)
diff --git a/src/kernel/page.c b/src/kernel/page.c
index de4f557..9db8660 100644
--- a/src/kernel/page.c
+++ b/src/kernel/page.c
@@ -1,12 +1,106 @@
#include <printf.h>
-#include <paging.h>
#include <stdint.h>
#include <libc.h>
#include <limits.h>
+#include <panic.h>
+#include <math.h>
#include <kernel.h>
+#include <stdbool.h>
+#include <cpuid.h>
+
+//using char because C won't let us declare an extern of incomplete type void
+extern char _kernel_shared_zone_begin;
+
+
+
+// PAGE MAPPING
+#define PAGEMAP_LOCATION 0x10000
+
+#define MAX_BUDDY_ORDER 8 //orders 0-7: blocks from 4 KiB (0x1000 << 0) up to 512 KiB (0x1000 << 7)
+#define PALLOC_AUTO_BLEVEL MAX_BUDDY_ORDER
+
+typedef struct phys_map {
+ struct phys_map *next;            //next zone's map, 0 on the last
+ unsigned int max_buddy;           //highest order this zone is big enough to hold
+ uint64_t bsize[MAX_BUDDY_ORDER];  //length of each bitmap, in 64-bit words
+ uint64_t *buddy[MAX_BUDDY_ORDER]; //one bitmap per order; a set bit marks a free block
+} pmap_t;
+
+static pmap_t *first_pmap;
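+
+//For orientation: a minimal sketch of how a page's order-0 bit is located in
+//this layout. page_is_free() is a hypothetical helper (not part of this
+//commit); pfree() and palloc() below do the same arithmetic inline.
+static inline bool page_is_free(pmap_t *pmap, void *addr) {
+  uint64_t bit = ((uint64_t)addr - (uint64_t)pmap) / 0x1000; //page index from the pmap base
+  return (pmap->buddy[0][bit / 64] >> (bit & 63)) & 1;       //set bit == block is free
+}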
+
+//E820-style memory-map entry types
+#define MEM_AVAILABLE 1
+#define MEM_RESERVED 2
+#define MEM_ACPI_RECLAIMABLE 3
+#define MEM_ACPI_NVS 4
+#define MEM_BAD 5
+
+// ZONES
+#define ZONE_MAP_PLOC 0x7000
+#define ZONE_MAP PHYS_TO_VIRT(ZONE_MAP_PLOC)
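+
+/* Assumed from usage in this file (the macros themselves are defined
+ * elsewhere and are not part of this diff):
+ *   PA_OFFSET        -- virtual base of the higher-half direct map
+ *   PHYS_TO_VIRT(p)  -- roughly ((void *)((uintptr_t)(p) + PA_OFFSET))
+ */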
+
+
+
+//reorganized (moved) from the header
+typedef struct __attribute__((packed)) {
+ unsigned int present : 1; // must be one when accessed
+ unsigned int read_write : 1; // if set to one, writes are allowed
+ unsigned int user : 1; // separates CPL 0-2 from CPL 3
+ unsigned int writethrough_cache : 1; // honestly maybe I should look into caching
+ unsigned int cachable : 1; // hardware caching; 0 is enabled. What's the worst that could happen?
+ unsigned int accessed : 1; // we'll never use any of these!
+ unsigned int zg0 : 1; // needs to be (and will be) zeroed
+ unsigned int size : 1; // if set to 1, this entry points to physical memory
+ unsigned int zg1 : 1; // needs to be (and will be) zeroed
+ unsigned int software_marks : 3; // available for our own use; I doubt we'll use it in such a simple thing
+
+ uintptr_t base_ptr : 40; // bits 12-51 of the target physical address
+ unsigned int avail:11;
+ unsigned int no_exec:1;
+} page_table;
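+
+//Sketch of how one of these entries gets filled in for a leaf (huge-page)
+//mapping. set_huge_entry() is hypothetical, but map_complete_physical()
+//below does exactly this, field by field.
+static inline void set_huge_entry(page_table *entry, uintptr_t phys) {
+  entry->base_ptr   = phys >> 12; //low 12 bits of the address are the flag fields
+  entry->read_write = 1;
+  entry->size       = 1;          //leaf entry: points at physical memory, not a lower table
+  entry->present    = 1;
+}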
+
+
+//one BIOS E820 memory-map entry
+struct memory_table {
+ void *base;
+ uint64_t length;
+ uint32_t type; //MEM_* values above
+ uint32_t ACPI; //extended attributes; bit 0 = entry is valid
+} __attribute__((packed));
+
+static bool NX_capable;
+static bool huge_page_capable;
+
+
+void get_mem_capabilities() {
+ uint32_t unused, edx;
+ __get_cpuid(0x80000001, &unused, &unused, &unused, &edx);
+ huge_page_capable = (edx >> 26) & 1; //EDX bit 26: 1 GiB pages supported
+ NX_capable = (edx >> 20) & 1; //EDX bit 20: NX (execute-disable) supported
+}
+
+
+void unmap_lowmem() {
+ struct stack_frame *frame;
+
+ //shift rsp/rbp up into the higher-half direct map, then grab the frame chain
+ asm("add %1, %%rsp\n"
+     "add %1, %%rbp\n"
+     "mov %%rbp, %0"
+     :"=r"(frame)
+     :"r"((uint64_t)PA_OFFSET));
+
+ while(frame->next != 0) {
+  printf("%p\n", frame->function_base);
+  frame->next = PHYS_TO_VIRT((void *)frame->next);
+  frame = frame->next;
+ }
+
+ //[future] use the page-unmapping function here once it's written
+ page_table *entry = (page_table *)PAGEMAP_LOCATION;
+ entry[0].present = 0; //for now, just clear the first identity-mapped PML4 entry
+}
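+
+//struct stack_frame is defined elsewhere; from its use above it is presumably
+//the usual frame-pointer chain, something like:
+// struct stack_frame {
+//  struct stack_frame *next; //saved rbp of the caller
+//  void *function_base;      //saved return address (or similar)
+// };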
void debug_print_memory() {
- struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
+ struct memory_table *memtable = (void *)ZONE_MAP;
printf(" __________________________________________________________________________\n");
printf("| type\tstart\t\t\tend\t\t\tsize\t\t |\n");
printf("|--------------------------------------------------------------------------|\n");
@@ -16,32 +110,39 @@ void debug_print_memory() {
printf("----------------------------------------------------------------------------\n");
}
+//debug helper: artificially truncates zone 6 (through the low identity
+//mapping) to stress the allocator
+void ram_stresser() {
+ struct memory_table *memtable = (void *)ZONE_MAP - PA_OFFSET;
+ memtable[6].length = 0x10000;
+}
+
void debug_pmap() {
- struct phys_map* pmap;
+ pmap_t *pmap = first_pmap;
int pmap_i = 0, order;
- uint64_t buddy_size, blong_i, bbit_i, buddy_chunksize, omit_cnt;
- printf("Maximum buddy order: %u (up to %#x sized chunks)\n", MAX_BUDDY_ORDER, (0x1000 << MAX_BUDDY_ORDER - 1));
- for(pmap = (struct phys_map*)&_stage2_pagetable; pmap != 0; pmap = pmap->next) {
+ uint64_t blong_i, bbit_i, buddy_chunksize, omit_cnt;
+ printf("Maximum buddy order: %u (up to %#x sized chunks)\n", MAX_BUDDY_ORDER, ((0x1000 << MAX_BUDDY_ORDER) - 1));
+ for(; pmap != 0; pmap = pmap->next) {
printf("Table %u:\n"
- "\tPhysical Start:\t%#p\n"
- "\tTable location:\t%#p\n", pmap_i, pmap->zone_paddr, pmap);
+ "\tPhysical/pmap start:\t%#p\n"
+ "\tTable Size:\t%u\n", pmap_i, pmap,
+ (uint64_t)(pmap->buddy[MAX_BUDDY_ORDER - 1] + pmap->bsize[MAX_BUDDY_ORDER - 1]) - (uint64_t)pmap);
+
for(order = 0; order <= MAX_BUDDY_ORDER - 1; order++) {
buddy_chunksize = (0x1000 << order); //TODO just put it in the for loop
- buddy_size = (((order == MAX_BUDDY_ORDER - 1)
- ? (uint64_t *)pmap->next : pmap->buddy[order + 1]) - pmap->buddy[order]);
printf("\tbuddy[%u]:\n"
- "\t\tAddress:\t%#x\n"
+ "\t\tAddress:\t%#p\n"
"\t\tSize:\t\t%u\n"
- "\t\tBuddies:\t\t\n", order, pmap->buddy[order], buddy_size);
+ "\t\tBuddies:\t\t\n", order, pmap->buddy[order], pmap->bsize[order]);
omit_cnt = 0;
- for(blong_i = 0; blong_i < buddy_size; blong_i++) {
+ for(blong_i = 0; blong_i < pmap->bsize[order]; blong_i++) {
for(bbit_i = 0; bbit_i < 64; bbit_i++) {
- if(*(pmap->buddy[order] + blong_i) & ((uint64_t)1 << bbit_i)) {
- if((omit_cnt < 20) || (blong_i == buddy_size - 1)) {
- printf("address %#x\tbit %u: %p\t is free\n",
- pmap->buddy[order] + blong_i, bbit_i, (uint64_t)pmap->zone_paddr + (((blong_i * 64) + bbit_i) * buddy_chunksize));
+ if((pmap->buddy[order][blong_i]) & ((uint64_t)1 << bbit_i)) {
+ if((omit_cnt < 20) || (blong_i == pmap->bsize[order] - 1)) {
+ printf("address %#p\tbit %u: %p\t is free\n",
+ pmap->buddy[order] + blong_i,
+ bbit_i,
+ ((uint64_t)pmap - PA_OFFSET) + ((((blong_i * 64) + bbit_i) * buddy_chunksize)));
}
omit_cnt++;
if(omit_cnt == 20) {
@@ -55,237 +156,291 @@ void debug_pmap() {
}
}
-void init_pmap() {
- struct memory_table *zones = (struct memory_table *)&_meminfo_loc;
- struct phys_map *pmap = (struct phys_map*)&_stage2_pagetable;
+//TODO I know you don't want to, but you need to thoroughly check this.
+void pfree(void *addr, size_t size) {
+ int blevel = 0;
+ uint64_t *onbyte; //the 64-bit word our buddy's bit lives in at the current level
+ uint64_t page_bitloc; //bit offset from the start of buddy[0]; used to derive the shifts
+ int bbitlen; //length of the freed area, in current-level blocks
+ int lshift; //bit position within *onbyte
+ pmap_t *pmap = first_pmap;
+
+ /* note: there's no security check to see if the page is actually allocated,
+ * or if we are freeing the table itself.
+ * This should be okay, as only the kernel will be calling it.
+ * If it gets too messy we can always come back.
+ */
- unsigned int zone_i, pmap_i = 0;
- int budorder;
- //we keep this info out of the struct because we won't need it after setting up
- uint64_t zone_len[MAX_ZONE_CNT], buddy_bitlen[MAX_ZONE_CNT][MAX_BUDDY_ORDER], *buddy_end;
- uint64_t pmap_size, pmap_bbitsize, pmap_blongsize, buddy_size, buddy_bit, pmap_bit;
- uint64_t threshold_bitsize, threshold_longsize = 0;
+ if(((uintptr_t)addr & 4095) || (size & 4095)) {
+ PANIC(KERNEL_PANIC_INVALID_PFREE);
+ return; //TODO [minor] some more specificity, not a huge deal
+ }
+ size /= 0x1000;
+ for(; pmap != 0; pmap = pmap->next) {
+ page_bitloc = (addr - (void *)pmap) / 0x1000;
+ onbyte = pmap->buddy[0] + (page_bitloc / 64);
+ if((addr >= (void *)pmap) && onbyte < pmap->buddy[1]) break;
+ }
-
- void *paged_mem = (void *)&_stage2_pagetable + 0x200000;
- map_page(&_stage2_pagetable, &_stage2_pagetable, PAGE_SIZE_2M);
+ while(blevel < MAX_BUDDY_ORDER) {
+ lshift = (page_bitloc / (1 << blevel)) & 63;
+ onbyte = pmap->buddy[blevel] + ((page_bitloc / 64) / (1 << blevel));
+ bbitlen = size / (1 << blevel);
- for(zone_i = 0; zones[zone_i].length > 0; zone_i++) {
- if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1)) {
+ //TODO clean up this part ------------------------------------------------------------- (below)
+ if(bbitlen <= 1) {
+ if(lshift & 1) {
+ if((*onbyte >> (lshift - 1)) & 1) {
+ *onbyte &= ~(((uint64_t)1 << (lshift - 1)) | ((uint64_t)1 << lshift));
+ size += (1 << blevel);
+ page_bitloc -= (1 << blevel);
+ bbitlen = size / (1 << blevel);
+ }
+ }
+ else if((*onbyte >> (lshift + 1)) & 1) {
+ *onbyte &= ~(((uint64_t)1 << (lshift + 1)) | ((uint64_t)1 << lshift));
+ size += (1 << blevel);
+ bbitlen = size / (1 << blevel);
+ }
+ }
+ else if(((lshift + bbitlen) & 1) && ((*onbyte >> (lshift + bbitlen)) & 1)) {
+ *onbyte ^= ((uint64_t)1 << (lshift + bbitlen));
+ size += (1 << blevel);
+ bbitlen = size / (1 << blevel);
+ }
+ //TODO clean up this part ------------------------------------------------------------- (above)
- //hopefully this should never happen...
- //I should dig into the docs to check before removing this.
- //We also could forget about MAX_ZONE_CNT if we did.
-
- if(zone_i >= MAX_ZONE_CNT) {
- printf("Only %u zones can be used! Modify MAX_ZONE_CNT in paging.h to use all memory.\n", MAX_ZONE_CNT);
+	if((!((size - 1) & size)) && (bbitlen != 1)) { //merged size is a power of two: jump straight to its level
+ blevel = 63 - __builtin_clzl(size);
+ }
+ else {
+ if(bbitlen <= 1) {
+ *onbyte |= ((uint64_t)1 << lshift);
break;
+ } else if(bbitlen & 1) { //check me
+ size -= (1 << blevel);
+ *onbyte |= ((uint64_t)1 << (bbitlen + lshift));
}
+ blevel++;
+ }
+ }
+}
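+
+//Usage sketch (hypothetical call site, not part of this commit): pfree()
+//takes the direct-mapped address plus a byte count, and both must be
+//4 KiB-aligned or PANIC(KERNEL_PANIC_INVALID_PFREE) fires.
+void *palloc(size_t size); //defined below
+static void __attribute__((unused)) pfree_example(void) {
+  void *p = palloc(0x3000); //three pages
+  if(p) pfree(p, 0x3000);   //same 4 KiB granularity on the way back
+}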
- if((zones[zone_i].base <= (void*)&_stage2_pagetable) &&
- (zones[zone_i].base + zones[zone_i].length >= (void *)&_stage2_pagetable)) {
- pmap->zone_paddr = &_stage2_pagetable;
- zone_len[pmap_i] = zones[zone_i].length - (pmap->zone_paddr - zones[zone_i].base);
- }
- else {
- pmap->zone_paddr = zones[zone_i].base;
- zone_len[pmap_i] = zones[zone_i].length;
- }
+void *palloc(size_t size) {
+ bool self_alloc;
+ int min_blevel, blevel;
+ uint64_t bbit, unshifted_entry, threshold, bloc; //TODO move when you've confirmed casting stuff
+ uint64_t buddy_i, *ret, *bentry;
+ int itercount;
+ pmap_t *pmap = first_pmap;
- pmap->buddy[0] = (void *)pmap + sizeof(*pmap);
- for(budorder = 1; budorder < MAX_BUDDY_ORDER; budorder++) {
- buddy_bitlen[pmap_i][budorder - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], budorder - 1);
- pmap->buddy[budorder] = (uint64_t *)pmap->buddy[budorder - 1] +
- LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][budorder - 1]);
- }
+ if(size == 0) return 0;
+ if(size & 4095) {
+ size = DIV_ROUND_UP(size, 0x1000);
+ }
+ else {
+ size = size / 0x1000;
+ }
+
+	//checking if pmap has been initialized; if not, we've been called to self-allocate
+	//the first buddy should never be allocated; that's where our pmap lives
+	if(pmap->buddy[pmap->max_buddy][0] & 1) {
+		self_alloc = true;
+		min_blevel = pmap->max_buddy;
+	}
+	else {
+		//floor(log2(size)); bumped up one level below for non-powers-of-two
+		self_alloc = false;
+		min_blevel = 63 - __builtin_clzl(size);
+		if(size & (size - 1)) min_blevel++;
+ if(min_blevel > MAX_BUDDY_ORDER - 1) return 0;
+ }
- buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], MAX_BUDDY_ORDER - 1);
- pmap->next = (void *)pmap->buddy[MAX_BUDDY_ORDER - 1] +
- (LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1]) * 8);
+ for(blevel = min_blevel; blevel < MAX_BUDDY_ORDER; blevel++) {
+ for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
- pmap = pmap->next;
- pmap_i++;
+ for(buddy_i = 0; buddy_i < pmap->bsize[blevel]; buddy_i++) {
+ if(pmap->buddy[blevel][buddy_i] > (uint64_t)0) {
+ bentry = &pmap->buddy[blevel][buddy_i];
+ bbit = __builtin_ctzl(*bentry);
+ bloc = bbit;
- //allocates by an extra sizeof(struct phys_map),
- //but were about to discard anyway
- while((void *)pmap + sizeof(*pmap) >= paged_mem) {
- map_page(paged_mem, paged_mem, PAGE_SIZE_2M);
- paged_mem += 0x200000;
- }
+ *bentry ^= (uint64_t)1 << bbit;
+
+ ret = (((buddy_i * 64) + bbit) * (0x1000 << blevel)) + (void *)pmap;
+
+ threshold = 0b11;
+
+ itercount = 1;
+ for(blevel--; blevel >= 0; blevel--) {
+ bentry = pmap->buddy[blevel] + ((bentry - pmap->buddy[blevel + 1]) * 2);
+ itercount++;
+ if(bloc >= 32) bentry += 1;
+ bloc = (bloc * 2) & 63; // will be the amount we need to shift
+ bbit = ceil((float)size / (1 << blevel));
+
+ unshifted_entry = ((uint64_t)1 << bbit) & threshold;
+ if(unshifted_entry) {
+ threshold = ((uint64_t)1 << (bbit * 2)) - 1;
+ }
+ else {
+ threshold = (threshold << 2) | threshold;
+ }
+ *bentry |= (unshifted_entry << bloc);
+ }
+ if(!self_alloc) bzero(ret, size * 0x1000);
+ return ret;
+ }
+ }
}
}
- pmap_size = (void*)(pmap) - (void*)&_stage2_pagetable;
- if(pmap_size >= zone_len[0]) panic(); //TODO debugging
+ return 0;
+}
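+
+//How the size argument rounds, per the code above (worked examples):
+//  palloc(0)      -> 0 (NULL)
+//  palloc(1)      -> 1 page,  order 0 (4 KiB block)
+//  palloc(0x1001) -> 2 pages, order 1 (8 KiB block)
+//  palloc(0x5000) -> 5 pages, order 3 (32 KiB block; non-power-of-two rounds up)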
- pmap_i = 0;
- for(pmap = (struct phys_map*)&_stage2_pagetable; pmap->next != 0; pmap = pmap->next) {
- for(budorder = MAX_BUDDY_ORDER - 1; budorder >= 0; budorder--) {
- pmap_bbitsize = ceil((float)pmap_size / ((uint64_t)0x1000 << budorder));
- pmap_blongsize = pmap_bbitsize / 64;
- if(budorder == MAX_BUDDY_ORDER - 1) {
- buddy_size = (uint64_t *)pmap->next - pmap->buddy[budorder];
- buddy_end = (uint64_t *)pmap->next - 1;
+//maps all physical memory at PA_OFFSET; returns bytes of page-table space used
+size_t map_complete_physical() {
+ uint64_t total_mem;
+ unsigned int pdpe_cnt, pde_cnt, pde_max_i;
+ int zone_i, entry_i;
+ struct memory_table *zones = (void *)ZONE_MAP_PLOC;
+
- threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
- }
- else {
- buddy_size = pmap->buddy[budorder + 1] - pmap->buddy[budorder];
- buddy_end = pmap->buddy[budorder + 1] - 1;
+ page_table *pml4 = (page_table *)PAGEMAP_LOCATION;
+ page_table *pdpe = (page_table *)&_kernel_shared_zone_begin;
+ page_table *pde;
- threshold_longsize = threshold_bitsize / 64;
- }
- pmap_bit = pmap_bbitsize & 63;
- buddy_bit = buddy_bitlen[pmap_i][budorder] & 63;
+	for(zone_i = 0; zones[zone_i].length > 0; zone_i++); //find the end of the zone map
+	total_mem = (uint64_t)zones[zone_i - 1].base + zones[zone_i - 1].length; //top of the last zone
+
+	pdpe_cnt = (total_mem + (0x40000000 - 1)) / 0x40000000; //one PDPE per GiB, rounded up
+
+	entry_i = (PA_OFFSET >> 39) & 0x1ff; //PML4 slot of the higher-half direct map
+ pml4[entry_i].base_ptr = (uintptr_t)&_kernel_shared_zone_begin >> 12;
+ pml4[entry_i].read_write = 1;
+ pml4[entry_i].user = 0;
+ pml4[entry_i].size = 0;
+ pml4[entry_i].no_exec = 1;
+ pml4[entry_i].present = 1;
- if((pmap_bbitsize >= BITLEN_FROM_LSIZE(buddy_size)) && (pmap == (void *)&_stage2_pagetable)) {
- bzero(pmap->buddy[budorder], buddy_size * 8);
+ if(huge_page_capable) {
+ for(int pdpe_i = 0; pdpe_i < pdpe_cnt; pdpe_i++) {
+ pdpe[pdpe_i].base_ptr = pdpe_i << 18;
+ pdpe[pdpe_i].read_write = 1;
+ pdpe[pdpe_i].user = 0;
+ pdpe[pdpe_i].size = 1;
+ pdpe[pdpe_i].no_exec = NX_capable;
+ pdpe[pdpe_i].present = 1;
+ }
+ return pdpe_cnt * 0x1000;
+ }
+ else {
+		pde_cnt = (total_mem + (0x200000 - 1)) / 0x200000; //one PDE per 2 MiB, rounded up
+ for(int pdpe_i = 0; pdpe_i < pdpe_cnt; pdpe_i++) {
+ pde = (page_table *)(&_kernel_shared_zone_begin + (pdpe_cnt * 0x1000) + (pdpe_i * 0x1000));
+
+ if((pdpe_i < pdpe_cnt - 1) || (!(pde_cnt & 511))) {
+ pde_max_i = 512;
}
else {
- if(budorder == MAX_BUDDY_ORDER - 1) {
- if(pmap == (void*)&_stage2_pagetable) {
- if(pmap_blongsize) bzero(pmap->buddy[budorder], (pmap_blongsize - 1) * 8);
- if(pmap_bit) {
- *(pmap->buddy[budorder] + pmap_blongsize) = ~(((uint64_t)1 << pmap_bit) - 1);
- }
- else {
- *(pmap->buddy[budorder] + pmap_blongsize) = UINT64_MAX;
- }
- if(pmap_blongsize + 1 == buddy_size) {
- *buddy_end &= ((uint64_t)1 << buddy_bit) - 1;
- }
- else {
- memset(pmap->buddy[budorder] + pmap_blongsize + 1, UINT8_MAX, (buddy_size - 1) * 8);
- if(buddy_bit) {
- *buddy_end = ((uint64_t)1 << buddy_bit) - 1;
- }
- else {
- *buddy_end = UINT64_MAX;
- }
- }
- threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
- }
- else {
- memset(pmap->buddy[budorder], UINT8_MAX, (buddy_size - 1) * 8);
- if(buddy_bit) {
- *buddy_end = ((uint64_t)1 << buddy_bit) - 1;
- }
- else {
- *buddy_end = UINT64_MAX;
- }
- }
- }
- else if(pmap == (void *)&_stage2_pagetable) {
- if(threshold_longsize) bzero(pmap->buddy[budorder], (threshold_longsize - 1) * 8);
+ pde_max_i = pde_cnt & 511;
+ }
+
+ pdpe[pdpe_i].base_ptr = (uintptr_t)pde >> 12;
+ pdpe[pdpe_i].read_write = 1;
+ pdpe[pdpe_i].user = 0;
+ pdpe[pdpe_i].size = 0;
+ pdpe[pdpe_i].no_exec = NX_capable;
+ pdpe[pdpe_i].present = 1;
- if(threshold_bitsize > pmap_bbitsize)
- *(pmap->buddy[budorder] + threshold_longsize) = ((uint64_t)1 << ((threshold_bitsize - 1) & 63));
-
- if(buddy_size - threshold_longsize)
- bzero(pmap->buddy[budorder] + threshold_longsize + 1, buddy_size - threshold_longsize);
- if(buddy_bit & 1) {
- *buddy_end = ((uint64_t)1 << (buddy_bit - 1));
- }
- threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
- }
- else {
- bzero(pmap->buddy[budorder], buddy_size);
- if(buddy_bit & 1) {
- *buddy_end = ((uint64_t)1 << ((buddy_bit) - 1));
- }
- }
+ for(int pde_i = 0; pde_i < pde_max_i; pde_i++) {
+ pde[pde_i].base_ptr = ((pdpe_i << 9) + pde_i) << 9;
+ pde[pde_i].read_write = 1;
+ pde[pde_i].user = 0;
+ pde[pde_i].size = 1;
+ pde[pde_i].no_exec = NX_capable;
+ pde[pde_i].present = 1;
}
}
- pmap_i++;
+ return (pdpe_cnt * 2) * 0x1000;
}
}
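+
+//Worked example (hypothetical 4 GiB machine):
+//  total_mem = 0x100000000
+//  pdpe_cnt  = (0x100000000 + 0x3fffffff) / 0x40000000 = 4
+//  1 GiB-page path: 4 PDPE leaf entries           -> returns 4 * 0x1000
+//  2 MiB path:      4 page directories, 2048 PDEs -> returns (4 * 2) * 0x1000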
+pmap_t *init_pmap(size_t pagetable_size) {
+	pmap_t *pmap = NULL, *last_pmap;
+ struct memory_table *zones = (void *)ZONE_MAP;
+ int budorder, zone_i;
+ uint64_t pmap_size, pmap_bbitsize, zone_size;
+ bool first_pmap_i = true;
-/**
- * BIG TODO:
- * Paging turned out to be simpler then I thought. I've temporarily fixed the code, but needs to be rewritten/simplified.
- * Let's get rid of those nasty GOTOs if we can.
- * Also, once we get physical memory allocator up and running, impliment that in this function.
-**/
+
-bool map_page(void *virtual_addr, void *physical_addr, uint8_t size) {
- //printf("map page called\n");
- uintptr_t va_ptr = (uintptr_t)virtual_addr;
- uintptr_t pa_ptr = (uintptr_t)physical_addr;
- if((va_ptr % (1 << size)) || (pa_ptr % (1 << size))) {
- return 0;
- }
- page_table *table = (page_table *)PAGEMAP_LOCATION;
- int pte_i = (va_ptr >> 12) & 0x1ff;
- int pde_i = (va_ptr >> 21) & 0x1ff;
- int pdpe_i = (va_ptr >> 30) & 0x1ff;
- int pml4e_i = (va_ptr >> 39) & 0x1ff;
-
- if(table->pml4e[pml4e_i].present) {
- if(table->pml4e[pml4e_i].base_ptr != (uintptr_t)&table->pdpe[pdpe_i] >> 12) goto error;
- if(table->pdpe[pdpe_i].present) {
- if(size == PAGE_SIZE_1G) {
- if(table->pdpe[pdpe_i].base_ptr == ((uintptr_t)pa_ptr >> 30 & 0x1ff))
- return true;
- goto error;
+ for(zone_i = 0; zones[zone_i].length > 0; zone_i++) {
+ if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1) &&
+ zones[zone_i].length >= (0x2000)) {
+ printf("found allocatable map at %p\n", zones[zone_i].base);
+ last_pmap = pmap;
+ if(zones[zone_i].base == (void *)0x100000) {
+ zone_size = zones[zone_i].length - (((uint64_t)&_kernel_shared_zone_begin - 0x100000) + pagetable_size);
+ pmap = PHYS_TO_VIRT((void *)&_kernel_shared_zone_begin + pagetable_size);
+ }
+ else {
+ zone_size = zones[zone_i].length;
+ pmap = PHYS_TO_VIRT(zones[zone_i].base);
}
- if(table->pdpe[pdpe_i].base_ptr != (uintptr_t)&table->pde[pde_i] >> 12) goto error;
- if(table->pde[pde_i].present) {
- if(size == PAGE_SIZE_2M) {
- if(table->pde[pde_i].base_ptr == ((uintptr_t)pa_ptr >> 21 & 0x1ff))
- return true;
- goto error;
+ if(first_pmap_i) {
+ pmap->next = NULL;
+ first_pmap_i = false;
+ }
+ else {
+ pmap->next = last_pmap;
+ }
+
+ for(budorder = 0; budorder < MAX_BUDDY_ORDER; budorder++) {
+ pmap_bbitsize = zone_size / (0x1000 << budorder);
+			pmap->bsize[budorder] = DIV_ROUND_UP(pmap_bbitsize, 64);
+ if(budorder) {
+ pmap->buddy[budorder] = pmap->buddy[budorder - 1] + pmap->bsize[budorder - 1];
+ }
+ else {
+ pmap->buddy[0] = (void *)pmap + sizeof(*pmap);
+ }
+ if(budorder < MAX_BUDDY_ORDER - 1) {
+ bzero(pmap->buddy[budorder], pmap->bsize[budorder] * 8);
+				if(pmap_bbitsize & 1) { //odd block count: the last block has no buddy to pair with, mark it free here
+ pmap->buddy[budorder][pmap->bsize[budorder] - 1] =
+ ((uint64_t)1 << ((pmap_bbitsize - 1) & 63));
+ }
+ if(pmap_bbitsize == 1) {
+ pmap->max_buddy = budorder;
+ for(budorder++; budorder < MAX_BUDDY_ORDER; budorder++) {
+ pmap->buddy[budorder] = 0;
+ pmap->bsize[budorder] = 0;
+ }
+ break;
+ }
}
- if(table->pde[pde_i].base_ptr != (uintptr_t)&table->pte[pte_i] >> 12) goto error;
- if(table->pte[pte_i].present) {
- if(table->pte[pte_i].base_ptr != ((pa_ptr >> 12) & 0x1ff)) goto error;
- return true;
+			else {
+				//top order: everything starts free; trim the bits past the end of the zone
+				pmap->max_buddy = MAX_BUDDY_ORDER - 1;
+				memset(pmap->buddy[budorder], UINT8_MAX, pmap->bsize[budorder] * 8);
+ if((pmap_bbitsize / 64) != (pmap->bsize[budorder])) {
+ pmap->buddy[budorder][pmap->bsize[budorder] - 1] =
+ (((uint64_t)1 << (pmap_bbitsize & 63)) - 1);
+ }
}
- else goto mod_page_pte;
}
- else goto mod_page_pde;
- }
- else goto mod_page_pdpe;
- }
- else {
- table->pml4e[pml4e_i].base_ptr = (uintptr_t)&table->pdpe[pdpe_i] >> 12;
- table->pdpe[pml4e_i].read_write = 1;
- table->pml4e[pml4e_i].present = 1;
-mod_page_pdpe:
- table->pdpe[pdpe_i].read_write = 1;
- //TODO you just found out things are a lot more simple then you thought!
- if(size == PAGE_SIZE_1G) {
- table->pdpe[pdpe_i].size = 1;
- table->pdpe[pdpe_i].base_ptr = pa_ptr >> 12;
- table->pdpe[pdpe_i].present = 1;
- return true;
- }
- table->pdpe[pdpe_i].base_ptr = (uintptr_t)&table->pde[pde_i] >> 12;
- table->pdpe[pdpe_i].present = 1;
-mod_page_pde:
- table->pde[pde_i].read_write = 1;
- if(size == PAGE_SIZE_2M) {
- table->pde[pde_i].size = 1;
- table->pde[pde_i].base_ptr = pa_ptr >> 12;
- table->pde[pde_i].present = 1;
- return true;
+
+	pmap_size = (uint64_t)(pmap->buddy[pmap->max_buddy] + pmap->bsize[pmap->max_buddy]) - (uint64_t)pmap;
+	first_pmap = pmap; //we spoof palloc into allocating from the specific required pmap
+	palloc(pmap_size); //self-allocate: carve the map's own storage out of the zone it describes
}
- table->pde[pde_i].base_ptr = (uintptr_t)&table->pte[pte_i] >> 12;
- table->pde[pde_i].present = 1;
-mod_page_pte:
- table->pte[pte_i].base_ptr = pa_ptr >> 12;
- table->pte[pte_i].read_write = 1;
- table->pte[pte_i].present = 1;
- return true;
}
-error:
- printf("Page allocation error!\n");
- return false;
+ return pmap;
}
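+
+//Inferred boot-time call order (sketch; the caller is not part of this diff):
+// get_mem_capabilities();                   //CPUID: 1 GiB pages? NX?
+// size_t pt_size = map_complete_physical(); //direct-map all RAM at PA_OFFSET
+// init_pmap(pt_size);                       //build the buddy maps, self-allocate them
+// unmap_lowmem();                           //fix stack pointers, drop the low identity map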