summaryrefslogtreecommitdiff
path: root/src/backup
diff options
context:
space:
mode:
authorBrett Weiland <brett_weiland@bpcspace.com>2021-04-18 16:00:17 -0500
committerBrett Weiland <brett_weiland@bpcspace.com>2021-04-18 16:00:17 -0500
commit774e3796b252383aafb8b3f30d51a19400c74516 (patch)
tree20ad93c125d4f6bad755e6a898ddb4259818b4fa /src/backup
parentf0602964daa20ad9cd05f097b943c8edbf2df2e2 (diff)
modified: src/bootloader/bios_functions/bios_disk.asm
modified: src/bootloader/bootloader.asm new file: src/include/kernel.h modified: src/include/libc.h modified: src/include/paging.h new file: src/include/panic.h modified: src/kernel/kernel.c modified: src/kernel/libc.c modified: src/kernel/page.c new file: src/kernel/panic.c modified: src/link.ld modified: src/makefile modified: tools/page/page.py
Diffstat (limited to 'src/backup')
-rw-r--r--src/backup437
1 files changed, 437 insertions, 0 deletions
diff --git a/src/backup b/src/backup
new file mode 100644
index 0000000..9191304
--- /dev/null
+++ b/src/backup
@@ -0,0 +1,437 @@
+#include <printf.h>
+#include <paging.h>
+#include <stdint.h>
+#include <libc.h>
+#include <limits.h>
+#include <kernel.h>
+
+void debug_print_memory() {
+ struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
+ printf(" __________________________________________________________________________\n");
+ printf("| type\tstart\t\t\tend\t\t\tsize\t\t |\n");
+ printf("|--------------------------------------------------------------------------|\n");
+ for(unsigned int i = 0; memtable[i].length > 0; i++) {
+ printf("| %u %u\t0x%p\t0x%p\t0x%p |\n", memtable[i].type, memtable[i].ACPI, memtable[i].base, (memtable[i].base + memtable[i].length), memtable[i].length);
+ }
+ printf("----------------------------------------------------------------------------\n");
+}
+
+void debug_pmap() {
+ struct phys_map* pmap;
+ int pmap_i = 0, order;
+ uint64_t buddy_size, blong_i, bbit_i, buddy_chunksize, omit_cnt;
+ printf("Maximum buddy order: %u (up to %#x sized chunks)\n", MAX_BUDDY_ORDER, (0x1000 << MAX_BUDDY_ORDER - 1));
+ for(pmap = (struct phys_map*)&_stage2_pagetable; pmap != 0; pmap = pmap->next) {
+ printf("Table %u:\n"
+ "\tPhysical Start:\t%#p\n"
+ "\tTable location:\t%#p\n", pmap_i, pmap->zone_paddr, pmap);
+ for(order = 0; order <= MAX_BUDDY_ORDER - 1; order++) {
+ buddy_chunksize = (0x1000 << order); //TODO just put it in the for loop
+ buddy_size = (((order == MAX_BUDDY_ORDER - 1)
+ ? (uint64_t *)pmap->next : pmap->buddy[order + 1]) - pmap->buddy[order]);
+ printf("\tbuddy[%u]:\n"
+ "\t\tAddress:\t%#x\n"
+ "\t\tSize:\t\t%u\n"
+ "\t\tBuddies:\t\t\n", order, pmap->buddy[order], buddy_size);
+
+ omit_cnt = 0;
+
+ for(blong_i = 0; blong_i < buddy_size; blong_i++) {
+ for(bbit_i = 0; bbit_i < 64; bbit_i++) {
+ if(*(pmap->buddy[order] + blong_i) & ((uint64_t)1 << bbit_i)) {
+ if((omit_cnt < 20) || (blong_i == buddy_size - 1)) {
+ printf("address %#x\tbit %u: %x\t is free\n",
+ pmap->buddy[order] + blong_i, bbit_i, pmap->zone_paddr + ((blong_i * 64) + bbit_i) * buddy_chunksize);
+ }
+ omit_cnt++;
+ if(omit_cnt == 20) {
+ printf("\t\t\t[more entries ommited]\n");
+ }
+ }
+ }
+ }
+print_next_buddy: ;
+ }
+ pmap_i++;
+ }
+}
+
+/*
+ * part 1:
+ * init tables (complete)
+ *
+ * part 2: setting the actual entires
+ * if entry contains table, set 0, set threshold
+ * else:
+ * if entry:
+ */
+
+// init_memory revision
+// rules:
+ /**
+void init_pmap() {
+ struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
+ struct phys_map *pmap = (struct phys_map*)&_stage2_pagetable;
+ struct phys_map *last_pmap;
+ unsigned int i, x;
+ uint64_t budentry_len, pmap_chunksize, total_pmap_len = 0;
+ void *y;
+ void *paged_mem = &_stage2_pagetable + 0x200000;
+ map_page(&_stage2_pagetable, &_stage2_pagetable, PAGE_SIZE_2M);
+ for(i = 0; memtable[i].length > 0; i++) {
+ if((memtable[i].type == MEM_AVAILABLE) && (memtable[i].ACPI & 1)) {
+ total_pmap_len += 88; //it'd be nice to find a cleaner way
+
+ //make sure we don't overwrite what we have so far of the kernel
+ if((memtable[i].base <= (void*)&_stage2_pagetable) && (memtable[i].base + memtable[i].length >= (void *)&_stage2_pagetable)) {
+ pmap->chunk_start = &_stage2_pagetable;
+ pmap->chunk_size = memtable[i].length - (pmap->chunk_start - memtable[i].base);
+ }
+ else {
+ pmap->chunk_start = memtable[i].base;
+ pmap->chunk_size = memtable[i].length;
+ }
+
+ for(x = 0; x < 8; x++) {
+ pmap->bsize[x] = ceil((pmap->chunk_size / (0x1000 * (1 << x))) / (double)64);
+ total_pmap_len += pmap->bsize[x] * 8;
+ }
+
+ pmap->next = (void*)&_stage2_pagetable + total_pmap_len;
+
+ while((void*)pmap->next + sizeof(struct phys_map) >= paged_mem) {
+ //do check here if nessesary
+ map_page(paged_mem, paged_mem, PAGE_SIZE_2M);
+ paged_mem += 0x200000;
+ }
+
+ last_pmap = pmap;
+ pmap = pmap->next;
+ }
+ }
+ last_pmap->next = 0; //I wonder if there's a better way to do this
+ pmap = (struct phys_map*)&_stage2_pagetable;
+
+}
+**/
+
/*
 * init_pmap - build the physical-memory buddy-allocator maps.
 *
 * Pass 1: walks the BIOS memory table at _meminfo_loc and lays out one
 * struct phys_map (header plus MAX_BUDDY_ORDER bitmaps) per usable zone,
 * starting at _stage2_pagetable, identity-mapping 2M pages as the maps
 * grow.  Pass 2: marks the region occupied by the maps themselves as
 * allocated in the bitmaps.
 *
 * NOTE(review): this file lives in src/backup and is clearly
 * work-in-progress; several suspect spots are flagged below rather than
 * changed.
 */
void init_pmap() {
  struct memory_table *zones = (struct memory_table *)&_meminfo_loc;
  struct phys_map *pmap = (struct phys_map*)&_stage2_pagetable;
  struct phys_map *last_pmap = (struct phys_map*)&_stage2_pagetable;

  unsigned int zone_i, pmap_i = 0;
  int budorder;

  //we keep this info out of the struct because we won't need it after setting up
  uint64_t zone_len[MAX_ZONE_CNT], buddy_bitlen[MAX_ZONE_CNT][MAX_BUDDY_ORDER];
  uint64_t *buddy_end;

  uint64_t pmap_size, pmap_bbitsize, pmap_blongsize, buddy_size, pmap_bit;
  /* NOTE(review): threshold_bit is never used below */
  uint64_t threshold_longsize = 0, threshold_bit, threshold_end, threshold_bitsize;
  /* NOTE(review): deleteme_last_p_bits is unused -- candidate for removal */
  uint64_t deleteme_last_p_bits;


  /* identity-map the first 2M for the maps; paged_mem tracks the end of
   * mapped memory so we can extend it as the maps grow */
  void *paged_mem = (void *)&_stage2_pagetable + 0x200000;
  map_page(&_stage2_pagetable, &_stage2_pagetable, PAGE_SIZE_2M);

  for(zone_i = 0; zones[zone_i].length > 0; zone_i++) {

    if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1)) {

      //hopefully this should never happen... but x86 is routy.
      //I should dig into the docs to check before removing this.
      //We also could forget about MAX_ZONE_CNT if we did.

      if(zone_i >= MAX_ZONE_CNT) {
        printf("Only %u zones can be used! Modify MAX_ZONE_CNT in paging.h to use all memory.\n", MAX_ZONE_CNT);
        break;
      }


      /* if the zone contains the kernel/maps, start the zone at
       * _stage2_pagetable so we never hand out memory below it */
      if((zones[zone_i].base <= (void*)&_stage2_pagetable) &&
          (zones[zone_i].base + zones[zone_i].length >= (void *)&_stage2_pagetable)) {
        pmap->zone_paddr = &_stage2_pagetable;
        zone_len[pmap_i] = zones[zone_i].length - (pmap->zone_paddr - zones[zone_i].base);
      }
      else {
        pmap->zone_paddr = zones[zone_i].base;
        zone_len[pmap_i] = zones[zone_i].length;
      }

      /* order-0 bitmap starts right after the phys_map header */
      pmap->buddy[0] = (void *)pmap + sizeof(*pmap);

      for(budorder = 1; budorder < MAX_BUDDY_ORDER; budorder++) {
        buddy_bitlen[pmap_i][budorder - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], budorder - 1);
        pmap->buddy[budorder] = (uint64_t *)pmap->buddy[budorder - 1] +
          LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][budorder - 1]);
      }

      /* the next table starts after the highest-order bitmap */
      buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], MAX_BUDDY_ORDER - 1);
      pmap->next = (void *)pmap->buddy[MAX_BUDDY_ORDER - 1] +
        (LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1]) * 8);

      pmap = pmap->next;
      pmap_i++;
      /* NOTE(review): last_pmap is assigned AFTER pmap advances, so it
       * always equals the new (next) table, never the previous one --
       * looks like these statements are in the wrong order.  Also, no
       * terminating pmap->next = 0 is ever written (the commented-out
       * older version did this via last_pmap). */
      last_pmap = pmap;

      //allocates by an extra sizeof(struct phys_map),
      //but were about to discard anyway
      while((void *)pmap + sizeof(*pmap) >= paged_mem) {
        map_page(paged_mem, paged_mem, PAGE_SIZE_2M);
        paged_mem += 0x200000;
      }

    }
  }
  /* total size of all maps; used below to mark their pages as taken */
  pmap_size = (void*)(pmap) - (void*)&_stage2_pagetable;
  if(pmap_size >= zone_len[0]) panic(); //TODO debugging

  pmap = (struct phys_map*)&_stage2_pagetable;

  // Honestly, this section makes me feel like Yandere Dev. It's ugly.
  // I've been rewriting this function forever, so I'm deciding to slam it out start to finish.
  // I know there's a lot of repeated if statements. I know it hurts.
  // But fear not.
  // I'll come back and rewrite this part when I've gotten a break from memory management.
  /* NOTE(review): the increment is pmap++ but the list is linked via
   * pmap->next (as the loop in debug_pmap does) -- pointer arithmetic
   * here almost certainly walks past the table; suspected bug. */
  for(pmap = (struct phys_map*)&_stage2_pagetable; pmap != 0; pmap++) {
    for(budorder = MAX_BUDDY_ORDER - 1; budorder >= 0; budorder--) {
      /* bits (chunks of 0x1000 << budorder) covered by the maps region */
      pmap_bbitsize = ceil((float)pmap_size / ((uint64_t)0x1000 << budorder));
      pmap_blongsize = pmap_bbitsize / 64;

      if(budorder == MAX_BUDDY_ORDER - 1) {
        buddy_end = (uint64_t *)pmap->next - 1;
        /* NOTE(review): dead store -- this value is immediately
         * overwritten by UINT64_MAX on the next line */
        threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
        threshold_bitsize = UINT64_MAX;
      }
      else {
        buddy_end = pmap->buddy[budorder + 1] - 1;
        threshold_longsize = threshold_bitsize / 64;
        /* NOTE(review): threshold_end is computed but never read */
        threshold_end = threshold_longsize + 1;
      }
      pmap_bit = pmap_bbitsize & 63;
      buddy_size = buddy_end - pmap->buddy[budorder];


      if(pmap_bbitsize >= BITLEN_FROM_LSIZE(buddy_size)) {
        //is this nessesary?
        bzero(pmap->buddy[budorder], buddy_size * 8);
      }
      else {
        if(budorder == MAX_BUDDY_ORDER - 1) {
          /* clear the longs fully covered by the maps region, then mark
           * the remaining bits of the partial long as free */
          if(pmap_blongsize) bzero(pmap->buddy[budorder], (pmap_blongsize - 1) * 8);
          if(pmap_bit) {
            *(pmap->buddy[budorder] + pmap_blongsize) = ~(((uint64_t)1 << pmap_bit) - 1);
          }
          else {
            *(pmap->buddy[budorder] + pmap_blongsize) = UINT64_MAX;
          }
          if(pmap_blongsize + 1 == buddy_size) {
            //TODO why did I have this conditional? Do I need it later? Check on desktop before removing
            if(buddy_bitlen[0][budorder]) {
              *(pmap->buddy[budorder] + pmap_blongsize) &=
                ((uint64_t)1 << (buddy_bitlen[0][budorder] & 63)) - 1;
            }
          }
          else {
            memset(pmap->buddy[budorder] + pmap_blongsize + 1, UINT8_MAX,
                (void *)buddy_end - (void *)pmap->buddy[budorder] - 8);
            *buddy_end = ((uint64_t)1 << (buddy_bitlen[0][budorder] & 63)) - 1;
          }
        }
        else {
          if(threshold_longsize) bzero(pmap->buddy[budorder], (threshold_longsize - 1) * 8);

          if(threshold_bitsize > pmap_bbitsize)
            *(pmap->buddy[budorder] + threshold_longsize) = ((uint64_t)1 << ((threshold_bitsize - 1) & 63));

          if(buddy_size - threshold_longsize)
            bzero(pmap->buddy[budorder] + threshold_longsize + 1, buddy_size - threshold_longsize);
          *buddy_end = ((uint64_t)1 << ((buddy_bitlen[0][budorder] & 63) - 1));
        }
        /* threshold for the next (lower) order: twice the bit count at
         * this order */
        threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
      }
    }
  }
// for(pmap = pmap->next; pmap != 0; pmap++) {
// }
}
+
+
+// for(budlong_ptr = pmap->buddy[budorder]; budlong_ptr < (uint64_t *)pmap->next - 1; budlong_ptr++) {
+
+/**
+//uses buddy system allocation
+void init_memory() {
+ struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
+ struct phys_map *map = (struct phys_map*)&_stage2_pagetable;
+ struct phys_map *lasttable;
+ unsigned int i, buddy_chunksize;
+ uint64_t buddy_bitsize, prev_buddy_bsize, ppt_size;
+ uint64_t *buddy_ptr, *budentry_end, *threshold;
+ uint64_t buddy_size64;
+
+ map_page((void*)&_stage2_pagetable, (void*)&_stage2_pagetable, PAGE_SIZE_2M);
+ void *next_page = (void *)0x400000;
+ // at this point, we are declaring our header and kernel itself as free (so don't forget to fix that!)
+
+ //TODO all these for loops are unsanitary.
+ //We hae a lot of branches in the for loops,
+ //And we re-do work to allocate the page-table.
+ //I'll clean this up when I'm certain my buddy system works.
+ for(i = 0; memtable[i].length > 0; i++) {
+ if((memtable[i].type == MEM_AVAILABLE) && (memtable[i].ACPI & 1)) {
+ map->chunk_start = (void*)memtable[i].base;
+ map->chunk_size = memtable[i].length;
+ buddy_ptr = (void*)&map->buddies;
+
+ for(buddy_chunksize = 0x1000; buddy_chunksize < 0x100000; buddy_chunksize *= 2) {
+ buddy_bitsize = memtable[i].length / buddy_chunksize;
+ buddy_size64 = ceil(buddy_bitsize / (double)64);
+
+ if((void*)&buddy_ptr[buddy_size64] + 24 >= next_page) {
+ map_page(next_page, next_page, PAGE_SIZE_2M);
+ next_page += 0x200000;
+ }
+
+ printf("buddy\t%x\theader bitsize\t%u\theader longsize\t%u\tbuddy start\t%p",\
+ buddy_chunksize, buddy_bitsize, buddy_size64, buddy_ptr);
+ if(((buddy_bitsize * 2) != prev_buddy_bsize) && (buddy_chunksize != 0x1000)) {
+ buddy_ptr[-1] |= ((uint64_t)1 << ((prev_buddy_bsize & 63) - 1));
+ printf("\tlast:\t%lx", buddy_ptr[-1]);
+ }
+ printf("\n");
+
+ if(buddy_chunksize < 0x80000) {
+ bzero(buddy_ptr, buddy_size64 * 8);
+ prev_buddy_bsize = buddy_bitsize;
+ }
+
+ else if(buddy_size64 % buddy_bitsize) {
+ memset(buddy_ptr, 0xff, (buddy_size64 - 1) * 8);
+ buddy_ptr[buddy_size64] |= 1 << buddy_bitsize;
+ }
+
+ else memset(buddy_ptr, 0xff, buddy_size64);
+
+ buddy_ptr += buddy_size64;
+ }
+
+ //this feels kind of gross
+ lasttable = map;
+ map->next = (struct phys_map*)buddy_ptr;
+ map = (struct phys_map*)buddy_ptr;
+ }
+ }
+ lasttable->next = (void*)0;
+ map = (struct phys_map*)&_stage2_pagetable;
+ //now we will allocate the table out of itself so we don't mess things up.
+ //We can't just call palloc(), we need to allocate out of the first available pages (where the pages are)
+ //we should clean this whole gross function
+ /**
+ ppt_size = (uint64_t)((void *)buddy_ptr - (void *)&_stage2_pagetable);
+ threshold = (void*)UINT64_MAX;
+ int buddy_bit;
+ for(buddy_chunksize = 0x80000; buddy_chunksize >= 0x1000; buddy_chunksize /= 2) {
+ //this means that our table must fit into the first page table. Fixme later, low priotrity
+ buddy_size64 = ceil((map->chunk_size / buddy_chunksize) / (double)64);
+ budentry_end = buddy_ptr;
+ for(buddy_ptr -= buddy_size64; buddy_ptr <= budentry_end; buddy_ptr++) {
+ if(buddy_ptr > threshold) {
+ buddy_ptr = budentry_end;
+ continue;
+ }
+ if(
+ }
+ }
+}
+ **/
+
//TODO this function was deleted due to it being wrong.
//I'll create it once I have the physical paging prerequisite set up.
// Deliberate no-op stub: will eventually build the stage-2 page table
// out of `free_mem` once the physical allocator exists.
void create_pagetable_stage2(uint64_t free_mem) {
}
+
+
+/**
+ * BIG TODO:
+ * Paging turned out to be simpler then I thought. I've temporarily fixed the code, but needs to be rewritten/simplified.
+ * Let's get rid of those nasty GOTOs if we can.
+ * Also, once we get physical memory allocator up and running, impliment that in this function.
+**/
+
+bool map_page(void *virtual_addr, void *physical_addr, uint8_t size) {
+ //printf("map page called\n");
+ uintptr_t va_ptr = (uintptr_t)virtual_addr;
+ uintptr_t pa_ptr = (uintptr_t)physical_addr;
+ if((va_ptr % (1 << size)) || (pa_ptr % (1 << size))) {
+ return 0;
+ }
+ page_table *table = (page_table *)PAGEMAP_LOCATION;
+ int pte_i = (va_ptr >> 12) & 0x1ff;
+ int pde_i = (va_ptr >> 21) & 0x1ff;
+ int pdpe_i = (va_ptr >> 30) & 0x1ff;
+ int pml4e_i = (va_ptr >> 39) & 0x1ff;
+
+ if(table->pml4e[pml4e_i].present) {
+ if(table->pml4e[pml4e_i].base_ptr != (uintptr_t)&table->pdpe[pdpe_i] >> 12) goto error;
+ if(table->pdpe[pdpe_i].present) {
+ if(size == PAGE_SIZE_1G) {
+ if(table->pdpe[pdpe_i].base_ptr == ((uintptr_t)pa_ptr >> 30 & 0x1ff))
+ return true;
+ goto error;
+ }
+ if(table->pdpe[pdpe_i].base_ptr != (uintptr_t)&table->pde[pde_i] >> 12) goto error;
+
+ if(table->pde[pde_i].present) {
+ if(size == PAGE_SIZE_2M) {
+ if(table->pde[pde_i].base_ptr == ((uintptr_t)pa_ptr >> 21 & 0x1ff))
+ return true;
+ goto error;
+ }
+ if(table->pde[pde_i].base_ptr != (uintptr_t)&table->pte[pte_i] >> 12) goto error;
+ if(table->pte[pte_i].present) {
+ if(table->pte[pte_i].base_ptr != ((pa_ptr >> 12) & 0x1ff)) goto error;
+ return true;
+ }
+ else goto mod_page_pte;
+ }
+ else goto mod_page_pde;
+ }
+ else goto mod_page_pdpe;
+ }
+ else {
+ table->pml4e[pml4e_i].base_ptr = (uintptr_t)&table->pdpe[pdpe_i] >> 12;
+ table->pdpe[pml4e_i].read_write = 1;
+ table->pml4e[pml4e_i].present = 1;
+mod_page_pdpe:
+ table->pdpe[pdpe_i].read_write = 1;
+ //TODO you just found out things are a lot more simple then you thought!
+ if(size == PAGE_SIZE_1G) {
+ table->pdpe[pdpe_i].size = 1;
+ table->pdpe[pdpe_i].base_ptr = pa_ptr >> 12;
+ table->pdpe[pdpe_i].present = 1;
+ return true;
+ }
+ table->pdpe[pdpe_i].base_ptr = (uintptr_t)&table->pde[pde_i] >> 12;
+ table->pdpe[pdpe_i].present = 1;
+mod_page_pde:
+ table->pde[pde_i].read_write = 1;
+ if(size == PAGE_SIZE_2M) {
+ table->pde[pde_i].size = 1;
+ table->pde[pde_i].base_ptr = pa_ptr >> 12;
+ table->pde[pde_i].present = 1;
+ return true;
+ }
+ table->pde[pde_i].base_ptr = (uintptr_t)&table->pte[pte_i] >> 12;
+ table->pde[pde_i].present = 1;
+mod_page_pte:
+ table->pte[pte_i].base_ptr = pa_ptr >> 12;
+ table->pte[pte_i].read_write = 1;
+ table->pte[pte_i].present = 1;
+ return true;
+ }
+error:
+ printf("Page allocation error!\n");
+ return false;
+}