From cf7cd8be60c254b44b444c97dcb238d7cf3afd4c Mon Sep 17 00:00:00 2001
From: Brett Weiland
Date: Tue, 21 Sep 2021 10:50:33 -0500
Subject: palloc smp safe (testing required, NOT pfree)

---
 src/kernel/page.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 83 insertions(+), 14 deletions(-)

(limited to 'src/kernel/page.c')

diff --git a/src/kernel/page.c b/src/kernel/page.c
index dea3941..0b286a4 100644
--- a/src/kernel/page.c
+++ b/src/kernel/page.c
@@ -7,11 +7,15 @@
 #include
 #include
 #include
+#include
+
+#include
 
 //just using char because c is a lil bitch and won't let us use void
 extern char _kernel_shared_zone_begin;
-
+//expects core_id
+#define waitlist_i(y) (((core_id) * sizeof(uintptr_t)) + (y))
 
 // PAGE MAPPING
 #define PAGEMAP_LOCATION 0x10000
 
@@ -22,12 +26,21 @@ extern char _kernel_shared_zone_begin;
 typedef struct phys_map {
   struct phys_map *next;
   unsigned int max_buddy;
+
+  //has to be a 16 bit variable
+  uint16_t mutex; //we might improve the speed of this later
+
   uint64_t bsize[MAX_BUDDY_ORDER];
   uint64_t *buddy[MAX_BUDDY_ORDER];
 } pmap_t;
 
 static pmap_t *first_pmap;
 
+//I'd like to find out a way to get rid of this... we only use it once
+static unsigned int pmap_count = 0;
+
+static pmap_t **waiting_pmaps;
+
 #define MEM_AVAILABLE 1
 #define MEM_RESERVED 2
 #define MEM_APCI_RECLAIMABLE 3
@@ -161,10 +174,10 @@ void debug_pmap() {
 //TODO I know you don't want to, but you need to thoroughly check this.
 void pfree(void *addr, size_t size) {
   int blevel = 0;
-  uint64_t *onbyte; //the byte out buddy resides on in the current level
-  uint64_t page_bitloc; // how many bits we are away from buddy[0]. Helps calculate bitshifts
-  int bbitlen; //length of free'd area in current level
-  int lshift; //lshift is how many bits we shift over, rightbit is what it sounds like dumbass
+  uint64_t *onbyte;
+  uint64_t page_bitloc;
+  int bbitlen;
+  int lshift;
   pmap_t *pmap = first_pmap;
 
   /* note: there's no security check to see if the page is actually allocated,
@@ -176,7 +189,7 @@ void pfree(void *addr, size_t size) {
 
   if(((uintptr_t)addr & 4095) || (size & 4095)) {
     PANIC(KERNEL_PANIC_INVALID_PFREE);
-    return; //TODO [minor] some more specificity, not a huge deal
+    return;
   }
   size /= 0x1000;
   for(; pmap != 0; pmap = pmap->next) {
@@ -221,7 +234,7 @@
       if(bbitlen <= 1) {
         *onbyte |= ((uint64_t)1 << lshift);
         break;
-      } else if(bbitlen & 1) { //check me
+      } else if(bbitlen & 1) {
         size -= (1 << blevel);
         *onbyte |= ((uint64_t)1 << (bbitlen + lshift));
       }
@@ -232,12 +245,16 @@
 
 
 void *palloc(size_t size) {
+  uint8_t core_id = get_coreid();
   bool self_alloc;
   int min_blevel, blevel;
-  uint64_t bbit, unshifted_entry, threshold, bloc; //TODO move when you've confirmed casting stuff
+  uint64_t bbit, unshifted_entry, threshold, bloc;
   uint64_t buddy_i, *ret, *bentry;
   int itercount;
+  bool unlocked_pmaps_searched = false;
   pmap_t *pmap = first_pmap;
+  unsigned int waitingp_i = 0;
+  unsigned int searchingp_i;
 
   if(size == 0) return 0;
 
@@ -263,7 +280,19 @@ void *palloc(size_t size) {
   }
 
   for(blevel = min_blevel; blevel < MAX_BUDDY_ORDER; blevel++) {
-    for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
+    //for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
+    //while(!unlocked_pmaps_searched ||
+    //      is_empty(waiting_pmaps[core_id], sizeof(&pmap) * pmap_count)) {
+    pmap = first_pmap;
+    while(pmap) {
+
+      if(get_set_mutex(&pmap->mutex)) {
+        if(!unlocked_pmaps_searched) waiting_pmaps[waitlist_i(waitingp_i++)] = pmap;
+        goto get_next_pmap;
+      }
+
+
+      if(unlocked_pmaps_searched) waiting_pmaps[waitlist_i(waitingp_i)] = 0;
       for(buddy_i = 0; buddy_i < pmap->bsize[blevel]; buddy_i++) {
         if(pmap->buddy[blevel][buddy_i] > (uint64_t)0) {
 
@@ -295,11 +324,42 @@ void *palloc(size_t size) {
             }
             *bentry |= (unshifted_entry << bloc);
           }
-          if(!self_alloc) bzero(ret, size * 0x1000);
+          pmap->mutex = 0;
+          if(!self_alloc) bzero(ret, size * 0x1000); //TODO do we really need to bezero here?
           return ret;
         }
       }
+get_next_pmap:
+      pmap->mutex = 0;
+      if(unlocked_pmaps_searched) {
+        pmap = 0;
+        for(searchingp_i = waitingp_i + 1; searchingp_i < pmap_count; searchingp_i++) {
+          if(waiting_pmaps[waitlist_i(searchingp_i)]) {
+            pmap = waiting_pmaps[waitlist_i(searchingp_i)];
+            break;
+          }
+        }
+        if(!pmap) {
+          for(searchingp_i = 0; searchingp_i <= waitingp_i; searchingp_i++) {
+            if(waiting_pmaps[waitlist_i(searchingp_i)]) {
+              pmap = waiting_pmaps[waitlist_i(searchingp_i)];
+              break;
+            }
+          }
+        }
+      }
+      else {
+        if(!pmap->next) {
+          pmap = waiting_pmaps ? waiting_pmaps[waitlist_i(0)] : 0;
+          unlocked_pmaps_searched = true;
+        }
+        else {
+          pmap = pmap->next;
+        }
+      }
     }
+    unlocked_pmaps_searched = false;
+    waitingp_i = 0;
   }
   return 0;
 }
@@ -374,12 +434,19 @@ size_t map_complete_physical() {
   }
 }
 
-pmap_t *init_pmap(size_t pagetable_size) {
+void init_pmap_smp() {
+  size_t pmap_arrsize = corecount * pmap_count * sizeof(waiting_pmaps);
+  waiting_pmaps = malloc(pmap_arrsize);
+  bzero(waiting_pmaps, pmap_arrsize);
+}
+
+void *init_pmap(size_t pagetable_size) {
   pmap_t *pmap, *last_pmap;
   struct memory_table *zones = (void *)ZONE_MAP;
   int budorder, zone_i;
   uint64_t pmap_size, pmap_bbitsize, zone_size;
   bool first_pmap_i = true;
+
 
 
@@ -387,6 +454,7 @@ pmap_t *init_pmap(size_t pagetable_size) {
     if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1) && zones[zone_i].length >= (0x2000)) {
       printf("found allocatable map at %p\n", zones[zone_i].base);
+      pmap_count++;
       last_pmap = pmap;
 
       if(zones[zone_i].base == (void *)0x100000) {
         zone_size = zones[zone_i].length - (((uint64_t)&_kernel_shared_zone_begin - 0x100000) + pagetable_size);
@@ -396,7 +464,6 @@ pmap_t *init_pmap(size_t pagetable_size) {
         zone_size = zones[zone_i].length;
         pmap = PHYS_TO_VIRT(zones[zone_i].base);
       }
-
       if(first_pmap_i) {
         pmap->next = NULL;
         first_pmap_i = false;
@@ -405,6 +472,8 @@ pmap_t *init_pmap(size_t pagetable_size) {
         pmap->next = last_pmap;
       }
 
+      pmap->mutex = 0;
+
       for(budorder = 0; budorder < MAX_BUDDY_ORDER; budorder++) {
         pmap_bbitsize = zone_size / (0x1000 << budorder);
         pmap->bsize[budorder] = DIV_ROUND_UP(pmap_bbitsize , 64);
@@ -440,8 +509,8 @@ pmap_t *init_pmap(size_t pagetable_size) {
       }
       pmap_size = (uint64_t)(pmap->buddy[pmap->max_buddy] + pmap->bsize[pmap->max_buddy]) - (uint64_t)pmap;
 
-      first_pmap = pmap; //we spoof palloc into allocating from the specific required pmap.
-      palloc(pmap_size); //TODO (MAJOR BUG) something isn't right, I don't think
+      first_pmap = pmap;
+      palloc(pmap_size);
     }
   }
   return pmap;
--
cgit v1.2.3
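
Note on the locking primitive: the patch relies on a few SMP helpers that are defined elsewhere in the tree and do not appear in this diff -- get_set_mutex(), get_coreid(), corecount, and malloc()/bzero() for the waiting_pmaps table. For palloc()'s logic to work as written, get_set_mutex() must be an atomic test-and-set that returns nonzero when the pmap is already locked (a locked pmap is skipped and queued on waiting_pmaps), while unlocking is just a store of 0 (pmap->mutex = 0). A minimal sketch of a compatible implementation, assuming GCC/Clang __atomic builtins are available, is below; it is illustrative only, not the kernel's actual code, and release_mutex() is a hypothetical helper name since the patch clears the field directly.

  #include <stdint.h>

  /* Illustrative only: atomically set the flag and return its previous
   * value. 0 means the caller acquired the lock; nonzero means another
   * core already holds it, matching how palloc() tests the return value. */
  static inline uint16_t get_set_mutex(volatile uint16_t *mutex) {
    return __atomic_exchange_n(mutex, (uint16_t)1, __ATOMIC_ACQUIRE);
  }

  /* Illustrative only: release with an explicit release-ordered store;
   * the patch itself releases with a plain assignment (pmap->mutex = 0). */
  static inline void release_mutex(volatile uint16_t *mutex) {
    __atomic_store_n(mutex, (uint16_t)0, __ATOMIC_RELEASE);
  }

On x86-64 the exchange compiles down to a lock xchg and the release is a plain store; the release-ordered store is the portable way to ensure the buddy-bitmap updates made under the lock are visible before another core takes the same pmap.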