palloc smp safe (testing required, NOT pfree)

Brett Weiland
parent 907fb823bf
commit cf7cd8be60

@@ -1,6 +1,9 @@
target remote localhost:1234
symbol-file debug/debug_syms.o

set scheduler-locking step

hb page.c:357

define cs2bs
  print (1 << (5 + $arg0))

@@ -2,10 +2,12 @@
#define _STRING_H_
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

void *strcpy(char *dest, char *src);
void *memcpy(void *dest, void *src, size_t n); //TODO
void *bzero(const void *dest, size_t size);
void *memcpy(void *dest, void *src, size_t n);
void *bzero(void *dest, size_t size);
bool *is_empty(void *dest, size_t size);
void *memset(void *s, char c, size_t n);
size_t strlen(const char *s);
int strcmp(const char *str1, const char *str2);
@@ -10,6 +10,7 @@
void unmap_lowmem();
size_t map_complete_physical();
void debug_pzone();
void init_pmap_smp();


struct phys_map *init_pmap(size_t pagetable_size);
@@ -1,5 +1,6 @@
#ifndef random_header
#define random_header
void randinit();
void sync_malloc();
unsigned int randint();
#endif
@@ -1,7 +1,10 @@
#ifndef SMP_INCLUDED
#define SMP_INCLUDED
#include <stdint.h>
void smp_boot();
#include <stdbool.h>
#include <cpuid.h>
#include <printf.h>
void smp_prepare();
extern uint8_t corecount;

static inline void lock(uint8_t *lock) {
@@ -18,9 +21,25 @@ static inline void unlock(uint8_t *lock) {
  asm("lock andb [%0], 0"::"r"(lock));
}

static inline void waitup(uint8_t *loto) {
static inline bool get_set_mutex(uint16_t *mutex) {
  bool ret;
  asm("lock bts %1, 0\n"
      "jc .mutex_taken\n"
      "mov %0, 0\n"
      "jmp .done\n"
      ".mutex_taken:\n"
      "mov %0, 1\n"
      ".done:\n"
      :"=r"(ret)
      :"m"(*mutex));
  return ret;
}



//THIS IS ONLY UNTIL WE GET MULTITHREADING SET UP
uint8_t get_coreid();

#define CREATE_LOTO(name)

#endif
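Review note: get_set_mutex atomically sets bit 0 of *mutex and returns true when the mutex was already held (bts puts the old bit in CF). One thing to check in the testing pass: .mutex_taken and .done are plain local symbols, so if this inline function is expanded more than once in a translation unit the assembler will see duplicate labels (GCC's %= token or numeric labels avoid that). A minimal portable sketch of the same semantics, assuming C11 atomics rather than the commit's asm:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

//illustration only: atomically set bit 0 and report whether it was already set
static inline bool get_set_mutex_c11(_Atomic uint16_t *mutex) {
  //fetch_or returns the previous value; bit 0 already set means "taken"
  return atomic_fetch_or(mutex, 1) & 1;
}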
@@ -1,23 +0,0 @@
#ifndef SMP_SYNC_INCLUDED
#define SMP_SYNC_INCLUDED

static inline void lock(uint8_t *lock) {
  asm("mov al, 1\n"
      "spinlock:\n"
      "lock xchgb [%0], al\n"
      "test al, al\n"
      "pause\n"
      "jnz spinlock\n"
      ::"r"(lock):"al");
}

static inline void unlock(uint8_t *lock) {
  asm("lock andb [%0], 0"::"r"(lock));
}

static inline void waitup(uint8_t *loto) {
}

#define CREATE_LOTO(name)

#endif
							
								
								
									
										
BIN src/indigo_os (binary file not shown)
@@ -26,10 +26,11 @@


typedef struct __attribute__((packed)) heap_chunk {
  unsigned int free:1;
  unsigned int size:4; //will use with flags later if needed
  unsigned int lsize:4;
  unsigned long reserved:55;
  unsigned int free:1;
  unsigned int mutex:1;
  unsigned long reserved:54;
  struct heap_chunk *fd;
  struct heap_chunk *bk;
} chunk;
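Review note: the re-packed bitfields keep the header at 64 bits (4+4+1+1+54, versus 1+4+4+55 before), so the 24-byte chunk header that malloc and realloc rely on (sizeof(chunk) offsets, and the literal old_chunk - 24 below) is unchanged. A compile-time check would make that invariant explicit; a sketch, assuming C11's static_assert:

#include <assert.h>

//64 bitfield bits (8 bytes, packed) + fd + bk pointers = 24 bytes;
//realloc below peels the header back with old_chunk - 24
static_assert(sizeof(chunk) == 24, "heap_chunk header must stay 24 bytes");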
@@ -203,6 +204,7 @@ void *malloc(size_t size) {
  return (void *)on_chunk + sizeof(chunk);

}

void *realloc(void *old_chunk, size_t size) {
  void *new_chunk = malloc(size);
  memcpy(new_chunk, old_chunk, CHUNK_SIZE_FROM_INDEX(((chunk *)(old_chunk-24))->size));
@@ -18,15 +18,24 @@
#include <smp.h>

//testing headers
//#include <testmalloc.h>
#include <smp_racetest.h>


void kmain() {
  printf("Kernel started on core %i\n", get_coreid());
  sync_malloc();
  PANIC(KERNEL_PANIC_KERNEL_RETURNED);
}

static bool smp_unlocked = false;
void smp_kinit() {
  printf("Kernel started on core <<<<< \n");

  asm(".wait_for_release:\n"
      "mov al, [%0]\n"
      "test al, al\n"
      "jz .wait_for_release\n"
      ::"m"(smp_unlocked));

  smp_load_idt();
  kmain();
}
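Review note: smp_unlocked is a plain static bool that core 1 writes and the APs poll, with inline asm hiding the read from the optimizer. C11 atomics state the same intent without asm; a hedged sketch (these names are illustrative, not the commit's):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool smp_release_flag = false;

//AP side: spin until the BSP finishes shared init
static void wait_for_release(void) {
  while(!atomic_load_explicit(&smp_release_flag, memory_order_acquire))
    __builtin_ia32_pause(); //PAUSE, as in the trampoline's wait loops
}

//BSP side: publish all prior writes, then open the gate
static void release_cores(void) {
  atomic_store_explicit(&smp_release_flag, true, memory_order_release);
}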
@@ -39,7 +48,8 @@ void kernel_init() {
  pmap_size = map_complete_physical();
  init_klog();
  init_pmap(pmap_size);
  printf("\nKernel started on CPU 1!\n");
  printf("\nKernel started on core 1!\n");
  //test_malloc(100);


  find_root_sdp();
@@ -52,7 +62,12 @@ void kernel_init() {

  clear_screen();
  debug_pzone();
  smp_boot();
  smp_prepare();

  //the rest of this needs to get done before the cores start executing
  init_pmap_smp();
  smp_unlocked = true;

  fix_stack();
  unmap_lowmem();
  kmain();
@@ -1,5 +1,6 @@
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
// TODO clean up variable names
int strncmp(const char *s1, const char *s2, unsigned int n) {
  int i;
@@ -18,6 +19,13 @@ size_t strlen(const char *s) {
  return(len);
}

bool is_empty(const char *s, size_t size) {
  for(size_t i = 0; i < size; i++) {
    if(s[i]) return false;
  }
  return true;
}

int strcmp(const char *s1, const char *s2) {
  int i;
  for(i = 0; ((s1[i] != '\0') && (s2[i] != '\0')); i++) {
@@ -7,11 +7,15 @@
#include <addr.h>
#include <stdbool.h>
#include <cpuid.h>
#include <smp.h>

#include <heap.h>

//just using char because c is a lil bitch and won't let us use void
extern char _kernel_shared_zone_begin;


//expects core_id
#define waitlist_i(y) (((core_id) * sizeof(uintptr_t)) + (y))

//    PAGE MAPPING
#define PAGEMAP_LOCATION 0x10000
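Review note for the testing pass: waitlist_i strides each core's window by sizeof(uintptr_t), which is always 8 here, while init_pmap_smp below allocates corecount * pmap_count pointer slots. Unless pmap_count happens to equal 8, the per-core windows either overlap or run past the allocation. An illustration of the arithmetic with hypothetical values, plus the stride the allocation appears to expect:

//with pmap_count = 2: core 0 touches slots 0..1, but core 1 touches 8..9,
//past the 4 slots a 2-core allocation provides.
//a stride matching init_pmap_smp's sizing would be:
#define waitlist_i_alt(y) (((core_id) * pmap_count) + (y))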
@@ -22,12 +26,21 @@ extern char _kernel_shared_zone_begin;
typedef struct phys_map {
  struct phys_map *next;
  unsigned int max_buddy;

  //has to be a 16 bit variable
  uint16_t mutex; //we might improve the speed of this later

  uint64_t bsize[MAX_BUDDY_ORDER];
  uint64_t *buddy[MAX_BUDDY_ORDER];
} pmap_t;

static pmap_t *first_pmap;

//I'd like to find out a way to get rid of this... we only use it once
static unsigned int pmap_count = 0;

static pmap_t **waiting_pmaps;

#define MEM_AVAILABLE             1
#define MEM_RESERVED              2
#define MEM_APCI_RECLAIMABLE      3
@@ -161,10 +174,10 @@ void debug_pmap() {
//TODO I know you don't want to, but you need to thoroughly check this.
void pfree(void *addr, size_t size) {
  int blevel = 0;
  uint64_t *onbyte;     //the byte our buddy resides on in the current level
  uint64_t page_bitloc; //how many bits we are away from buddy[0]. Helps calculate bitshifts
  int bbitlen;          //length of free'd area in current level
  int lshift;           //lshift is how many bits we shift over, rightbit is what it sounds like dumbass
  uint64_t *onbyte;
  uint64_t page_bitloc;
  int bbitlen;
  int lshift;
  pmap_t *pmap = first_pmap;

  /* note: there's no security check to see if the page is actually allocated,
@@ -176,7 +189,7 @@ void pfree(void *addr, size_t size) {

  if(((uintptr_t)addr & 4095) || (size & 4095)) {
    PANIC(KERNEL_PANIC_INVALID_PFREE);
    return; //TODO [minor] some more specificity, not a huge deal
    return;
  }
  size /= 0x1000;
  for(; pmap != 0; pmap = pmap->next) {
@@ -221,7 +234,7 @@ void pfree(void *addr, size_t size) {
      if(bbitlen <= 1) {
        *onbyte |= ((uint64_t)1 << lshift);
        break;
      } else if(bbitlen & 1) { //check me
      } else if(bbitlen & 1) {
        size -= (1 << blevel);
        *onbyte |= ((uint64_t)1 << (bbitlen + lshift));
      }
@@ -232,12 +245,16 @@ void pfree(void *addr, size_t size) {


void *palloc(size_t size) {
  uint8_t core_id = get_coreid();
  bool self_alloc;
  int min_blevel, blevel;
  uint64_t bbit, unshifted_entry, threshold, bloc; //TODO move when you've confirmed casting stuff
  uint64_t bbit, unshifted_entry, threshold, bloc;
  uint64_t buddy_i, *ret, *bentry;
  int itercount;
  bool unlocked_pmaps_searched = false;
  pmap_t *pmap = first_pmap;
  unsigned int waitingp_i = 0;
  unsigned int searchingp_i;


  if(size == 0) return 0;
@@ -263,7 +280,19 @@ void *palloc(size_t size) {
  }

  for(blevel = min_blevel; blevel < MAX_BUDDY_ORDER; blevel++) {
    for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
    //for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
    //while(!unlocked_pmaps_searched ||
    //   is_empty(waiting_pmaps[core_id], sizeof(&pmap) * pmap_count)) {
    pmap = first_pmap;
    while(pmap) {

      if(get_set_mutex(&pmap->mutex)) {
        if(!unlocked_pmaps_searched) waiting_pmaps[waitlist_i(waitingp_i++)] = pmap;
        goto get_next_pmap;
      }


      if(unlocked_pmaps_searched) waiting_pmaps[waitlist_i(waitingp_i)] = 0;

      for(buddy_i = 0; buddy_i < pmap->bsize[blevel]; buddy_i++) {
        if(pmap->buddy[blevel][buddy_i] > (uint64_t)0) {
@@ -295,11 +324,42 @@ void *palloc(size_t size) {
            }
            *bentry |= (unshifted_entry << bloc);
          }
          if(!self_alloc) bzero(ret, size * 0x1000);
          pmap->mutex = 0;
          if(!self_alloc) bzero(ret, size * 0x1000); //TODO do we really need to bzero here?
          return ret;
        }
      }
get_next_pmap:
      pmap->mutex = 0;
      if(unlocked_pmaps_searched) {
        pmap = 0;
        for(searchingp_i = waitingp_i + 1; searchingp_i < pmap_count; searchingp_i++) {
          if(waiting_pmaps[waitlist_i(searchingp_i)]) {
            pmap = waiting_pmaps[waitlist_i(searchingp_i)];
            break;
          }
        }
        if(!pmap) {
          for(searchingp_i = 0; searchingp_i <= waitingp_i; searchingp_i++) {
            if(waiting_pmaps[waitlist_i(searchingp_i)]) {
              pmap = waiting_pmaps[waitlist_i(searchingp_i)];
              break;
            }
          }
        }
      }
      else {
        if(!pmap->next) {
          pmap = waiting_pmaps ? waiting_pmaps[waitlist_i(0)] : 0;
          unlocked_pmaps_searched = true;
        }
        else {
          pmap = pmap->next;
        }
      }
    }
    unlocked_pmaps_searched = false;
    waitingp_i = 0;
  }
  return 0;
}
@@ -374,12 +434,19 @@ size_t map_complete_physical() {
  }
}

pmap_t *init_pmap(size_t pagetable_size) {
void init_pmap_smp() {
  size_t pmap_arrsize = corecount * pmap_count * sizeof(waiting_pmaps);
  waiting_pmaps = malloc(pmap_arrsize);
  bzero(waiting_pmaps, pmap_arrsize);
}

void *init_pmap(size_t pagetable_size) {
  pmap_t *pmap, *last_pmap;
  struct memory_table *zones = (void *)ZONE_MAP;
  int budorder, zone_i;
  uint64_t pmap_size, pmap_bbitsize, zone_size;
  bool first_pmap_i = true;



@@ -387,6 +454,7 @@ pmap_t *init_pmap(size_t pagetable_size) {
    if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1) &&
        zones[zone_i].length >= (0x2000)) {
      printf("found allocatable map at %p\n", zones[zone_i].base);
      pmap_count++;
      last_pmap = pmap;
      if(zones[zone_i].base == (void *)0x100000) {
        zone_size = zones[zone_i].length - (((uint64_t)&_kernel_shared_zone_begin - 0x100000) + pagetable_size);
@@ -396,7 +464,6 @@ pmap_t *init_pmap(size_t pagetable_size) {
        zone_size = zones[zone_i].length;
        pmap = PHYS_TO_VIRT(zones[zone_i].base);
      }
      if(first_pmap_i) {
        pmap->next = NULL;
        first_pmap_i = false;
@@ -405,6 +472,8 @@ pmap_t *init_pmap(size_t pagetable_size) {
        pmap->next = last_pmap;
      }

      pmap->mutex = 0;

      for(budorder = 0; budorder < MAX_BUDDY_ORDER; budorder++) {
        pmap_bbitsize = zone_size / (0x1000 << budorder);
        pmap->bsize[budorder] = DIV_ROUND_UP(pmap_bbitsize , 64);
@@ -440,8 +509,8 @@ pmap_t *init_pmap(size_t pagetable_size) {
      }

      pmap_size = (uint64_t)(pmap->buddy[pmap->max_buddy] + pmap->bsize[pmap->max_buddy]) - (uint64_t)pmap;
      first_pmap = pmap; //we spoof palloc into allocating from the specific required pmap.
      palloc(pmap_size); //TODO (MAJOR BUG) something isn't right, I don't think
      first_pmap = pmap;
      palloc(pmap_size);
    }
  }
  return pmap;
@@ -7,10 +7,9 @@ static bool hw_random = false;
static unsigned long int seed = -1;

void randinit() {
  unsigned int unused, eax, ecx;
  eax = 0;
  unsigned int unused, ecx;
  ecx = 0;
  __get_cpuid(1, &eax, &unused, &ecx, &unused);
  __get_cpuid(1, &unused, &unused, &ecx, &unused);
  hw_random = (ecx >> 30) & 1;
  printf("Kernel random source: %s.\n", (hw_random) ? "rdrand" : "pseudo");
}
@@ -31,6 +30,6 @@ unsigned int randint() {
    seed = 1103515245 * seed + 12345;
    return (unsigned int)(seed / 65536) % 32768;
  }
  return 0;
  return random_long;
}
@@ -53,11 +53,30 @@ struct cpu_info {


static struct gdt_descriptor gdtr;
struct cores_info cores;

void smp_boot() {
struct apicid_to_coreid_deleteme { //WILL BE DELETED AFTER THREADING EXISTS TODO
  uint8_t apic_id;
  uint8_t core_id;
};

static struct apicid_to_coreid_deleteme *apicid_to_coreid;


uint8_t get_coreid() { //WILL BE DELETED AFTER THREADING EXISTS TODO
  uint32_t ebx, unused_cpuid;
  uint8_t apic_id;
  __get_cpuid(1, &unused_cpuid, &ebx, &unused_cpuid, &unused_cpuid);
  apic_id = ebx >> 24;
  for(uint8_t core = 0; core < corecount; core++) {
    if(apicid_to_coreid[core].apic_id == apic_id) return apicid_to_coreid[core].core_id;
  }
  return 0;
}
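Review note: CPUID leaf 1 returns the initial APIC ID in EBX bits 31:24, which is what the loop above matches against the table built in smp_prepare. One thing to verify there: apicid_to_coreid is allocated with corecount - 1 entries below, but the fill loop writes corecount of them. A self-contained sketch of the APIC-ID read, assuming GCC's cpuid.h:

#include <cpuid.h>
#include <stdint.h>

//minimal sketch: initial APIC ID of the calling core (CPUID.1:EBX[31:24])
static inline uint8_t initial_apic_id(void) {
  uint32_t eax, ebx, ecx, edx;
  __get_cpuid(1, &eax, &ebx, &ecx, &edx);
  return (uint8_t)(ebx >> 24);
}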

void smp_prepare() {
  uint8_t cores_active = 1;
  uint8_t stack_i = 0, core_i;
  struct cores_info cores;
  struct icr_reg icr;
  struct cpu_info *stackarray;
  get_coreinfo(&cores);
@@ -72,11 +91,18 @@ void smp_boot() {
  corecount = cores.corecount;

  stackarray = malloc(sizeof(struct cpu_info) * (cores.corecount - 1));
  apicid_to_coreid = malloc(sizeof(struct apicid_to_coreid_deleteme) * (cores.corecount - 1));
  for(core_i = 0; core_i < cores.corecount; core_i++) {

    //WILL BE DELETED AFTER THREADING EXISTS TODO
    apicid_to_coreid[core_i].apic_id = cores.apic_id[core_i];
    apicid_to_coreid[core_i].core_id = core_i;

    if(cores.apic_id[core_i] == cores.bsp) continue;
    stackarray[stack_i].apic_id = cores.apic_id[core_i];
    stackarray[stack_i].stack = palloc(0x1000);
    stackarray[stack_i].secondary_bsp = (stack_i)? false : true;

    stack_i++;
  }
  for(stack_i = 0; stack_i < (cores.corecount - 1); stack_i++) {
@@ -4,9 +4,13 @@
#include <heap.h>
#include <libc.h>
#include <random.h>
#include <smp.h>
#include <paging.h>

//will delete later

static uint8_t lockeroni = 0;

void test_malloc(unsigned int cnt) {
  void *testchunks[cnt];
  unsigned int rindex[cnt], testchunk_size, i, x;
@@ -15,7 +19,7 @@ void test_malloc(unsigned int cnt) {
  for(x = 0; x < cnt; x++) {
    testchunk_size = (CHUNK_SIZE_FROM_INDEX(randint() % 7) - 24);
    testchunks[x] = malloc(testchunk_size);
    //printf("ALLOCATING CHUNK %p SIZE %i\n", (void *)testchunks[x] - 24, testchunk_size);
    printf("ALLOCATING CHUNK %p SIZE %i\n", (void *)testchunks[x] - 24, testchunk_size);
  }
  for(x = 0; x < cnt;) {
    i = randint() % cnt;
@@ -23,13 +27,26 @@ void test_malloc(unsigned int cnt) {
    rindex[i] = x;
    x++;
  }

  for(x = 0; x < cnt; x++) {
    //printf("FREEING CHUNK %p\n", (void *)testchunks[rindex[x]]);
    free(testchunks[rindex[x]]);
  }

  printf("\nmalloc tester:\n");
  printf("THIS NEEDS TO BE EMPTY______________\n");
  debug_heap();
  printf("____________________________________\n");

  unlock(&lockeroni);
}
uint8_t cores_waiting = 4;
void sync_malloc() {
  void *mtest;
  asm("lock decb [%0]\n"
      "spinlock:\n"
      "cmpb [%0], 0\n"
      "jnz spinlock\n"
      ::"m"(cores_waiting));
  mtest = palloc(0x1000);
  printf("Make sure none of these match -> %lx\n", mtest);
}
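Review note: the asm in sync_malloc is a countdown barrier: every core decrements cores_waiting, then spins until it reaches zero, so all cores hit palloc at the same moment (note cores_waiting is hard-coded to 4, so the race test only releases on a 4-core setup). The same barrier written with C11 atomics, as a sketch:

#include <stdatomic.h>

static atomic_uint cores_remaining = 4; //assumption: 4 cores, matching above

//decrement, then spin until every core has arrived
static void barrier_wait(void) {
  atomic_fetch_sub_explicit(&cores_remaining, 1, memory_order_acq_rel);
  while(atomic_load_explicit(&cores_remaining, memory_order_acquire) != 0)
    __builtin_ia32_pause();
}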
@@ -185,6 +185,7 @@ pause
jnz .wait_for_gdt
lgdt [final_gdt_descriptor]


mov rax, smp_kinit
jmp rax
@@ -1,5 +1,4 @@
LD=../compiler/indigo_gcc/bin/x86_64-elf-ld
#CC=../compiler/indigo_gcc/bin/x86_64-elf-gcc
CC=../compiler/indigo_gcc/bin/x86_64-pc-linux-gnu-gcc
OBJCPY=../compiler/indigo_gcc/bin/x86_64-elf-objcopy
INC=-I./include
@@ -60,6 +59,9 @@ smp_trampoline.o:
run:
	qemu-system-x86_64 $(QEMU_OPTS) $(QEMU_PRINTDEBUG) -nographic

run_quiet:
	qemu-system-x86_64 $(QEMU_OPTS) $(QEMU_PRINTDEBUG) -nographic 2>/dev/null

gdb: indigo_os
	tmux new-session -s os_gdb "qemu-system-x86_64 -S -s $(QEMU_OPTS) -nographic"\;\
		split-window -h "gdb -x debug/gdbinit.gdb; killall qemu-system-x86_64"
							
								
								
									
src/page_backup.c (465 lines, new file)

@@ -0,0 +1,465 @@
#include <printf.h>
#include <stdint.h>
#include <libc.h>
#include <limits.h>
#include <panic.h>
#include <math.h>
#include <addr.h>
#include <stdbool.h>
#include <cpuid.h>
#include <smp.h>

//just using char because c is a lil bitch and won't let us use void
extern char _kernel_shared_zone_begin;



//    PAGE MAPPING
#define PAGEMAP_LOCATION 0x10000

#define MAX_BUDDY_ORDER           8
#define PALLOC_AUTO_BLEVEL MAX_BUDDY_ORDER

typedef struct phys_map {
  struct phys_map *next;
  unsigned int max_buddy;

  bool mutex; //we might improve the speed of this later

  uint64_t bsize[MAX_BUDDY_ORDER];
  uint64_t *buddy[MAX_BUDDY_ORDER];
} pmap_t;

static pmap_t *first_pmap;

#define MEM_AVAILABLE             1
#define MEM_RESERVED              2
#define MEM_APCI_RECLAIMABLE      3
#define MEM_APCI_NVS              4
#define MEM_BAD                   5

//    ZONES
#define ZONE_MAP_PLOC 0x7000
#define ZONE_MAP PHYS_TO_VIRT(ZONE_MAP_PLOC)



//reorganized (moved) from header
typedef struct __attribute__((packed)) {
  unsigned int present : 1;            // present, must be one when accessed.
  unsigned int read_write : 1;         // if set to one, read and write is set
  unsigned int user : 1;               // For separating CPL 0-2 and 3+
  unsigned int writethrough_cache : 1; // honestly maybe I should look into caching
  unsigned int cachable : 1;           // hardware caching. 0 is enabled, what's the worst that could happen?
  unsigned int accessed : 1;           // we'll never use any of these!
  unsigned int zg0 : 1;                // needs to be (and will be) zero'd
  unsigned int size : 1;               // if set to 1, this entry points to physical memory
  unsigned int zg1 : 1;                // needs to be (and will be) zero'd
  unsigned int software_marks : 3;     // available for our own use, I doubt we'll use it in such a simple thing

  uintptr_t base_ptr : 40;
  unsigned int avail:11;
  unsigned int no_exec:1;
} page_table;


struct memory_table {
  void *base;
  uint64_t length;
  uint32_t type;
  uint32_t ACPI;
} __attribute__((packed));

static bool NX_capable;
static bool huge_page_capable;


void get_mem_capabilities() {
  uint32_t unused, edx;
  __get_cpuid(0x80000001, &unused, &unused, &unused, &edx);
  huge_page_capable = (edx >> 26) & 1;
  NX_capable = (edx >> 20) & 1;
}

void fix_stack() {
  struct stack_frame *frame;

  asm("addq rsp, %0\n"
      "addq rbp, %0\n"
      "mov %0, rbp"
      :"=r"(frame)
      :"r"(PA_OFFSET));

  while(frame->next != 0) {
    printf("%p\n", frame->function_base);
    frame->next = PHYS_TO_VIRT((void *)frame->next);
    frame = frame->next;
  }
}


void unmap_lowmem() {
  //[future]
  //eventually, you should use the function that unmaps pages when you write it
  page_table *entry = (page_table *)PAGEMAP_LOCATION;
  entry[0].present = 0;
}

void debug_pzone() {
  struct memory_table *memtable = (void *)ZONE_MAP;
  printf(" __________________________________________________________________________\n");
  printf("| type\tstart\t\t\tend\t\t\tsize\t\t   |\n");
  printf("|--------------------------------------------------------------------------|\n");
  for(unsigned int i = 0; memtable[i].length > 0; i++) {
    printf("| %u %u\t0x%p\t0x%p\t0x%p |\n", memtable[i].type, memtable[i].ACPI, memtable[i].base, (memtable[i].base + memtable[i].length), memtable[i].length);
  }
  printf("----------------------------------------------------------------------------\n");
}

void ram_stresser() {
  struct memory_table *memtable = (void *)ZONE_MAP - PA_OFFSET;
  memtable[6].length = 0x10000;
}

void debug_pmap() {
  pmap_t *pmap = first_pmap;
  int pmap_i = 0, order;
  uint64_t blong_i, bbit_i, buddy_chunksize, omit_cnt;
  printf("Maximum buddy order: %u (up to %#x sized chunks)\n", MAX_BUDDY_ORDER, ((0x1000 << MAX_BUDDY_ORDER) - 1));
  for(; pmap != 0; pmap = pmap->next) {
    printf("Table %u:\n"
        "\tPhysical/pmap start:\t%#p\n"
        "\tTable Size:\t%u\n", pmap_i, pmap,
        (uint64_t)(pmap->buddy[MAX_BUDDY_ORDER - 1] + pmap->bsize[MAX_BUDDY_ORDER - 1]) - (uint64_t)pmap);

    for(order = 0; order <= MAX_BUDDY_ORDER - 1; order++) {
      buddy_chunksize = (0x1000 << order); //TODO just put it in the for loop
      printf("\tbuddy[%u]:\n"
          "\t\tAddress:\t%#p\n"
          "\t\tSize:\t\t%u\n"
          "\t\tBuddies:\t\t\n", order, pmap->buddy[order], pmap->bsize[order]);

      omit_cnt = 0;

      for(blong_i = 0; blong_i < pmap->bsize[order]; blong_i++) {
        for(bbit_i = 0; bbit_i < 64; bbit_i++) {
          if((pmap->buddy[order][blong_i]) & ((uint64_t)1 << bbit_i)) {
            if((omit_cnt < 20) || (blong_i == pmap->bsize[order] - 1)) {
              printf("address %#p\tbit %u: %p\t is free\n",
                pmap->buddy[order] + blong_i,
                bbit_i,
                ((uint64_t)pmap  - PA_OFFSET) + ((((blong_i * 64) + bbit_i) * buddy_chunksize)));
            }
            omit_cnt++;
            if(omit_cnt == 20) {
              printf("\t\t\t[more entries omitted]\n");
            }
          }
        }
      }
    }
    pmap_i++;
  }
}

//TODO I know you don't want to, but you need to thoroughly check this.
void pfree(void *addr, size_t size) {
  int blevel = 0;
  uint64_t *onbyte;     //the byte our buddy resides on in the current level
  uint64_t page_bitloc; //how many bits we are away from buddy[0]. Helps calculate bitshifts
  int bbitlen;          //length of free'd area in current level
  int lshift;           //lshift is how many bits we shift over, rightbit is what it sounds like dumbass
  pmap_t *pmap = first_pmap;

  /* note: there's no security check to see if the page is actually allocated,
   * or if we are freeing the table itself.
   * This should be okay, as only the kernel will be calling it.
   * If it gets too messy we can always come back.
   */


  if(((uintptr_t)addr & 4095) || (size & 4095)) {
    PANIC(KERNEL_PANIC_INVALID_PFREE);
    return; //TODO [minor] some more specificity, not a huge deal
  }
  size /= 0x1000;
  for(; pmap != 0; pmap = pmap->next) {
    page_bitloc = (addr - (void *)pmap) / 0x1000;
    onbyte = pmap->buddy[0] + (page_bitloc / 64);
    if((addr >= (void *)pmap) && onbyte < pmap->buddy[1]) break;
  }

  while(blevel < MAX_BUDDY_ORDER) {
    lshift = (page_bitloc / (1 << blevel)) & 63;
    onbyte = pmap->buddy[blevel] + ((page_bitloc / 64) / (1 << blevel));
    bbitlen = size / (1 << blevel);


    //TODO clean up this part ------------------------------------------------------------- (below)
    if(bbitlen <= 1) {
      if(lshift & 1) {
        if((*onbyte >> (lshift - 1)) & 1) {
          *onbyte &= ~(((uint64_t)1 << (lshift - 1)) | ((uint64_t)1 << lshift));
          size += (1 << blevel);
          page_bitloc -= (1 << blevel);
          bbitlen = size / (1 << blevel);
        }
      }
      else if((*onbyte >> (lshift + 1)) & 1) {
        *onbyte &= ~(((uint64_t)1 << (lshift + 1)) | ((uint64_t)1 << lshift));
        size += (1 << blevel);
        bbitlen = size / (1 << blevel);
      }
    }
    else if(((lshift + bbitlen) & 1) && ((*onbyte >> (lshift + bbitlen)) & 1)) {
      *onbyte ^= ((uint64_t)1 << (lshift + bbitlen));
      size += (1 << blevel);
      bbitlen = size / (1 << blevel);
    }
    //TODO clean up this part ------------------------------------------------------------- (above)

    if((!((size - 1) & size)) && (bbitlen != 1)) {
      blevel = 63 - __builtin_clzl(size);
    }
    else {
      if(bbitlen <= 1) {
        *onbyte |= ((uint64_t)1 << lshift);
        break;
      } else if(bbitlen & 1) { //check me
        size -= (1 << blevel);
        *onbyte |= ((uint64_t)1 << (bbitlen + lshift));
      }
      blevel++;
    }
  }
}


void *palloc(size_t size) {
  bool self_alloc;
  int min_blevel, blevel;
  uint64_t bbit, unshifted_entry, threshold, bloc; //TODO move when you've confirmed casting stuff
  uint64_t buddy_i, *ret, *bentry;
  int itercount;
  pmap_t *pmap = first_pmap;


  if(size == 0) return 0;
  if(size & 4095) {
    size = DIV_ROUND_UP(size, 0x1000);
  }
  else {
    size = size / 0x1000;
  }

  //checking if pmap has been initialized; if not we've been called to self allocate
  //the first buddy should never be allocated; that's where our pmap lives
  if(pmap->buddy[pmap->max_buddy][0] & 1) {
    self_alloc = true;
    min_blevel = pmap->max_buddy;
  }
  else {
    //log(size, 2)
    self_alloc = false;
    min_blevel = 63 - __builtin_clzl(size);
    if(size & (size - 1)) min_blevel++;
    if(min_blevel > MAX_BUDDY_ORDER - 1) return 0;
  }

  for(blevel = min_blevel; blevel < MAX_BUDDY_ORDER; blevel++) {
    for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
      //pmap->mutex = true;
      /**
      if(!maps_transversed && get_set_mutex(&pmap->mutex)) {
        //change get_coreid once we multithread
        asm("mov al, 1\n"
            "mov cl, %0\n"
            "shl al, cl\n"
            "lock or [%1], al\n"
           ::"r"(get_coreid()), "m"(pmap->threads_searched)
           :);
      }
      **/

      for(buddy_i = 0; buddy_i < pmap->bsize[blevel]; buddy_i++) {
        if(pmap->buddy[blevel][buddy_i] > (uint64_t)0) { //found buddy
          bentry = &pmap->buddy[blevel][buddy_i];
          bbit = __builtin_ctzl(*bentry);
          bloc = bbit;

          *bentry ^= (uint64_t)1 << bbit;

          ret = (((buddy_i * 64) + bbit) * (0x1000 << blevel)) + (void *)pmap;

          threshold = 0b11;

          itercount = 1;
          for(blevel--; blevel >= 0; blevel--) {
            bentry = pmap->buddy[blevel] + ((bentry - pmap->buddy[blevel + 1]) * 2);
            itercount++;
            if(bloc >= 32) bentry += 1;
            bloc = (bloc * 2) & 63; // will be the amount we need to shift
            bbit = ceil((float)size / (1 << blevel));


            unshifted_entry = ((uint64_t)1 << bbit) & threshold;
            if(unshifted_entry) {
              threshold = ((uint64_t)1 << (bbit * 2)) - 1;
            }
            else {
              threshold = (threshold << 2) | threshold;
            }
            *bentry |= (unshifted_entry << bloc);
          }
          if(!self_alloc) bzero(ret, size * 0x1000); //TODO do we really need to bzero here?
          return ret;
        }
      }
    }
  }
  return 0;
}


//returns size of pages needed
size_t map_complete_physical() {
  uint64_t total_mem;
  unsigned int pdpe_cnt, pde_cnt, pde_max_i;
  int zone_i, entry_i;
  struct memory_table *zones = (void *)ZONE_MAP_PLOC;


  page_table *pml4 = (page_table *)PAGEMAP_LOCATION;
  page_table *pdpe = (page_table *)&_kernel_shared_zone_begin;
  page_table *pde;

  for(zone_i = 0; zones[zone_i].length > 0; zone_i++);
  total_mem = (uint64_t)zones[zone_i - 1].base + zones[zone_i - 1].length;

  pdpe_cnt = (total_mem + (0x40000000 - 1)) / 0x40000000;


  entry_i = (PA_OFFSET >> 39) & 0x1ff;
  pml4[entry_i].base_ptr = (uintptr_t)&_kernel_shared_zone_begin >> 12;
  pml4[entry_i].read_write = 1;
  pml4[entry_i].user = 0;
  pml4[entry_i].size = 0;
  pml4[entry_i].no_exec = 1;
  pml4[entry_i].present = 1;

  if(huge_page_capable) {
    for(int pdpe_i = 0; pdpe_i < pdpe_cnt; pdpe_i++) {
      pdpe[pdpe_i].base_ptr = pdpe_i << 18;
      pdpe[pdpe_i].read_write = 1;
      pdpe[pdpe_i].user = 0;
      pdpe[pdpe_i].size = 1;
      pdpe[pdpe_i].no_exec = NX_capable;
      pdpe[pdpe_i].present = 1;
    }
    return pdpe_cnt * 0x1000;
  }
  else {
    pde_cnt = (total_mem + 0x100000) / 0x200000;
    for(int pdpe_i = 0; pdpe_i < pdpe_cnt; pdpe_i++) {
      pde = (page_table *)(&_kernel_shared_zone_begin + (pdpe_cnt * 0x1000) + (pdpe_i * 0x1000));

      if((pdpe_i < pdpe_cnt - 1) || (!(pde_cnt & 511))) {
        pde_max_i = 512;
      }
      else {
        pde_max_i = pde_cnt & 511;
      }

      pdpe[pdpe_i].base_ptr = (uintptr_t)pde >> 12;
      pdpe[pdpe_i].read_write = 1;
      pdpe[pdpe_i].user = 0;
      pdpe[pdpe_i].size = 0;
      pdpe[pdpe_i].no_exec = NX_capable;
      pdpe[pdpe_i].present = 1;

      for(int pde_i = 0; pde_i < pde_max_i; pde_i++) {
        pde[pde_i].base_ptr = ((pdpe_i << 9) + pde_i) << 9;
        pde[pde_i].read_write = 1;
        pde[pde_i].user = 0;
        pde[pde_i].size = 1;
        pde[pde_i].no_exec = NX_capable;
        pde[pde_i].present = 1;
      }
    }
    return (pdpe_cnt * 2) * 0x1000;
  }
}

pmap_t *init_pmap(size_t pagetable_size) {
  pmap_t *pmap, *last_pmap;
  struct memory_table *zones = (void *)ZONE_MAP;
  int budorder, zone_i;
  uint64_t pmap_size, pmap_bbitsize, zone_size;
  bool first_pmap_i = true;



  for(zone_i = 0; zones[zone_i].length > 0; zone_i++) {
    if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1) &&
        zones[zone_i].length >= (0x2000)) {
      printf("found allocatable map at %p\n", zones[zone_i].base);
      last_pmap = pmap;
      if(zones[zone_i].base == (void *)0x100000) {
        zone_size = zones[zone_i].length - (((uint64_t)&_kernel_shared_zone_begin - 0x100000) + pagetable_size);
        pmap = PHYS_TO_VIRT((void *)&_kernel_shared_zone_begin + pagetable_size);
      }
      else {
        zone_size = zones[zone_i].length;
        pmap = PHYS_TO_VIRT(zones[zone_i].base);
      }
      if(first_pmap_i) {
        pmap->next = NULL;
        first_pmap_i = false;
      }
      else {
        pmap->next = last_pmap;
      }

      pmap->mutex = false;

      for(budorder = 0; budorder < MAX_BUDDY_ORDER; budorder++) {
        pmap_bbitsize = zone_size / (0x1000 << budorder);
        pmap->bsize[budorder] = DIV_ROUND_UP(pmap_bbitsize , 64);
        if(budorder) {
          pmap->buddy[budorder] = pmap->buddy[budorder - 1] + pmap->bsize[budorder - 1];
        }
        else {
          pmap->buddy[0] = (void *)pmap + sizeof(*pmap);
        }
        if(budorder < MAX_BUDDY_ORDER - 1) {
          bzero(pmap->buddy[budorder], pmap->bsize[budorder] * 8);
          if(pmap_bbitsize & 1) {
            pmap->buddy[budorder][pmap->bsize[budorder] - 1] =
              ((uint64_t)1 << ((pmap_bbitsize - 1) & 63));
          }
          if(pmap_bbitsize == 1) {
            pmap->max_buddy = budorder;
            for(budorder++; budorder < MAX_BUDDY_ORDER; budorder++) {
              pmap->buddy[budorder] = 0;
              pmap->bsize[budorder] = 0;
            }
            break;
          }
        }
        else {
          pmap->max_buddy = MAX_BUDDY_ORDER - 1;
          memset(pmap->buddy[budorder], UINT8_MAX, pmap->bsize[budorder] * 8);
          if((pmap_bbitsize / 64) != (pmap->bsize[budorder])) {
            pmap->buddy[budorder][pmap->bsize[budorder] - 1] =
              (((uint64_t)1 << (pmap_bbitsize & 63)) - 1);
          }
        }
      }

      pmap_size = (uint64_t)(pmap->buddy[pmap->max_buddy] + pmap->bsize[pmap->max_buddy]) - (uint64_t)pmap;
      first_pmap = pmap; //we spoof palloc into allocating from the specific required pmap.
      palloc(pmap_size);
    }
  }
  return pmap;
}