palloc/pfree found smp safe

This commit is contained in:
Brett Weiland 2021-09-24 14:20:20 -05:00
parent cf7cd8be60
commit b62706c9f2
12 changed files with 148 additions and 33 deletions

View File

@ -20,13 +20,21 @@ With all that out of the way, check out the features.
This project will likely go very slowly and take multiple years, as I'm dealing with things such as college and adulting. If there are blatantly obvious bugs, or things look unfinished, I'm still working on them.
Additionally, there's some pretty messy code. I've improved while working on this project, so expect the messier parts to be cleaned up some time down the line.
# Just completed
1. palloc()/pfree() is now smp safe
# What I'm working on now
1. Ensuring palloc() and malloc() are smp safe (current)
1. Ensuring malloc()/free() is smp safe (current)
2. Creating kernel space threads with primitive scheduler
3. VFS filesystem
4. ACPI, PCI
5. ext2 filesystem
6. Userspace processes!!!
7. IO buffering, process waitlists
8. Improve scheduler
8. Implement permanent scheduler
9. Let's not get too ahead of ourselves...
# Backlog
## These are things I want to eventually fix, but they don't affect current development
1. Making the bootloader pass kernel command parameters from the FAT filesystem. The bootloader has no filesystem support and is part of the kernel, so this will be hard
2. Making mutexes more space efficient; right now we use a 16-bit variable for every boolean operation (see the sketch after this list)
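A rough, hypothetical sketch of that backlog item (none of these names exist in the repo): pack many locks into a single word, one bit each, instead of a 16-bit variable per boolean.

#include <stdint.h>

typedef volatile uint64_t bitlock_t;                 //up to 64 independent locks in one word

static inline void bit_lock(bitlock_t *word, unsigned bit) {
	uint64_t mask = 1ULL << bit;
	//spin until we are the one who flipped the bit from 0 to 1
	while(__atomic_fetch_or(word, mask, __ATOMIC_ACQUIRE) & mask)
		__builtin_ia32_pause();              //same role as the pause in smp.h
}

static inline void bit_unlock(bitlock_t *word, unsigned bit) {
	__atomic_fetch_and(word, ~(1ULL << bit), __ATOMIC_RELEASE);
}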

View File

@ -3,7 +3,6 @@ symbol-file debug/debug_syms.o
set scheduler-locking step
hb page.c:357
define cs2bs
print (1 << (5 + $arg0))

View File

@ -53,6 +53,7 @@ void panic(int reason, void *frame_p, struct registers *regs);
#define KERNEL_PANIC_INVALID_IOAPIC_VEC 36
#define KERNEL_PANIC_HPET_REQUIRED 37
#define KERNEL_PANIC_SMP_FAILED 38
#define KERNEL_PANIC_PALLOC_TOO_LARGE 39
#define DEV_EMAIL "brett_weiland@bpcspace.com"

View File

@ -7,6 +7,7 @@
void smp_prepare();
extern uint8_t corecount;
/**
static inline void lock(uint8_t *lock) {
asm("mov al, 1\n"
"spinlock:\n"
@ -16,30 +17,45 @@ static inline void lock(uint8_t *lock) {
"jnz spinlock\n"
::"r"(lock):"al");
}
**/
static inline void lock(uint16_t *mutex) {
asm(".spinlock_%=:\n"
"lock bts %0, 0\n"        //atomically test-and-set bit 0; CF gets the old value
"jnc .done_%=\n"          //bit was clear: lock acquired
"pause\n"
"jmp .spinlock_%=\n"      //bit was set: keep spinning
".done_%=:\n"
::"m"(*mutex));
}
static inline void unlock(uint16_t *mutex) {
asm("lock btr %0, 0\n"    //atomically clear bit 0 to release the lock
::"m"(*mutex));
}
/**
static inline void unlock(uint8_t *lock) {
asm("lock andb [%0], 0"::"r"(lock));
}
**/
static inline bool get_set_mutex(uint16_t *mutex) {
bool ret;
asm("lock bts %1, 0\n"
"jc .mutex_taken\n"
"jc .mutex_taken_%=\n"
"mov %0, 0\n"
"jmp .done\n"
".mutex_taken:\n"
"jmp .done_%=\n"
".mutex_taken_%=:\n"
"mov %0, 1\n"
".done:\n"
".done_%=:\n"
:"=r"(ret)
:"m"(*mutex));
return ret;
}
//THIS IS ONLY UNTIL WE GET MULTITHREADING SET UP
uint8_t get_coreid();
#define CREATE_LOTO(name)
#endif
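For context, a minimal usage sketch of the primitives above, assuming this header is included as <smp.h>; the counter and function names are illustrative, not from the repo:

#include <stdint.h>
#include <stdbool.h>
#include <smp.h>                        //lock(), unlock(), get_set_mutex() from above

static uint16_t stats_mutex = 0;        //hypothetical shared state guarded by one mutex
static uint64_t pages_freed = 0;

void note_page_freed(void) {
	lock(&stats_mutex);             //spins on `lock bts` until bit 0 is clear
	pages_freed++;                  //critical section
	unlock(&stats_mutex);           //`lock btr` clears bit 0 to release
}

bool try_note_page_freed(void) {
	if(get_set_mutex(&stats_mutex)) return false;   //already taken, don't block
	pages_freed++;
	unlock(&stats_mutex);
	return true;
}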

View File

@ -1,6 +1,6 @@
#ifndef testmalloc_header
#define testmalloc_header
void test_malloc(unsigned int cnt);
void racetest();
#endif

Binary file not shown.

View File

@ -23,7 +23,7 @@
void kmain() {
printf("Kernal started on core %i\n", get_coreid());
sync_malloc();
racetest();
PANIC(KERNEL_PANIC_KERNEL_RETURNED);
}
@ -49,7 +49,6 @@ void kernel_init() {
init_klog();
init_pmap(pmap_size);
printf("\nKernal started on core 1!\n");
//test_malloc(100);
find_root_sdp();
@ -66,6 +65,9 @@ void kernel_init() {
//the rest of this needs to get done before the cores start executing
init_pmap_smp();
smp_unlocked = true;
fix_stack();

View File

@ -100,7 +100,6 @@ void fix_stack() {
:"r"(PA_OFFSET));
while(frame->next != 0) {
printf("%p\n", frame->function_base);
frame->next = PHYS_TO_VIRT((void *)frame->next);
frame = frame->next;
}
@ -171,14 +170,13 @@ void debug_pmap() {
}
}
//TODO I know you don't want to, but you need to thoroughly check this.
void pfree(void *addr, size_t size) {
int blevel = 0;
uint64_t *onbyte;
uint64_t page_bitloc;
int bbitlen;
int lshift;
pmap_t *pmap = first_pmap;
pmap_t *pmap;
/* note: there's no security check to see if the page is actually allocated,
* or if we are freeing the table itself.
@ -192,10 +190,13 @@ void pfree(void *addr, size_t size) {
return;
}
size /= 0x1000;
for(; pmap != 0; pmap = pmap->next) {
for(pmap = first_pmap; pmap; pmap = pmap->next) {
page_bitloc = (addr - (void *)pmap) / 0x1000;
onbyte = pmap->buddy[0] + (page_bitloc / 64);
if((addr >= (void *)pmap) && onbyte < pmap->buddy[1]) break;
if((addr >= (void *)pmap) && onbyte < pmap->buddy[1]) {
lock(&pmap->mutex);
break;
}
}
while(blevel < MAX_BUDDY_ORDER) {
@ -241,6 +242,7 @@ void pfree(void *addr, size_t size) {
blevel++;
}
}
unlock(&pmap->mutex);
}
@ -276,13 +278,10 @@ void *palloc(size_t size) {
self_alloc = false;
min_blevel = 63 - __builtin_clzl(size);
if(size & (size - 1)) min_blevel++;
if(min_blevel > MAX_BUDDY_ORDER - 1) return 0;
if(min_blevel > MAX_BUDDY_ORDER - 1) PANIC(KERNEL_PANIC_PALLOC_TOO_LARGE);
}
for(blevel = min_blevel; blevel < MAX_BUDDY_ORDER; blevel++) {
//for(pmap = first_pmap; pmap != 0; pmap = pmap->next) {
//while(!unlocked_pmaps_searched ||
// is_empty(waiting_pmaps[core_id], sizeof(&pmap) * pmap_count)) {
pmap = first_pmap;
while(pmap) {
@ -329,12 +328,13 @@ void *palloc(size_t size) {
return ret;
}
}
get_next_pmap:
pmap->mutex = 0;
get_next_pmap:
if(unlocked_pmaps_searched) {
pmap = 0;
for(searchingp_i = waitingp_i + 1; searchingp_i < pmap_count; searchingp_i++) {
if(waiting_pmaps[waitlist_i(searchingp_i)]) {
waitingp_i = searchingp_i;
pmap = waiting_pmaps[waitlist_i(searchingp_i)];
break;
}
@ -342,6 +342,7 @@ get_next_pmap:
if(!pmap) {
for(searchingp_i = 0; searchingp_i <= waitingp_i; searchingp_i++) {
if(waiting_pmaps[waitlist_i(searchingp_i)]) {
waitingp_i = searchingp_i;
pmap = waiting_pmaps[waitlist_i(searchingp_i)];
break;
}
@ -350,6 +351,7 @@ get_next_pmap:
}
else {
if(!pmap->next) {
waitingp_i = 0;
pmap = waiting_pmaps ? waiting_pmaps[waitlist_i(0)] : 0;
unlocked_pmaps_searched = true;
}
@ -515,3 +517,18 @@ void *init_pmap(size_t pagetable_size) {
}
return pmap;
}
void lock_all_maps() {
pmap_t *pmap;
for(pmap = first_pmap; pmap; pmap = pmap->next) pmap->mutex = 1;
pmap = first_pmap;
pmap_count++;
for(pmap = first_pmap;;pmap = pmap->next) {
if(!pmap->next) {
pmap->next = malloc(sizeof(pmap_t));
pmap->next->mutex = 1;
break;
}
}
first_pmap->mutex = 0;
}

View File

@ -6,7 +6,7 @@
#include <isv.h>
#include <smp.h>
static uint8_t panic_lock = 0;
static uint16_t panic_lock = 0;
void panic(int reason, void *eframe_p, struct registers *regs) { // will fill with debugging info later
lock(&panic_lock);
@ -56,6 +56,9 @@ void panic(int reason, void *eframe_p, struct registers *regs) { // will fill wi
case KERNEL_PANIC_SMP_FAILED:
printf("\nNot all cores booted successfully (see text before panic).\n");
break;
case KERNEL_PANIC_PALLOC_TOO_LARGE:
printf("\npalloc was called with a size greater then supported.\n");
break;
default:
printf("\nUnknown panic code %i\n.", reason);
break;

View File

@ -873,7 +873,7 @@ static int _vsnprintf(out_fct_type out, char* buffer, const size_t maxlen, const
///////////////////////////////////////////////////////////////////////////////
static uint8_t printf_lock = 0;
static uint16_t printf_lock = 0;
int printf_(const char* format, ...)
{
//BRETT modification

View File

@ -101,6 +101,7 @@ void smp_prepare() {
if(cores.apic_id[core_i] == cores.bsp) continue;
stackarray[stack_i].apic_id = cores.apic_id[core_i];
stackarray[stack_i].stack = palloc(0x1000);
printf("core %i's stack: %lx\n", core_i, stackarray[stack_i].stack);
stackarray[stack_i].secondary_bsp = (stack_i)? false : true;
stack_i++;

View File

@ -1,3 +1,6 @@
//Disregard bad code here.
//I'm going to delete this whole file once I am confident smp is safe.
#define CHUNK_SIZE_FROM_INDEX(i) ((1 << ((i) + 5)))
#include <printf.h>
@ -9,7 +12,7 @@
//will delete later
static uint8_t lockeroni = 0;
static uint16_t lockeroni = 0;
void test_malloc(unsigned int cnt) {
void *testchunks[cnt];
@ -39,14 +42,79 @@ void test_malloc(unsigned int cnt) {
printf("____________________________________\n");
unlock(&lockeroni);
}
uint8_t cores_waiting = 4;
void sync_malloc() {
void *mtest;
#define DEBUG_CORE_CNT 2
uint8_t cores_waiting = DEBUG_CORE_CNT;
uint8_t cores_waiting_2 = DEBUG_CORE_CNT;
uint8_t cores_waiting_3 = DEBUG_CORE_CNT;
uint8_t cores_waiting_4 = DEBUG_CORE_CNT;
uint8_t cores_waiting_5 = DEBUG_CORE_CNT;
void *smp_outputs[DEBUG_CORE_CNT];
void racetest() {
uint8_t core_id = get_coreid();
uint8_t c_check;
unsigned int core_i;
asm("lock decb [%0]\n"
"spinlock:\n"
"spinlock_%=:\n"
"cmpb [%0], 0\n"
"jnz spinlock\n"
"jnz spinlock_%=\n"
::"m"(cores_waiting));
mtest = palloc(0x1000);
printf("Make sure none of these match -> %lx\n", mtest);
smp_outputs[core_id] = palloc(0x1000);
printf("Make sure none of these match (palloc) -> %lx\n", smp_outputs[core_id]);
free(smp_outputs[core_id]);
asm("lock decb [%0]\n"
"spinlock_%=:\n"
"cmpb [%0], 0\n"
"jnz spinlock_%=\n"
::"m"(cores_waiting_2));
if(core_id == 0) {
for(core_i = 0; core_i < DEBUG_CORE_CNT; core_i++) {
for(c_check = core_i + 1; c_check < DEBUG_CORE_CNT; c_check++) {
if(smp_outputs[core_i] == smp_outputs[c_check]) {
printf("TEST FAILED\n");
for(;;);
}
}
}
printf("TEST PASSED\n");
printf("malloc beforehand: \n");
debug_heap();
}
asm("lock decb [%0]\n"
"spinlock_%=:\n"
"cmpb [%0], 0\n"
"jnz spinlock_%=\n"
::"m"(cores_waiting_3));
smp_outputs[core_id] = malloc(1);
printf("Make sure none of these match (malloc) -> %lx\n", smp_outputs[core_id]);
asm("lock decb [%0]\n"
"spinlock_%=:\n"
"cmpb [%0], 0\n"
"jnz spinlock_%=\n"
::"m"(cores_waiting_4));
if(core_id == 0) {
for(core_i = 0; core_i < DEBUG_CORE_CNT; core_i++) {
for(c_check = core_i + 1; c_check < DEBUG_CORE_CNT; c_check++) {
if(smp_outputs[core_i] == smp_outputs[c_check]) {
printf("TEST FAILED\n");
for(;;);
}
}
}
printf("TEST PASSED\n");
printf("malloc afterhand: \n");
debug_heap();
}
}
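The repeated decrement-and-spin asm blocks in racetest() act as a crude one-shot barrier. The same idea in portable form, as a hypothetical helper that is not in the repo:

#include <stdint.h>

//wait until every core has decremented the counter to zero
static void debug_barrier(volatile uint8_t *cores_waiting) {
	__atomic_sub_fetch(cores_waiting, 1, __ATOMIC_ACQ_REL);    //the `lock decb`
	while(__atomic_load_n(cores_waiting, __ATOMIC_ACQUIRE))    //the cmpb/jnz spin
		__builtin_ia32_pause();
}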