Diffstat (limited to 'src/include/paging.h')
-rw-r--r-- src/include/paging.h | 39 ++++++++++++++++++++++++++++++-------
1 file changed, 32 insertions(+), 7 deletions(-)
diff --git a/src/include/paging.h b/src/include/paging.h
index 267da43..3d31ef2 100644
--- a/src/include/paging.h
+++ b/src/include/paging.h
@@ -45,19 +45,38 @@ typedef struct __attribute__((packed)) {
#define PAGE_SIZE_2M 21
#define PAGE_SIZE_1G 30
+#define MAX_BUDDY_ORDER 8
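+//orders presumably run 0..MAX_BUDDY_ORDER-1 (see buddy[] below), so the largest chunk is 0x1000 << 7 = 512 KiB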
+
+#define MAX_ZONE_CNT 16 //should cover all realistic memory maps
+
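+//presumably one BIOS E820 map entry: base, length, type, plus the ACPI 3.0 extended-attributes field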
struct memory_table {
- uint64_t base;
+ void *base;
uint64_t length;
uint32_t type;
uint32_t ACPI;
} __attribute__((packed));
+/**
+ * The bsizes are there so we don't have to calculate them every time.
+ *
+ * Keep in mind that if the allocator reaches the last buddy, it _will_
+ * have to calculate the bitwidth, even if it's a multiple of 64.
+ * Hopefully this scenario won't come up much in time-sensitive paths,
+ * and (I think) Linux calculates the buddy size every single time anyway.
+ */
struct phys_map {
- uint64_t map_size; //this way we won't have to calculate it every time
- void *chunk_start;
- uint64_t chunk_size;
- uint64_t *buddies;
-} __attribute__((packed));
+ struct phys_map *next;            //next zone; NULL on the last one
+ void *zone_paddr;                 //physical base address of this zone
+ uint64_t extra_bits;              //presumably the valid-bit count of the last bitmap word (see above)
+ uint64_t *buddy[MAX_BUDDY_ORDER]; //one allocation bitmap per order
+};
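+/**
+ * A minimal usage sketch (assumed, nothing below is defined in this
+ * header): zones form a singly linked list, so a walk over all of
+ * them would look like
+ *
+ *   for (struct phys_map *pmap = pmap_head; pmap; pmap = pmap->next)
+ *       inspect(pmap->zone_paddr, pmap->buddy[0]);
+ *
+ * where pmap_head and inspect() are hypothetical.
+ */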
+
+//TODO: clean up the redundant work these macros do some other time
+#define LSIZE_FROM_BITLEN(bitlen) (((bitlen) + 63) / 64)
+#define BITLEN_FROM_LSIZE(lsize) ((lsize) * 64)
+#define GET_BUDDY_BITLEN(zone_len, order) ((zone_len) / (0x1000 << (order)))
+#define GET_ORDER_CHUNKSIZE(order) (0x1000 << (order))
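+/**
+ * Worked example of the macros above (values assumed, not from this
+ * change): for a 0x801000-byte zone at order 0,
+ *   GET_BUDDY_BITLEN(0x801000, 0) = 0x801 = 2049 bits
+ *   LSIZE_FROM_BITLEN(2049)       = 33 uint64_t words
+ * so only 2049 % 64 = 1 bit of the 33rd word is valid, which is the
+ * "last buddy" bitwidth case described above.
+ */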
+
extern void* _meminfo_loc;
@@ -66,6 +85,12 @@ extern void* _stage2_pagetable;
bool map_page(void *virtual_addr, void *physical_addr, uint8_t PAGE_SIZE);
void debug_print_memory();
void create_pagetable_stage2(uint64_t free_mem);
-void init_memory();
+void init_memory(); //TODO: remove me
+void init_pmap();
+void *palloc();
+void pfree(void *page);
+void debug_pmap();
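+/**
+ * A minimal sketch of the intended call flow (assumed from the
+ * prototypes above, not spelled out in this change):
+ *
+ *   init_pmap();            //build the zone list and buddy bitmaps
+ *   void *page = palloc();  //grab one physical page
+ *   pfree(page);            //return it to its zone
+ */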
+
#endif