summaryrefslogtreecommitdiff
path: root/src/include/paging.h
blob: 3d31ef27cec35acdfc60cfb0f8d2157285d1365b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
#ifndef PAGING_H_ /* renamed from _PAGE_H_: leading-underscore+capital names are reserved */
#define PAGING_H_

/* All includes and constants now live inside the guard (they were outside it). */
#include <stdbool.h>
#include <stdint.h>

// Physical address of the bootstrap page map.
// NOTE(review): assumed to be fixed by the early-boot code -- confirm against stage1.
#define PAGEMAP_LOCATION 0x4000

// Paging error codes (non-zero = failure).
#define PAGE_VIRT_UNALIGNED 0x01 // virtual address not aligned to the requested page size
#define PAGE_PHYS_UNALIGNED 0x02 // physical address not aligned to the requested page size
#define PAGE_PHYS_INVALID   0x03 // physical address out of range / unusable
#define PAGE_VIRT_INVALID   0x04 // virtual address out of range / non-canonical

/**
 * One 64-bit x86-64 page-table entry, usable at any paging level.
 *
 * Fix: the original bit-fields summed to 63 bits (12 + 40 + 11), leaving the
 * entry one bit short of the architectural 64-bit format.  `sign` is widened
 * to 12 bits so the struct covers bits 52-63 (11 available bits plus the
 * top NX/execute-disable bit) and the entry is exactly 8 bytes.
 */
typedef struct __attribute__((packed)) {
  unsigned int present : 1;            // entry is valid; must be 1 for the CPU to use it
  unsigned int read_write : 1;         // 1 = writable, 0 = read-only
  unsigned int user : 1;               // 1 = accessible from CPL 3 (user mode)
  unsigned int writethrough_cache : 1; // 1 = write-through caching for this mapping
  unsigned int cachable : 1;           // cache-disable bit: 0 = caching enabled
  unsigned int accessed : 1;           // set by hardware when the entry is used
  unsigned int zg0 : 1;                // needs to be (and will be) zero'd
  unsigned int size : 1;               // PS bit: maps a large page at PD/PDPT level
  unsigned int zg1 : 1;                // needs to be (and will be) zero'd
  unsigned int software_marks : 3;     // ignored by hardware, free for OS use

  uintptr_t base_ptr : 40;             // physical frame number (physical address >> 12)
  unsigned int sign : 12;              // bits 52-63 (top bit is NX); was 11, see header comment
} page_entry;


// One instance of every paging level, laid out back-to-back in memory:
// PML4, then PDPT, then PD, then PT, 512 entries each.
// NOTE(review): each array is one 4 KiB page only if page_entry is 8 bytes -- confirm.
typedef struct __attribute__((packed)) {
  page_entry pml4e[512]; // level 4: page-map level-4 entries
  page_entry pdpe[512];  // level 3: page-directory-pointer entries
  page_entry pde[512];   // level 2: page-directory entries
  page_entry pte[512];   // level 1: page-table entries
} page_table;

// Memory-map region types (values match the BIOS E820 type field --
// NOTE(review): confirm against whatever fills _meminfo_loc).
#define MEM_AVAILABLE             1
#define MEM_RESERVED              2
#define MEM_APCI_RECLAIMABLE      3 // historical misspelling of ACPI; kept for existing callers
#define MEM_APCI_NVS              4 // historical misspelling of ACPI; kept for existing callers
#define MEM_BAD                   5

// Correctly spelled aliases for the misspelled "APCI" constants above.
#define MEM_ACPI_RECLAIMABLE      MEM_APCI_RECLAIMABLE
#define MEM_ACPI_NVS              MEM_APCI_NVS

// Page sizes expressed as log2(bytes): 4 KiB, 2 MiB, 1 GiB.
#define PAGE_SIZE_4K              12
#define PAGE_SIZE_2M              21
#define PAGE_SIZE_1G              30

// Largest buddy order tracked by the physical allocator (struct phys_map).
#define MAX_BUDDY_ORDER           8

#define MAX_ZONE_CNT              16 // should cover all cases

// One entry of the firmware-provided memory map.
// NOTE(review): layout matches a BIOS E820 entry with the ACPI 3.0 extended
// attributes dword -- confirm against the loader that fills _meminfo_loc.
struct memory_table {
  void *base;      // physical start address of the region
  uint64_t length; // region size in bytes
  uint32_t type;   // region type; one of the MEM_* constants
  uint32_t ACPI;   // extended attributes field
} __attribute__((packed));

/**
 * The bsizes are there so we don't have to calculate them every time.
 *
 * Keep in mind that if the allocator reaches the last buddy,
 * it _will_ have to calculate the bit width, even if it's a multiple of 64.
 * This scenario hopefully won't happen much in time-sensitive areas,
 * and (I think) Linux calculates the buddy size every single time anyway.
**/
// Per-zone physical-page allocator state: one buddy bitmap per order.
struct phys_map {
  struct phys_map *next;            // next zone in the singly linked list (NULL-terminated?)
  void *zone_paddr;                 // physical base address of the zone this map covers
  uint64_t extra_bits;              // NOTE(review): presumably the leftover bits in the
                                    // last bitmap word of the top order -- confirm in init_pmap
  uint64_t *buddy[MAX_BUDDY_ORDER]; // bitmap per order; 1 bit per chunk of that order
};

//clean up doing extra work some other time
// Number of 64-bit words needed to hold `bitlen` bits (rounded up).
#define LSIZE_FROM_BITLEN(bitlen) (((bitlen) + 63) / 64)
// Bit capacity of `lsize` 64-bit words.
#define BITLEN_FROM_LSIZE(lsize) ((lsize) * 64)
// Number of order-`order` chunks that fit in a zone of `zone_len` bytes
// (an order-0 chunk is 0x1000 bytes, doubling per order).
#define GET_BUDDY_BITLEN(zone_len, order) ((zone_len) / (0x1000 << (order)))
// Size in bytes of one chunk at the given buddy order.
#define GET_ORDER_CHUNKSIZE(order) (0x1000 << ((order)))



// Symbols provided outside C (linker script / boot assembly).
extern void* _meminfo_loc;      // where the firmware memory map was stored
extern void* _stage2_pagetable; // NOTE(review): presumably the stage-2 page table location -- confirm

// Maps virtual_addr to physical_addr at the given size (PAGE_SIZE_4K/2M/1G).
// NOTE(review): returns bool, yet PAGE_* error codes above suggest richer
// error reporting -- confirm the contract in the definition.
bool map_page(void *virtual_addr, void *physical_addr, uint8_t PAGE_SIZE);
// NOTE(review): the `()` prototypes below mean "unspecified parameters" in C;
// they should be `(void)` once the definitions are confirmed to take no arguments.
void debug_print_memory();
void create_pagetable_stage2(uint64_t free_mem);
void init_memory(); //TODO removeme
void init_pmap();
void *palloc();
void *pfree(); // NOTE(review): a free routine with no parameter looks wrong -- check definition
void debug_pmap();


#endif