summaryrefslogtreecommitdiff
path: root/src/backup
blob: 91913046e2411738822384ec7d09d79fecc38a72 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
#include <printf.h>
#include <paging.h>
#include <stdint.h>
#include <libc.h> 
#include <limits.h>
#include <kernel.h>

void debug_print_memory() {
  struct memory_table *memtable = (struct memory_table *)&_meminfo_loc; 
  printf(" __________________________________________________________________________\n");
  printf("| type\tstart\t\t\tend\t\t\tsize\t\t   |\n");
  printf("|--------------------------------------------------------------------------|\n");
  for(unsigned int i = 0; memtable[i].length > 0; i++) {
    printf("| %u %u\t0x%p\t0x%p\t0x%p |\n", memtable[i].type, memtable[i].ACPI, memtable[i].base, (memtable[i].base + memtable[i].length), memtable[i].length);
  }
  printf("----------------------------------------------------------------------------\n");
}

/*
 * Walk every physical-map table (linked through pmap->next, starting at
 * _stage2_pagetable) and print each buddy order's bitmap location, size,
 * and which bits are free. Per buddy, only the first 20 free entries are
 * printed; the rest are summarized.
 */
void debug_pmap() {
  struct phys_map* pmap;
  int pmap_i = 0, order;
  uint64_t buddy_size, blong_i, bbit_i, buddy_chunksize, omit_cnt;
  /* parenthesize the shift operand: '-' binds tighter than '<<', so the old
   * form was correct but triggers -Wshift-op-parentheses style warnings */
  printf("Maximum buddy order: %u (up to %#x sized chunks)\n", MAX_BUDDY_ORDER, (0x1000 << (MAX_BUDDY_ORDER - 1)));
  for(pmap = (struct phys_map*)&_stage2_pagetable; pmap != 0; pmap = pmap->next) {
    printf("Table %u:\n"
        "\tPhysical Start:\t%#p\n"
        "\tTable location:\t%#p\n", pmap_i, pmap->zone_paddr, pmap);
    for(order = 0; order <= MAX_BUDDY_ORDER - 1; order++) {
      buddy_chunksize = (0x1000 << order); //TODO just put it in the for loop
      /* bitmap length in 64-bit words: distance from this order's bitmap to
       * the next one's (or to the next table, for the highest order) */
      buddy_size = (((order == MAX_BUDDY_ORDER - 1) 
          ? (uint64_t *)pmap->next : pmap->buddy[order + 1]) - pmap->buddy[order]);
      printf("\tbuddy[%u]:\n"
          "\t\tAddress:\t%#x\n"
          "\t\tSize:\t\t%u\n"
          "\t\tBuddies:\t\t\n", order, pmap->buddy[order], buddy_size);

      omit_cnt = 0;

      for(blong_i = 0; blong_i < buddy_size; blong_i++) {
        for(bbit_i = 0; bbit_i < 64; bbit_i++) {
          /* a set bit marks a free chunk of this order */
          if(*(pmap->buddy[order] + blong_i) & ((uint64_t)1 << bbit_i)) {
            if((omit_cnt < 20) || (blong_i == buddy_size - 1)) {
              printf("address %#x\tbit %u: %x\t is free\n",
                pmap->buddy[order] + blong_i, bbit_i, pmap->zone_paddr + ((blong_i * 64) + bbit_i) * buddy_chunksize);
            }
            omit_cnt++;
            if(omit_cnt == 20) {
              printf("\t\t\t[more entries omitted]\n");
            }
          }
        }
      }
      /* dead label 'print_next_buddy' removed — nothing jumps to it */
    }
    pmap_i++;
  }
}

/*
 * part 1: 
 *  init tables (complete)
 *  
 * part 2: setting the actual entries
 *  if entry contains table, set 0, set threshold
 *  else:
 *    if entry:  
 */

// init_memory revision
// rules: 
 /**
void init_pmap() {
  struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
  struct phys_map *pmap = (struct phys_map*)&_stage2_pagetable;
  struct phys_map *last_pmap;
  unsigned int i, x;
  uint64_t budentry_len, pmap_chunksize, total_pmap_len = 0;
  void *y;
  void *paged_mem = &_stage2_pagetable + 0x200000;
  map_page(&_stage2_pagetable, &_stage2_pagetable, PAGE_SIZE_2M);
  for(i = 0; memtable[i].length > 0; i++) {
    if((memtable[i].type == MEM_AVAILABLE) && (memtable[i].ACPI & 1)) {
      total_pmap_len += 88; //it'd be nice to find a cleaner way

      //make sure we don't overwrite what we have so far of the kernel
      if((memtable[i].base <= (void*)&_stage2_pagetable) && (memtable[i].base + memtable[i].length >= (void *)&_stage2_pagetable)) { 
        pmap->chunk_start = &_stage2_pagetable;
        pmap->chunk_size = memtable[i].length - (pmap->chunk_start - memtable[i].base);
      }
      else {
        pmap->chunk_start = memtable[i].base;
        pmap->chunk_size = memtable[i].length;
      }

      for(x = 0; x < 8; x++) {
        pmap->bsize[x] = ceil((pmap->chunk_size / (0x1000 * (1 << x))) / (double)64);
        total_pmap_len += pmap->bsize[x] * 8;
      }

      pmap->next = (void*)&_stage2_pagetable + total_pmap_len;

      while((void*)pmap->next + sizeof(struct phys_map) >= paged_mem) {
        //do check here if nessesary
        map_page(paged_mem, paged_mem, PAGE_SIZE_2M);
        paged_mem += 0x200000;
      }

      last_pmap = pmap;
      pmap = pmap->next;
    }
  }
  last_pmap->next = 0; //I wonder if there's a better way to do this
  pmap = (struct phys_map*)&_stage2_pagetable;

}
**/

/*
 * Build the physical-memory buddy allocator maps in place at
 * _stage2_pagetable. Pass 1 walks the firmware memory map and lays out one
 * phys_map table (header + one bitmap per buddy order) per usable zone,
 * identity-mapping 2 MiB pages as needed to cover the growing structure.
 * Pass 2 initializes the bitmaps: the pages occupied by the pmap itself are
 * marked used (bits cleared) and the rest of each zone is marked free.
 */
void init_pmap() {
  struct memory_table *zones = (struct memory_table *)&_meminfo_loc;
  struct phys_map *pmap = (struct phys_map*)&_stage2_pagetable;
  struct phys_map *last_pmap = (struct phys_map*)&_stage2_pagetable;

  unsigned int zone_i, pmap_i = 0;
  int budorder; 

  //we keep this info out of the struct because we won't need it after setting up
  uint64_t zone_len[MAX_ZONE_CNT], buddy_bitlen[MAX_ZONE_CNT][MAX_BUDDY_ORDER]; 
  uint64_t *buddy_end;

  uint64_t pmap_size, pmap_bbitsize, pmap_blongsize, buddy_size, pmap_bit;
  uint64_t threshold_longsize = 0, threshold_bit, threshold_end, threshold_bitsize;
  // NOTE(review): deleteme_last_p_bits is never used — candidate for removal
  uint64_t deleteme_last_p_bits;

  
  // the first 2 MiB page holding the pmap is mapped up front; more are
  // mapped below as the structure grows past paged_mem
  void *paged_mem = (void *)&_stage2_pagetable + 0x200000;
  map_page(&_stage2_pagetable, &_stage2_pagetable, PAGE_SIZE_2M);

  for(zone_i = 0; zones[zone_i].length > 0; zone_i++) {

    if((zones[zone_i].type == MEM_AVAILABLE) && (zones[zone_i].ACPI & 1)) {

      //hopefully this should never happen... but x86 is routy.
      //I should dig into the docs to check before removing this.
      //We also could forget about MAX_ZONE_CNT if we did.
      
      if(zone_i >= MAX_ZONE_CNT) {
        printf("Only %u zones can be used! Modify MAX_ZONE_CNT in paging.h to use all memory.\n", MAX_ZONE_CNT);
        break;
      }


      // if this zone contains the kernel/pmap itself, start the zone at
      // _stage2_pagetable so we never hand out memory below it
      if((zones[zone_i].base <= (void*)&_stage2_pagetable) && 
          (zones[zone_i].base + zones[zone_i].length >= (void *)&_stage2_pagetable)) { 
        pmap->zone_paddr = &_stage2_pagetable;
        zone_len[pmap_i] = zones[zone_i].length - (pmap->zone_paddr - zones[zone_i].base);
      }
      else {
        pmap->zone_paddr = zones[zone_i].base;
        zone_len[pmap_i] = zones[zone_i].length;
      }

      // bitmaps are packed immediately after the phys_map header,
      // one after another in increasing buddy order
      pmap->buddy[0] = (void *)pmap + sizeof(*pmap);

      for(budorder = 1; budorder < MAX_BUDDY_ORDER; budorder++) {
        buddy_bitlen[pmap_i][budorder - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], budorder - 1);
        pmap->buddy[budorder] = (uint64_t *)pmap->buddy[budorder - 1] + 
          LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][budorder - 1]);
      } 

      // the highest order's bitmap ends where the next table begins
      buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1] = GET_BUDDY_BITLEN(zone_len[pmap_i], MAX_BUDDY_ORDER - 1);
      pmap->next = (void *)pmap->buddy[MAX_BUDDY_ORDER - 1] + 
        (LSIZE_FROM_BITLEN(buddy_bitlen[pmap_i][MAX_BUDDY_ORDER - 1]) * 8);

      pmap = pmap->next;
      pmap_i++;
      // NOTE(review): this records the *advanced* pointer (the not-yet-written
      // next table), not the table just completed — confirm that is intended
      last_pmap = pmap;

      //allocates by an extra sizeof(struct phys_map),
      //but were about to discard anyway
      while((void *)pmap + sizeof(*pmap) >= paged_mem) {
        map_page(paged_mem, paged_mem, PAGE_SIZE_2M);
        paged_mem += 0x200000;
      }

    }
  }
  // total bytes the pmap structure occupies; these pages must be marked used
  pmap_size = (void*)(pmap) - (void*)&_stage2_pagetable;
  if(pmap_size >= zone_len[0]) panic(); //TODO debugging

  pmap = (struct phys_map*)&_stage2_pagetable;

  // Honestly, this section makes me feel like Yandere Dev. It's ugly.
  // I've been rewriting this function forever, so I'm deciding to slam it out start to finish.
  // I know there's a lot of repeated if statements. I know it hurts.
  // But fear not. 
  // I'll come back and rewrite this part when I've gotten a break from memory management.
  // NOTE(review): the advance here is 'pmap++' (struct-sized step), while
  // every other walk uses 'pmap = pmap->next', and the condition 'pmap != 0'
  // can only be met by chance — verify this loop terminates as intended.
  for(pmap = (struct phys_map*)&_stage2_pagetable; pmap != 0; pmap++) {
    // walk orders high-to-low so each pass can derive the next order's
    // used/free threshold from the previous one
    for(budorder = MAX_BUDDY_ORDER - 1; budorder >= 0; budorder--) {
      // bits (and whole longs) of this order's bitmap covered by the pmap itself
      pmap_bbitsize = ceil((float)pmap_size / ((uint64_t)0x1000 << budorder));
      pmap_blongsize = pmap_bbitsize / 64;

      if(budorder == MAX_BUDDY_ORDER - 1) {
        buddy_end = (uint64_t *)pmap->next - 1;
        // NOTE(review): this assignment is dead — overwritten on the next line
        threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
        threshold_bitsize = UINT64_MAX;
      }
      else {
        buddy_end = pmap->buddy[budorder + 1] - 1;
        threshold_longsize = threshold_bitsize / 64;
        threshold_end = threshold_longsize + 1;
      }
      pmap_bit = pmap_bbitsize & 63;
      buddy_size = buddy_end - pmap->buddy[budorder];


      if(pmap_bbitsize >= BITLEN_FROM_LSIZE(buddy_size)) {
        //is this nessesary?
        // pmap covers the whole bitmap: everything is used
        bzero(pmap->buddy[budorder], buddy_size * 8);
      }
      else {
        if(budorder == MAX_BUDDY_ORDER - 1) {
          // clear full longs consumed by the pmap, then build the boundary
          // long: low pmap_bit bits used (0), the rest free (1)
          if(pmap_blongsize) bzero(pmap->buddy[budorder], (pmap_blongsize - 1) * 8);
          if(pmap_bit) {
            *(pmap->buddy[budorder] + pmap_blongsize) = ~(((uint64_t)1 << pmap_bit) - 1);
          }
          else {
            *(pmap->buddy[budorder] + pmap_blongsize) = UINT64_MAX;
          }
          if(pmap_blongsize + 1 == buddy_size) {
            //TODO why did I have this conditional? Do I need it later? Check on desktop before removing
            if(buddy_bitlen[0][budorder]) { 
              // mask off the tail bits past the zone's actual bit length
              *(pmap->buddy[budorder] + pmap_blongsize) &= 
                ((uint64_t)1 << (buddy_bitlen[0][budorder] & 63)) - 1;
            }
          }
          else {
            // middle longs are fully free; last long gets the tail mask
            memset(pmap->buddy[budorder] + pmap_blongsize + 1, UINT8_MAX, 
                (void *)buddy_end - (void *)pmap->buddy[budorder] - 8);
            *buddy_end = ((uint64_t)1 << (buddy_bitlen[0][budorder] & 63)) - 1;
          }
        }
        else {
          // lower orders: chunks below the higher order's threshold are
          // represented there, so only the straddling bit is set here
          if(threshold_longsize) bzero(pmap->buddy[budorder], (threshold_longsize - 1) * 8);

          if(threshold_bitsize > pmap_bbitsize)
            *(pmap->buddy[budorder] + threshold_longsize) = ((uint64_t)1 << ((threshold_bitsize - 1) & 63));
          
          if(buddy_size - threshold_longsize) 
            bzero(pmap->buddy[budorder] + threshold_longsize + 1, buddy_size - threshold_longsize);
          *buddy_end = ((uint64_t)1 << ((buddy_bitlen[0][budorder] & 63) - 1));
        }
        // threshold for the next (lower) order: twice as many half-sized chunks
        threshold_bitsize = ((pmap_blongsize * 64) + pmap_bbitsize) * 2;
      }
    }
  }
//  for(pmap = pmap->next; pmap != 0; pmap++) {
//  }
}


//  for(budlong_ptr = pmap->buddy[budorder]; budlong_ptr < (uint64_t *)pmap->next - 1; budlong_ptr++) {
  
/**
//uses buddy system allocation
void init_memory() {
  struct memory_table *memtable = (struct memory_table *)&_meminfo_loc;
  struct phys_map *map = (struct phys_map*)&_stage2_pagetable;
  struct phys_map *lasttable;
  unsigned int i, buddy_chunksize; 
  uint64_t buddy_bitsize, prev_buddy_bsize, ppt_size;
  uint64_t *buddy_ptr, *budentry_end, *threshold;
  uint64_t buddy_size64;
  
  map_page((void*)&_stage2_pagetable, (void*)&_stage2_pagetable, PAGE_SIZE_2M);
  void *next_page = (void *)0x400000;
  // at this point, we are declaring our header and kernel itself as free (so don't forget to fix that!) 
  
  //TODO all these for loops are unsanitary. 
  //We hae a lot of branches in the for loops,
  //And we re-do work to allocate the page-table.
  //I'll clean this up when I'm certain my buddy system works.
  for(i = 0; memtable[i].length > 0; i++) {
    if((memtable[i].type == MEM_AVAILABLE) && (memtable[i].ACPI & 1)) {
      map->chunk_start = (void*)memtable[i].base;
      map->chunk_size = memtable[i].length;
      buddy_ptr = (void*)&map->buddies;

      for(buddy_chunksize = 0x1000; buddy_chunksize < 0x100000; buddy_chunksize *= 2) {
        buddy_bitsize = memtable[i].length / buddy_chunksize;
        buddy_size64 = ceil(buddy_bitsize / (double)64);
      
        if((void*)&buddy_ptr[buddy_size64] + 24 >= next_page) {
          map_page(next_page, next_page, PAGE_SIZE_2M);
          next_page += 0x200000;
        }

        printf("buddy\t%x\theader bitsize\t%u\theader longsize\t%u\tbuddy start\t%p",\
            buddy_chunksize, buddy_bitsize, buddy_size64, buddy_ptr);
        if(((buddy_bitsize * 2) != prev_buddy_bsize) && (buddy_chunksize != 0x1000)) {
          buddy_ptr[-1] |= ((uint64_t)1 << ((prev_buddy_bsize & 63) - 1));
          printf("\tlast:\t%lx", buddy_ptr[-1]);
        }
        printf("\n");

        if(buddy_chunksize < 0x80000) {
          bzero(buddy_ptr, buddy_size64 * 8);
          prev_buddy_bsize = buddy_bitsize;
        }

        else if(buddy_size64 % buddy_bitsize) {
          memset(buddy_ptr, 0xff, (buddy_size64 - 1) * 8);
          buddy_ptr[buddy_size64] |= 1 << buddy_bitsize;
        }

        else memset(buddy_ptr, 0xff, buddy_size64);
        
        buddy_ptr += buddy_size64;
      }

      //this feels kind of gross
      lasttable = map;
      map->next = (struct phys_map*)buddy_ptr;
      map = (struct phys_map*)buddy_ptr;
    }
  }
  lasttable->next = (void*)0;
  map = (struct phys_map*)&_stage2_pagetable;
  //now we will allocate the table out of itself so we don't mess things up. 
  //We can't just call palloc(), we need to allocate out of the first available pages (where the pages are)
  //we should clean this whole gross function
  /**
  ppt_size = (uint64_t)((void *)buddy_ptr - (void *)&_stage2_pagetable); 
  threshold = (void*)UINT64_MAX;
  int buddy_bit;
  for(buddy_chunksize = 0x80000; buddy_chunksize >= 0x1000; buddy_chunksize /= 2) {
    //this means that our table must fit into the first page table. Fixme later, low priotrity
    buddy_size64 = ceil((map->chunk_size / buddy_chunksize) / (double)64);
    budentry_end = buddy_ptr;
    for(buddy_ptr -= buddy_size64; buddy_ptr <= budentry_end; buddy_ptr++) {
      if(buddy_ptr > threshold) {
        buddy_ptr = budentry_end;
        continue;
      }
      if(
    }
  }
}
  **/

//TODO this function was deleted due to it being wrong. 
//I'll create it once I have the physical paging prerequisite set up. 
// Stub: intentionally empty; free_mem is accepted but currently unused.
void create_pagetable_stage2(uint64_t free_mem) {
}


/**
 * BIG TODO:
 * Paging turned out to be simpler than I thought. I've temporarily fixed the code, but it needs to be rewritten/simplified.
 * Let's get rid of those nasty GOTOs if we can.
 * Also, once we get the physical memory allocator up and running, implement that in this function.
**/

/*
 * Map one page of the given size (PAGE_SIZE_1G / PAGE_SIZE_2M / 4K) from
 * virtual_addr to physical_addr in the page tables at PAGEMAP_LOCATION.
 * Returns true on success or if an identical mapping already exists;
 * returns false if either address is misaligned or a conflicting mapping
 * is found ("Page allocation error!").
 */
bool map_page(void *virtual_addr, void *physical_addr, uint8_t size) {
  //printf("map page called\n");
  uintptr_t va_ptr = (uintptr_t)virtual_addr;
  uintptr_t pa_ptr = (uintptr_t)physical_addr;
  // both addresses must be aligned to the page size; shift a uintptr_t so a
  // size value >= 31 can't overflow a plain int
  if((va_ptr % ((uintptr_t)1 << size)) || (pa_ptr % ((uintptr_t)1 << size))) {
    return 0; 
  }
  page_table *table = (page_table *)PAGEMAP_LOCATION;
  // 9-bit table indices for each level of the 4-level hierarchy
  int pte_i = (va_ptr >> 12) & 0x1ff;
  int pde_i = (va_ptr >> 21) & 0x1ff;
  int pdpe_i = (va_ptr >> 30) & 0x1ff;
  int pml4e_i = (va_ptr >> 39) & 0x1ff;

  if(table->pml4e[pml4e_i].present) {
    // an existing entry must already point at our own next-level table
    if(table->pml4e[pml4e_i].base_ptr != (uintptr_t)&table->pdpe[pdpe_i] >> 12) goto error;
    if(table->pdpe[pdpe_i].present) {
      if(size == PAGE_SIZE_1G) {
        // NOTE(review): this compares base_ptr against (pa_ptr >> 30) & 0x1ff,
        // but the entry is written below as pa_ptr >> 12 — one of the two
        // looks wrong; confirm against the 1 GiB-page PDPE format
        if(table->pdpe[pdpe_i].base_ptr == ((uintptr_t)pa_ptr >> 30 & 0x1ff))
          return true;
        goto error;
      }
      if(table->pdpe[pdpe_i].base_ptr != (uintptr_t)&table->pde[pde_i] >> 12) goto error;

      if(table->pde[pde_i].present) { 
        if(size == PAGE_SIZE_2M) {
          if(table->pde[pde_i].base_ptr == ((uintptr_t)pa_ptr >> 21 & 0x1ff))
            return true;
          goto error;
        }
        if(table->pde[pde_i].base_ptr != (uintptr_t)&table->pte[pte_i] >> 12) goto error;
        if(table->pte[pte_i].present) {
          // already mapped: success only if it maps the same frame
          if(table->pte[pte_i].base_ptr != ((pa_ptr >> 12) & 0x1ff)) goto error;
          return true;
        }
        else goto mod_page_pte;
      }
      else goto mod_page_pde;
    }
    else goto mod_page_pdpe;
  }
  else {
    table->pml4e[pml4e_i].base_ptr = (uintptr_t)&table->pdpe[pdpe_i] >> 12;
    // BUGFIX: was 'table->pdpe[pml4e_i].read_write = 1' — that indexed the
    // PDPE array with the PML4 index and left the new PML4 entry read-only
    table->pml4e[pml4e_i].read_write = 1;
    table->pml4e[pml4e_i].present = 1;
mod_page_pdpe:
    table->pdpe[pdpe_i].read_write = 1;
    //TODO you just found out things are a lot more simple then you thought!
    if(size == PAGE_SIZE_1G) {
      table->pdpe[pdpe_i].size = 1;
      table->pdpe[pdpe_i].base_ptr = pa_ptr >> 12;
      table->pdpe[pdpe_i].present = 1;
      return true;
    }
    table->pdpe[pdpe_i].base_ptr = (uintptr_t)&table->pde[pde_i] >> 12;
    table->pdpe[pdpe_i].present = 1;
mod_page_pde:
    table->pde[pde_i].read_write = 1;
    if(size == PAGE_SIZE_2M) {
      table->pde[pde_i].size = 1;
      table->pde[pde_i].base_ptr = pa_ptr >> 12;
      table->pde[pde_i].present = 1;
      return true;
    }
    table->pde[pde_i].base_ptr = (uintptr_t)&table->pte[pte_i] >> 12;
    table->pde[pde_i].present = 1;
mod_page_pte:
    table->pte[pte_i].base_ptr = pa_ptr >> 12;
    table->pte[pte_i].read_write = 1;
    table->pte[pte_i].present = 1;
    return true;
  }
error:
  printf("Page allocation error!\n");
  return false;
}