summaryrefslogtreecommitdiff
path: root/src/kernel/pagealloc.c
diff options
context:
space:
mode:
author	dzwdz	2024-07-15 12:56:25 +0200
committer	dzwdz	2024-07-15 12:56:25 +0200
commit0be35869d226aa2edc47dea07e0aca1c73f677d5 (patch)
tree0c78809aece4ad99dae4ea2a8ca196b56667283c /src/kernel/pagealloc.c
parentc92276ee58473ea1ae42470d56972c427187e326 (diff)
kernel: split the page allocator and kmalloc
Diffstat (limited to 'src/kernel/pagealloc.c')
-rw-r--r--src/kernel/pagealloc.c108
1 files changed, 108 insertions, 0 deletions
diff --git a/src/kernel/pagealloc.c b/src/kernel/pagealloc.c
new file mode 100644
index 0000000..f01c295
--- /dev/null
+++ b/src/kernel/pagealloc.c
@@ -0,0 +1,108 @@
+#include <kernel/arch/generic.h>
+#include <kernel/malloc.h>
+#include <kernel/panic.h>
+#include <kernel/util.h>
+#include <shared/mem.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+extern uint8_t pbitmap[]; /* linker.ld */
+static size_t pbitmap_len; /* in bytes */
+static void *memtop;
+static void *firstfreepage; /* not necessarily actually free */
+
+static size_t
+toindex(void *p)
+{
+ assert((void*)pbitmap <= p);
+ return ((uintptr_t)p - (uintptr_t)pbitmap) / PAGE_SIZE;
+}
+
+static bool
+pbitmap_get(void *p)
+{
+ size_t i = toindex(p);
+ size_t b = i / 8;
+ uint8_t m = 1 << (i&7);
+ assert(b < pbitmap_len); // TODO the bitmap should be a tad longer
+ return (pbitmap[b]&m) != 0;
+}
+
+static bool
+pbitmap_set(void *p, bool v)
+{
+ size_t i = toindex(p);
+ size_t b = i / 8;
+ uint8_t m = 1 << (i&7);
+ assert(b < pbitmap_len);
+ bool prev = (pbitmap[b]&m) != 0;
+ if (v) {
+ pbitmap[b] |= m;
+ } else {
+ pbitmap[b] &= ~m;
+ }
+ return prev;
+}
+
+void
+mem_init(void *p)
+{
+ memtop = p;
+ kprintf("memory %8x -> %8x\n", &_bss_end, memtop);
+ pbitmap_len = toindex(memtop) / 8;
+ memset(pbitmap, 0, pbitmap_len);
+ mem_reserve(pbitmap, pbitmap_len);
+ firstfreepage = pbitmap;
+}
+
+void
+mem_reserve(void *addr, size_t len)
+{
+ kprintf("reserved %8x -> %8x\n", addr, addr + len);
+
+ void *top = min(addr + len, memtop);
+ addr = (void*)((uintptr_t)addr & ~PAGE_MASK); /* round down to page */
+ for (void *p = max(addr, (void*)pbitmap); p < top; p += PAGE_SIZE) {
+ /* this doesn't allow overlapping reserved regions, but, more
+ * importantly, it prevents reserving an already allocated page */
+ if (pbitmap_get(p)) {
+ panic_invalid_state();
+ }
+ pbitmap_set(p, true);
+ }
+}
+
+void *
+page_zalloc(size_t pages)
+{
+ void *p = page_alloc(pages);
+ memset(p, 0, pages * PAGE_SIZE);
+ return p;
+}
+
+void *
+page_alloc(size_t pages)
+{
+ assert(pages == 1);
+ for (void *p = firstfreepage; p < memtop; p += PAGE_SIZE) {
+ if (!pbitmap_get(p)) {
+ pbitmap_set(p, true);
+ firstfreepage = p + PAGE_SIZE;
+ return p;
+ }
+ }
+ kprintf("we ran out of memory :(\ngoodbye.\n");
+ panic_unimplemented();
+}
+
+void
+page_free(void *addr, size_t pages)
+{
+ assert((void*)pbitmap <= addr);
+ for (size_t i = 0; i < pages; i++) {
+ if (pbitmap_set(addr + i*PAGE_SIZE, false) == false) {
+ panic_invalid_state();
+ }
+ }
+ firstfreepage = min(firstfreepage, addr);
+}