author     dzwdz    2022-07-18 23:55:58 +0200
committer  dzwdz    2022-07-18 23:55:58 +0200
commit     96f41ff64d8113307f1b60d2eb6852423db34d14 (patch)
tree       5a85a9706a2a11d047f7f5c91f4964bd2167b1ee /src/kernel
parent     121794214fd5ae36609c30418dfaf1a073b8784c (diff)
syscalls: implement execbuf
I have been planning to implement something like this for a while now. It should be faster for consecutive syscalls (to be tested), and it will also be helpful when writing the ELF loader.
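Concretely, the interpreter added below consumes a flat array of uint64_t words, and each EXECBUF_SYSCALL entry is six words long: the opcode, the syscall number, and four arguments. A minimal sketch of one entry, purely illustrative and not part of the commit (the opcode constant lives in shared/execbuf.h, which this page does not include):

/* Illustrative sketch: one EXECBUF_SYSCALL entry as consumed by execbuf_run().
 * try_fetch() reads the opcode word, then five more words that become
 * _syscall(buf[0], buf[1], buf[2], buf[3], buf[4]). */
uint64_t entry[6] = {
	EXECBUF_SYSCALL, /* opcode, defined in shared/execbuf.h (not in this diff) */
	_SYSCALL_EXIT,   /* syscall number */
	0, 0, 0, 0,      /* four syscall arguments */
};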
Diffstat (limited to 'src/kernel')
-rw-r--r--    src/kernel/execbuf.c     38
-rw-r--r--    src/kernel/execbuf.h      4
-rw-r--r--    src/kernel/proc.c        11
-rw-r--r--    src/kernel/proc.h         6
-rw-r--r--    src/kernel/syscalls.c    26
5 files changed, 83 insertions(+), 2 deletions(-)
diff --git a/src/kernel/execbuf.c b/src/kernel/execbuf.c
new file mode 100644
index 0000000..6136d0e
--- /dev/null
+++ b/src/kernel/execbuf.c
@@ -0,0 +1,38 @@
+#include <kernel/execbuf.h>
+#include <kernel/mem/alloc.h>
+#include <kernel/panic.h>
+#include <shared/execbuf.h>
+#include <shared/mem.h>
+
+_Noreturn static void halt(struct process *proc) {
+ kfree(proc->execbuf.buf);
+ proc->execbuf.buf = NULL;
+ process_switch_any();
+}
+
+static void try_fetch(struct process *proc, uint64_t *buf, size_t amt) {
+ size_t bytes = amt * sizeof(uint64_t);
+ if (proc->execbuf.pos + bytes > proc->execbuf.len)
+ halt(proc);
+ memcpy(buf, proc->execbuf.buf + proc->execbuf.pos, bytes);
+ proc->execbuf.pos += bytes;
+}
+
+_Noreturn void execbuf_run(struct process *proc) {
+ uint64_t buf[5];
+ assert(proc == process_current); // idiotic, but needed because of _syscall.
+ assert(proc->state == PS_RUNNING);
+ assert(proc->execbuf.buf);
+
+ for (;;) {
+ try_fetch(proc, buf, 1);
+ switch (buf[0]) {
+ case EXECBUF_SYSCALL:
+ try_fetch(proc, buf, 5);
+ _syscall(buf[0], buf[1], buf[2], buf[3], buf[4]);
+ break;
+ default:
+ halt(proc);
+ }
+ }
+}
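The opcode itself comes from shared/execbuf.h, which falls outside the src/kernel filter of this page. A hypothetical minimal version of that header, where the opcode value and the name of the size-cap constant are assumptions rather than anything this diff confirms:

/* Hypothetical sketch of shared/execbuf.h -- not part of this commit.
 * The actual opcode value is an assumption. */
#pragma once
#include <stddef.h>
#include <stdint.h>

#define EXECBUF_SYSCALL 1

/* _syscall_execbuf() currently hardcodes the cap as sizeof(uint64_t) * 6 * 4
 * (four syscall entries); giving it a name is one of the TODOs in syscalls.c. */
#define EXECBUF_MAX_LEN (sizeof(uint64_t) * 6 * 4)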
diff --git a/src/kernel/execbuf.h b/src/kernel/execbuf.h
new file mode 100644
index 0000000..02ada92
--- /dev/null
+++ b/src/kernel/execbuf.h
@@ -0,0 +1,4 @@
+#pragma once
+#include <kernel/proc.h>
+
+_Noreturn void execbuf_run(struct process *proc);
diff --git a/src/kernel/proc.c b/src/kernel/proc.c
index 324f0b4..9f6f6b7 100644
--- a/src/kernel/proc.c
+++ b/src/kernel/proc.c
@@ -1,4 +1,5 @@
#include <kernel/arch/generic.h>
+#include <kernel/execbuf.h>
#include <kernel/main.h>
#include <kernel/mem/alloc.h>
#include <kernel/mem/virt.h>
@@ -135,6 +136,11 @@ void process_kill(struct process *p, int ret) {
process_transition(p, PS_DEAD);
p->death_msg = ret;
+ if (p->execbuf.buf) {
+ kfree(p->execbuf.buf);
+ p->execbuf.buf = NULL;
+ }
+
if (p->parent)
pagedir_free(p->pages); // TODO put init's pages in the allocator
@@ -199,7 +205,10 @@ static _Noreturn void process_switch(struct process *proc) {
assert(proc->state == PS_RUNNING);
process_current = proc;
pagedir_switch(proc->pages);
- sysexit(proc->regs);
+ if (proc->execbuf.buf)
+ execbuf_run(proc);
+ else
+ sysexit(proc->regs);
}
_Noreturn void process_switch_any(void) {
diff --git a/src/kernel/proc.h b/src/kernel/proc.h
index c400f7d..c642a42 100644
--- a/src/kernel/proc.h
+++ b/src/kernel/proc.h
@@ -57,6 +57,12 @@ struct process {
struct vfs_mount *mount;
+ struct {
+ void *buf;
+ size_t len;
+ size_t pos;
+ } execbuf;
+
struct handle *handles[HANDLE_MAX];
};
diff --git a/src/kernel/syscalls.c b/src/kernel/syscalls.c
index 786578a..a2ae5dd 100644
--- a/src/kernel/syscalls.c
+++ b/src/kernel/syscalls.c
@@ -361,6 +361,25 @@ long _syscall_pipe(handle_t __user user_ends[2], int flags) {
SYSCALL_RETURN(0);
}
+long _syscall_execbuf(void __user *ubuf, size_t len) {
+ if (len == 0) SYSCALL_RETURN(0);
+ if (len > sizeof(uint64_t) * 6 * 4) // TODO specify max size somewhere
+ SYSCALL_RETURN(-1);
+ if (process_current->execbuf.buf)
+ SYSCALL_RETURN(-1); /* no nesting */
+ // actually TODO, nesting makes sense for infinite loops. maybe
+
+ char *kbuf = kmalloc(len);
+ if (!virt_cpy_from(process_current->pages, kbuf, ubuf, len)) {
+ kfree(kbuf);
+ SYSCALL_RETURN(-1);
+ }
+ process_current->execbuf.buf = kbuf;
+ process_current->execbuf.len = len;
+ process_current->execbuf.pos = 0;
+ SYSCALL_RETURN(0);
+}
+
void _syscall_debug_klog(const void __user *buf, size_t len) {
(void)buf; (void)len;
// static char kbuf[256];
@@ -371,10 +390,12 @@ void _syscall_debug_klog(const void __user *buf, size_t len) {
}
long _syscall(long num, long a, long b, long c, long d) {
+ /* note: this isn't the only place where syscalls get called from!
+ * see execbuf */
switch (num) {
case _SYSCALL_EXIT:
_syscall_exit(a);
- // _syscall_exit doesn't exit
+ // _syscall_exit doesn't return
case _SYSCALL_AWAIT:
_syscall_await();
break;
@@ -411,6 +432,9 @@ long _syscall(long num, long a, long b, long c, long d) {
case _SYSCALL_PIPE:
_syscall_pipe((userptr_t)a, b);
break;
+ case _SYSCALL_EXECBUF:
+ _syscall_execbuf((userptr_t)a, b);
+ break;
case _SYSCALL_DEBUG_KLOG:
_syscall_debug_klog((userptr_t)a, b);
break;
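To tie the pieces together, here is a hedged sketch of how userspace might batch two syscalls through the new interface. It assumes a userspace _syscall_execbuf() stub with the same signature as the kernel handler, plus headers exposing the _SYSCALL_* numbers and the EXECBUF_SYSCALL opcode; the stub name and the logging arguments are assumptions, not part of the commit:

/* Illustrative only: batch a debug log plus an exit into one execbuf.
 * Includes for the syscall numbers and the userspace stub are omitted/assumed. */
#include <stdint.h>

void batch_example(void) {
	static const char msg[] = "hello from execbuf";
	uint64_t buf[] = {
		/* each entry: opcode, syscall number, four arguments */
		EXECBUF_SYSCALL, _SYSCALL_DEBUG_KLOG, (uint64_t)(uintptr_t)msg, sizeof(msg), 0, 0,
		EXECBUF_SYSCALL, _SYSCALL_EXIT,       0,                        0,           0, 0,
	};
	/* the kernel copies buf, and on the next switch back to this process
	 * runs the entries via execbuf_run() instead of returning through sysexit() */
	_syscall_execbuf(buf, sizeof buf);
}

One buffer, one kernel entry from userspace's point of view, which is where the hoped-for speedup over consecutive syscalls would come from.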