/* NOTE(review): the header names of these seven #include directives were lost
 * (likely during an automated extraction/reformat of this file). They must be
 * restored from version control before this file can compile — the code below
 * clearly depends on at least the kernel allocator (kmalloc/kfree), assert,
 * proc/regs helpers, pcpy, and the VFS request/backend declarations. */
#include
#include
#include
#include
#include
#include
#include

/** Checks if the request flags don't contradict each other.
 * While this could be done by caller of vfsreq_dispatchcopy, as currently
 * there's only one of them per operation, doing this here means that any
 * potential future callers can't forget those checks.
 * Doing this so late is kinda inefficient but I don't really care. */
static bool vfsreq_isvalid(VfsReq *r) {
	/* Notable omissions:
	 * - Simplifying the path for open(). Already done when resolving the mount.
	 * - Checking the handle permissions. We don't have access to it.
	 * */
	/* OPEN_CREATE only makes sense on a writable open: creating a file
	 * without write access is rejected as contradictory. */
	if (r->type == VFSOP_OPEN && !(r->flags & OPEN_WRITE) && (r->flags & OPEN_CREATE)) {
		return false;
	}
	// XXX if i add a handle field to vfs_request, check ->readable ->writeable here
	return true;
}

/** Copies the request template onto the heap and dispatches it to its backend.
 *
 * Takes ownership of the request's lifecycle: on every path the request
 * either reaches a backend's accept routine or is completed immediately via
 * vfsreq_finish_short(). If tmpl.caller is set, the caller process is parked
 * in PS_WAITS4FS until the request finishes. */
void vfsreq_dispatchcopy(VfsReq tmpl) {
	VfsReq *req;
	VfsBackend *backend;
	/* allocate memory for the request and move it there */
	req = kmalloc(sizeof *req, "VfsReq");
	memcpy(req, &tmpl, sizeof *req);

	/* Pin the backend for the lifetime of the request; the matching
	 * vfsback_userdown() happens in vfsreq_finish(). */
	backend = req->backend;
	if (backend) {
		assert(backend->usehcnt > 0);
		backend->usehcnt++;
	}

	/* Park the caller before anything can complete the request, so a
	 * synchronous finish always finds it in PS_WAITS4FS. */
	if (req->caller) {
		proc_setstate(req->caller, PS_WAITS4FS);
		req->caller->reqslot = req;
	}

	/* Check request validity. Doing this after the memcpy means that it
	 * doesn't need to be special cased in vfsreq_finish. */
	if (!vfsreq_isvalid(req)) {
		vfsreq_finish_short(req, -EINVAL);
		return;
	}

	/* A freshly dispatched request must not already sit on any queue. */
	assert(req->queue_next == NULL);
	assert(req->postqueue_next == NULL);

	if (backend == NULL) {
		/* null mount - probably never had a real backend */
		vfsreq_finish_short(req, -ENOENT);
	} else if (backend->is_user) {
		vfsback_useraccept(req);
	} else { /* kernel backend */
		assert(backend->kern.accept);
		backend->kern.accept(req);
	}
}

/** Completes a VFS request and wakes its caller.
 *
 * @param req     the request being finished; freed before returning.
 * @param stored  for successful VFSOP_OPEN: the backend-side file id to wrap
 *                in the new handle (stored verbatim in h->file_id).
 * @param ret     the backend's result; may be rewritten below for OPEN.
 * @param flags   FSR_* flags; FSR_DELEGATE means ret names a handle in
 *                handler's handle set that is moved to the caller.
 * @param handler the user fs process, required when FSR_DELEGATE is set.
 *
 * On success for OPEN, the caller receives a handle id (via hs_put) instead
 * of the raw backend result. The caller, if any, is returned to PS_RUNNING
 * with ret stored as its syscall return value. */
void vfsreq_finish(VfsReq *req, char __user *stored, long ret, int flags, Proc *handler) {
	if (req->type == VFSOP_OPEN && ret >= 0) {
		Handle *h;
		if (!(flags & FSR_DELEGATE)) {
			/* default behavior - create a new handle for the file, wrap the id */
			h = handle_init(HANDLE_FILE);
			h->backend = req->backend;
			req->backend->usehcnt++;
			h->file_id = stored;
			/* Permissions derive from the open() flags requested. */
			h->readable = OPEN_READABLE(req->flags);
			h->writeable = OPEN_WRITEABLE(req->flags);
		} else {
			/* delegating - moving a handle to the caller */
			assert(handler);
			h = hs_take(handler->hs, ret);
			if (h) {
				/* A delegated handle may only lose permissions,
				 * never gain ones the open() didn't request. */
				h->readable = h->readable && OPEN_READABLE(req->flags);
				h->writeable = h->writeable && OPEN_WRITEABLE(req->flags);
			}
		}
		if (h) {
			// TODO write tests for caller getting killed while opening a file
			if (!req->caller) panic_unimplemented();
			ret = hs_put(req->caller->hs, h);
			/* NOTE(review): if hs_put fails here, h (and, on the
			 * non-delegated path, the usehcnt bump above) appears to
			 * leak — no cleanup path is visible. Confirm. */
			if (ret < 0) ret = -EMFILE;
		} else {
			/* NOTE(review): -1 here is a bare sentinel, unlike the
			 * -E* codes used elsewhere — presumably intentional,
			 * but worth confirming. */
			ret = -1;
		}
	}
	if (req->type == VFSOP_READ && ret >= 0) {
		/* A read can't return more bytes than the output buffer holds. */
		assert((size_t)ret <= req->output.len);
	}
	/* Kernel-owned input buffers were allocated for this request; free them. */
	if (req->input.kern) {
		kfree(req->input.buf_kern);
	}
	/* Drop the reference taken in vfsreq_dispatchcopy. */
	if (req->backend) {
		vfsback_userdown(req->backend);
	}
	/* The request must have been unlinked from all queues by now. */
	assert(req->queue_next == NULL);
	assert(req->postqueue_next == NULL);
	if (req->caller) {
		assert(req->caller->state == PS_WAITS4FS);
		regs_savereturn(&req->caller->regs, ret);
		proc_setstate(req->caller, PS_RUNNING);
	}
	kfree(req);
}

/** Hands a request to a userland filesystem backend.
 *
 * If no handler process is currently waiting (PS_WAITS4REQUEST), the request
 * is appended to the backend's FIFO queue. Otherwise the request's input is
 * copied into the handler's awaited buffer, a struct ufs_request describing
 * the operation is copied out, a HANDLE_FS_REQ handle referencing req is
 * installed in the handler's handle set, and the handler is woken with that
 * handle id as its return value.
 *
 * Fails the request with -EPIPE when no provider handles remain. */
void vfsback_useraccept(VfsReq *req) {
	VfsBackend *backend;
	Proc *handler;
	struct ufs_request res = {0};
	int len;

	assert(req != NULL);
	backend = req->backend;
	assert(backend);
	assert(backend->is_user);

	if (backend->provhcnt == 0) {
		/* Nobody can ever serve this request anymore. */
		vfsreq_finish_short(req, -EPIPE);
		return;
	} else if (backend->user.handler == NULL) {
		/* queue the request up */
		// TODO use postqueue
		VfsReq **it = &backend->queue;
		while (*it != NULL) { /* find a free spot in queue */
			it = &(*it)->queue_next;
		}
		*it = req;
		return;
	}

	handler = backend->user.handler;
	assert(handler->state == PS_WAITS4REQUEST);

	// the pcpy calls aren't present in all kernel backends
	// it's a way to tell apart kernel and user backends apart
	// TODO check validity of memory regions somewhere else
	if (req->input.buf) {
		__user void *buf = handler->awaited_req.buf;
		/* Never copy more than the handler said it can receive. */
		len = min(req->input.len, handler->awaited_req.max_len);
		if (req->input.kern) {
			/* input lives in kernel memory */
			pcpy_to(handler, buf, req->input.buf_kern, len);
		} else {
			/* input lives in the caller's address space */
			len = pcpy_bi(handler, buf, req->caller, req->input.buf, len);
		}
	} else {
		len = req->output.len;
	}

	/* Describe the operation to the userland handler. */
	res.len = len;
	res.capacity = req->output.len;
	res.id = req->id;
	res.offset = req->offset;
	res.flags = req->flags;
	res.op = req->type;
	if (pcpy_to(handler, handler->awaited_req.res, &res, sizeof res) < sizeof(res)) {
		/* Partial copy of the request descriptor — can't recover. */
		panic_unimplemented();
	}

	/* Give the handler a handle it will later use to respond to req. */
	Handle *h;
	hid_t hid = hs_hinit(handler->hs, HANDLE_FS_REQ, &h);
	if (hid < 0) panic_unimplemented();
	h->req = req;

	proc_setstate(handler, PS_RUNNING);
	regs_savereturn(&handler->regs, hid);
	/* The handler is busy now; the next request must queue or wait. */
	req->backend->user.handler = NULL;
}

/** Frees the backend once both user and provider refcounts reach zero.
 * The queue must already be empty by then (see vfsback_provdown). */
static void vfsback_checkfree(VfsBackend *b) {
	if (b->usehcnt == 0 && b->provhcnt == 0) {
		assert(!b->queue);
		kfree(b);
	}
}

/** Drops one user reference to the backend.
 *
 * When the last user goes away: kernel backends get their cleanup hook
 * invoked; a userland handler blocked in PS_WAITS4REQUEST is woken with
 * -EPIPE, since no further requests can ever arrive. May free b. */
void vfsback_userdown(VfsBackend *b) {
	assert(b);
	assert(0 < b->usehcnt);
	b->usehcnt--;
	if (b->usehcnt == 0) {
		if (!b->is_user && b->kern.cleanup) {
			b->kern.cleanup(b);
		}
		if (b->is_user && b->user.handler) {
			/* tell the process that the filesystem won't receive any more
			 * requests */
			Proc *p = b->user.handler;
			b->user.handler = NULL;
			assert(p->state == PS_WAITS4REQUEST);
			regs_savereturn(&p->regs, -EPIPE);
			proc_setstate(p, PS_RUNNING);
		}
	}
	vfsback_checkfree(b);
}

/** Drops one provider reference to the backend.
 *
 * When the last provider goes away, every queued request is failed with
 * -EPIPE — nothing is left to serve them. May free b. */
void vfsback_provdown(VfsBackend *b) {
	assert(b);
	assert(0 < b->provhcnt);
	b->provhcnt--;
	if (b->provhcnt == 0) {
		/* discard everything in the queue */
		VfsReq *q = b->queue;
		while (q) {
			VfsReq *q2 = q->queue_next;
			/* Unlink before finishing — vfsreq_finish asserts
			 * queue_next == NULL. */
			q->queue_next = NULL;
			vfsreq_finish_short(q, -EPIPE);
			q = q2;
		}
		b->queue = NULL;
	}
	vfsback_checkfree(b);
}