FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>

This program can be distributed under the terms of the GNU GPL.
#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
* Lockless access is OK, because file->private_data is set
* once during mount and is valid until the file is released.
return file->private_data;
static void fuse_request_init(struct fuse_req *req)
memset(req, 0, sizeof(*req));
INIT_LIST_HEAD(&req->list);
init_waitqueue_head(&req->waitq);
atomic_set(&req->count, 1);

struct fuse_req *fuse_request_alloc(void)
struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
fuse_request_init(req);

void fuse_request_free(struct fuse_req *req)
kmem_cache_free(fuse_req_cachep, req);

static void block_sigs(sigset_t *oldset)
siginitsetinv(&mask, sigmask(SIGKILL));
sigprocmask(SIG_BLOCK, &mask, oldset);

static void restore_sigs(sigset_t *oldset)
sigprocmask(SIG_SETMASK, oldset, NULL);
* Reset request, so that it can be reused
*
* The caller must be _very_ careful to make sure that it is holding
* the only reference to req
void fuse_reset_request(struct fuse_req *req)
BUG_ON(atomic_read(&req->count) != 1);
fuse_request_init(req);
static void __fuse_get_request(struct fuse_req *req)
atomic_inc(&req->count);

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
BUG_ON(atomic_read(&req->count) < 2);
atomic_dec(&req->count);
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
atomic_inc(&fc->num_waiting);
intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
restore_sigs(&oldset);
req = fuse_request_alloc();
req->in.h.uid = current->fsuid;
req->in.h.gid = current->fsgid;
req->in.h.pid = current->pid;
atomic_dec(&fc->num_waiting);

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
if (atomic_dec_and_test(&req->count)) {
atomic_dec(&fc->num_waiting);
fuse_request_free(req);
void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
spin_lock(&fc->lock);
list_del(&req->bg_entry);
if (fc->num_background == FUSE_MAX_BACKGROUND) {
wake_up_all(&fc->blocked_waitq);
fc->num_background--;
spin_unlock(&fc->lock);
* This function is called when a request is finished. Either a reply
* has arrived or it was interrupted (and not yet sent) or some error
* occurred during communication with userspace, or the device file
* was closed. In case of a background request the references to the
* stored objects are released. The requester thread is woken up (if
* still waiting), the 'end' callback is called if given, else the
* reference to the request is released.
*
* Releasing the extra reference for foreground requests must be done
* within the same locked region as setting the state to finished. This
* is because fuse_reset_request() may be called after the request is
* finished and it must be the sole possessor. If the request is
* interrupted and put in the background, it will return with an error
* and hence never be reset and reused.
*
* Called with fc->lock, unlocks it
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
list_del(&req->list);
req->state = FUSE_REQ_FINISHED;
if (!req->background) {
spin_unlock(&fc->lock);
wake_up(&req->waitq);
fuse_put_request(fc, req);
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
spin_unlock(&fc->lock);
down_read(&fc->sbput_sem);
fuse_release_background(fc, req);
up_read(&fc->sbput_sem);
fuse_put_request(fc, req);
* Unfortunately request interruption does not just solve the deadlock
* problem, it causes problems too. These stem from the fact that an
* interrupted request continues to be processed in userspace, while
* all the locks and object references (inode and file) held during
* the operation are released.
*
* Releasing the locks is exactly why the request needs to be
* interrupted, so there's not a lot that can be done about this,
* except introducing additional locking in userspace.
*
* More important is keeping the inode and file references until
* userspace has replied, otherwise FORGET and RELEASE could be sent
* while the inode/file is still being used by the filesystem.
*
* For this reason the concept of a "background" request is introduced.
* An interrupted request is backgrounded if it has already been sent
* to userspace. Backgrounding involves getting an extra reference to
* the inode(s) or file used in the request, and adding the request to
* the fc->background list. When a reply is received for a background
* request, the object references are released, and the request is
* removed from the list. If the filesystem is unmounted while there
* are still background requests, the list is walked and the references
* are released as if a reply was received.
* There's one more use for a background request. The RELEASE message is
* always sent as background, since it doesn't return an error or store
* any data.
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
list_add(&req->bg_entry, &fc->background);
fc->num_background++;
if (fc->num_background == FUSE_MAX_BACKGROUND)
req->inode = igrab(req->inode);
req->inode2 = igrab(req->inode2);
/* Called with fc->lock held. Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
spin_unlock(&fc->lock);
wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
restore_sigs(&oldset);
spin_lock(&fc->lock);
if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
if (!req->interrupted) {
req->out.h.error = -EINTR;
req->interrupted = 1;
/* This is uninterruptible sleep, because data is
   being copied to/from the buffers of req. During
   locked state, there mustn't be any filesystem
   operation (e.g. page fault), since that could lead
   to deadlock */
spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
spin_lock(&fc->lock);
if (req->state == FUSE_REQ_PENDING) {
list_del(&req->list);
__fuse_put_request(req);
} else if (req->state == FUSE_REQ_SENT)
background_request(fc, req);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
for (i = 0; i < numargs; i++)
nbytes += args[i].size;

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
/* zero is special */
req->in.h.unique = fc->reqctr;
req->in.h.len = sizeof(struct fuse_in_header) +
len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
list_add_tail(&req->list, &fc->pending);
req->state = FUSE_REQ_PENDING;
atomic_inc(&fc->num_waiting);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
* This can only be interrupted by a SIGKILL
void request_send(struct fuse_conn *fc, struct fuse_req *req)
spin_lock(&fc->lock);
req->out.h.error = -ENOTCONN;
else if (fc->conn_error)
req->out.h.error = -ECONNREFUSED;
queue_request(fc, req);
/* acquire extra reference, since request is still needed
   after request_end() */
__fuse_get_request(req);
request_wait_answer(fc, req);
spin_unlock(&fc->lock);
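
/*
 * Illustrative sketch (not from the original file): how a caller might
 * drive the request API above for a synchronous operation.
 * fuse_example_getattr() is a hypothetical helper; the opcode and
 * argument layout are only an example, and it assumes fuse_get_req()
 * reports failure via ERR_PTR(), as its callers elsewhere do.
 */
static int fuse_example_getattr(struct fuse_conn *fc, u64 nodeid,
				struct fuse_attr_out *outarg)
{
	int err;
	struct fuse_req *req = fuse_get_req(fc);

	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_GETATTR;
	req->in.h.nodeid = nodeid;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(*outarg);
	req->out.args[0].value = outarg;
	request_send(fc, req);		/* blocks until the reply arrives */
	err = req->out.h.error;
	fuse_put_request(fc, req);
	return err;
}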
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
spin_lock(&fc->lock);
background_request(fc, req);
queue_request(fc, req);
spin_unlock(&fc->lock);
req->out.h.error = -ENOTCONN;
request_end(fc, req);

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
request_send_nowait(fc, req);

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
request_send_nowait(fc, req);
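
/*
 * Illustrative sketch (not from the original file): queueing a no-reply
 * request such as FORGET, loosely modelled on fuse_send_forget()
 * elsewhere in the filesystem.  example_send_forget() is hypothetical;
 * note that 'inarg' must stay valid until userspace has read the
 * request (the real code embeds the argument in the request itself).
 */
static void example_send_forget(struct fuse_conn *fc, struct fuse_req *req,
				struct fuse_forget_in *inarg,
				u64 nodeid, u64 nlookup)
{
	inarg->nlookup = nlookup;
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*inarg);
	req->in.args[0].value = inarg;
	request_send_noreply(fc, req);	/* no reply: finished when read by userspace */
}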
* Lock the request. Up to the next unlock_request() there mustn't be
* anything that could cause a page-fault. If the request was already
* interrupted, bail out.
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
spin_lock(&fc->lock);
if (req->interrupted)
spin_unlock(&fc->lock);
* Unlock request. If it was interrupted while being locked, the
* requester thread is currently waiting for it to be unlocked, so
* wake it up.
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
spin_lock(&fc->lock);
if (req->interrupted)
wake_up(&req->waitq);
spin_unlock(&fc->lock);
struct fuse_copy_state {
struct fuse_conn *fc;
struct fuse_req *req;
const struct iovec *iov;
unsigned long nr_segs;
unsigned long seglen;

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
int write, struct fuse_req *req,
const struct iovec *iov, unsigned long nr_segs)
memset(cs, 0, sizeof(*cs));
cs->nr_segs = nr_segs;

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
kunmap_atomic(cs->mapaddr, KM_USER0);
flush_dcache_page(cs->pg);
set_page_dirty_lock(cs->pg);
* Get another pageful of the userspace buffer, map it to kernel
* address space, and lock the request
static int fuse_copy_fill(struct fuse_copy_state *cs)
unsigned long offset;
unlock_request(cs->fc, cs->req);
fuse_copy_finish(cs);
BUG_ON(!cs->nr_segs);
cs->seglen = cs->iov[0].iov_len;
cs->addr = (unsigned long) cs->iov[0].iov_base;
down_read(&current->mm->mmap_sem);
err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
up_read(&current->mm->mmap_sem);
offset = cs->addr % PAGE_SIZE;
cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
cs->buf = cs->mapaddr + offset;
cs->len = min(PAGE_SIZE - offset, cs->seglen);
cs->seglen -= cs->len;
return lock_request(cs->fc, cs->req);
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
unsigned ncpy = min(*size, cs->len);
memcpy(cs->buf, *val, ncpy);
memcpy(*val, cs->buf, ncpy);
* Copy a page in the request to/from the userspace buffer. Must be
* done atomically.
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
unsigned offset, unsigned count, int zeroing)
if (page && zeroing && count < PAGE_SIZE) {
void *mapaddr = kmap_atomic(page, KM_USER1);
memset(mapaddr, 0, PAGE_SIZE);
kunmap_atomic(mapaddr, KM_USER1);
if (!cs->len && (err = fuse_copy_fill(cs)))
void *mapaddr = kmap_atomic(page, KM_USER1);
void *buf = mapaddr + offset;
offset += fuse_copy_do(cs, &buf, &count);
kunmap_atomic(mapaddr, KM_USER1);
offset += fuse_copy_do(cs, NULL, &count);
if (page && !cs->write)
flush_dcache_page(page);

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
struct fuse_req *req = cs->req;
unsigned offset = req->page_offset;
unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
struct page *page = req->pages[i];
int err = fuse_copy_page(cs, page, offset, count, zeroing);
count = min(nbytes, (unsigned) PAGE_SIZE);

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
if (!cs->len && (err = fuse_copy_fill(cs)))
fuse_copy_do(cs, &val, &size);

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
unsigned argpages, struct fuse_arg *args,
for (i = 0; !err && i < numargs; i++) {
struct fuse_arg *arg = &args[i];
if (i == numargs - 1 && argpages)
err = fuse_copy_pages(cs, arg->size, zeroing);
err = fuse_copy_one(cs, arg->value, arg->size);
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&fc->waitq, &wait);
while (fc->connected && list_empty(&fc->pending)) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
spin_unlock(&fc->lock);
spin_lock(&fc->lock);
set_current_state(TASK_RUNNING);
remove_wait_queue(&fc->waitq, &wait);
* Read a single request into the userspace filesystem's buffer. This
* function waits until a request is available, then removes it from
* the pending list and copies the request data to the userspace buffer.
* If no reply is needed (FORGET) or the request has been interrupted or
* there was an error during the copying then it's finished by calling
* request_end(). Otherwise add it to the processing list, and set
* its state to 'sent'.
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *off)
struct fuse_req *req;
struct fuse_copy_state cs;
struct fuse_conn *fc = fuse_get_conn(file);
spin_lock(&fc->lock);
if ((file->f_flags & O_NONBLOCK) && fc->connected &&
list_empty(&fc->pending))
if (list_empty(&fc->pending))
req = list_entry(fc->pending.next, struct fuse_req, list);
req->state = FUSE_REQ_READING;
list_move(&req->list, &fc->io);
/* If the request is too large, reply with an error and restart the read */
if (iov_length(iov, nr_segs) < reqsize) {
req->out.h.error = -EIO;
/* SETXATTR is special, since it may contain data that is too large */
if (in->h.opcode == FUSE_SETXATTR)
req->out.h.error = -E2BIG;
request_end(fc, req);
spin_unlock(&fc->lock);
fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
err = fuse_copy_args(&cs, in->numargs, in->argpages,
(struct fuse_arg *) in->args, 0);
fuse_copy_finish(&cs);
spin_lock(&fc->lock);
if (!err && req->interrupted)
if (!req->interrupted)
req->out.h.error = -EIO;
request_end(fc, req);
request_end(fc, req);
req->state = FUSE_REQ_SENT;
list_move_tail(&req->list, &fc->processing);
spin_unlock(&fc->lock);
spin_unlock(&fc->lock);

static ssize_t fuse_dev_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *off)
iov.iov_len = nbytes;
return fuse_dev_readv(file, &iov, 1, off);
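
/*
 * Userspace side of the read path above (illustrative sketch, not part
 * of the original file): a filesystem daemon reads one request per
 * read() on /dev/fuse and dispatches on the opcode in fuse_in_header.
 * The buffer must be large enough for the largest possible request,
 * otherwise the kernel replies with -EIO as seen in fuse_dev_readv().
 * handle_request() is a hypothetical helper; the buffer size is only
 * an example.
 */
#include <unistd.h>
#include <stdio.h>
#include <linux/fuse.h>

#define EXAMPLE_BUFSIZE	(64 * 1024 + 4096)

static void example_daemon_loop(int fuse_fd)
{
	static char buf[EXAMPLE_BUFSIZE];
	struct fuse_in_header *in = (struct fuse_in_header *) buf;

	for (;;) {
		ssize_t n = read(fuse_fd, buf, sizeof(buf));
		if (n < (ssize_t) sizeof(*in))
			break;	/* error, or the connection went away */

		printf("opcode %u unique %llu nodeid %llu len %u\n",
		       in->opcode, (unsigned long long) in->unique,
		       (unsigned long long) in->nodeid, in->len);
		/* handle_request(fuse_fd, in, buf + sizeof(*in), n - sizeof(*in)); */
	}
}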
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
struct list_head *entry;
list_for_each(entry, &fc->processing) {
struct fuse_req *req;
req = list_entry(entry, struct fuse_req, list);
if (req->in.h.unique == unique)

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
unsigned reqsize = sizeof(struct fuse_out_header);
return nbytes != reqsize ? -EINVAL : 0;
reqsize += len_args(out->numargs, out->args);
if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
else if (reqsize > nbytes) {
struct fuse_arg *lastarg = &out->args[out->numargs-1];
unsigned diffsize = reqsize - nbytes;
if (diffsize > lastarg->size)
lastarg->size -= diffsize;
return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
* Write a single reply to a request. First the header is copied from
* the write buffer. The request is then looked up on the processing
* list by the unique ID found in the header. If found, it is removed
* from the list and the rest of the buffer is copied into the request.
* The request is finished by calling request_end().
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *off)
unsigned nbytes = iov_length(iov, nr_segs);
struct fuse_req *req;
struct fuse_out_header oh;
struct fuse_copy_state cs;
struct fuse_conn *fc = fuse_get_conn(file);
fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
if (nbytes < sizeof(struct fuse_out_header))
err = fuse_copy_one(&cs, &oh, sizeof(oh));
if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
spin_lock(&fc->lock);
req = request_find(fc, oh.unique);
if (req->interrupted) {
spin_unlock(&fc->lock);
fuse_copy_finish(&cs);
spin_lock(&fc->lock);
request_end(fc, req);
list_move(&req->list, &fc->io);
spin_unlock(&fc->lock);
err = copy_out_args(&cs, &req->out, nbytes);
fuse_copy_finish(&cs);
spin_lock(&fc->lock);
if (req->interrupted)
} else if (!req->interrupted)
req->out.h.error = -EIO;
request_end(fc, req);
return err ? err : nbytes;
spin_unlock(&fc->lock);
fuse_copy_finish(&cs);
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *off)
iov.iov_len = nbytes;
iov.iov_base = (char __user *) buf;
return fuse_dev_writev(file, &iov, 1, off);
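
/*
 * Userspace side of the write path above (illustrative sketch, not part
 * of the original file): the daemon answers a request by writing a
 * fuse_out_header carrying the same 'unique' value, followed by the
 * reply payload, in a single writev().  A pure error reply carries no
 * payload and a negated errno; 'len' must match the total bytes
 * written, as checked in fuse_dev_writev().
 */
#include <sys/uio.h>
#include <string.h>
#include <linux/fuse.h>

static int example_daemon_reply(int fuse_fd, unsigned long long unique,
				int error, const void *data, size_t size)
{
	struct fuse_out_header out;
	struct iovec iov[2];

	memset(&out, 0, sizeof(out));
	out.unique = unique;
	out.error = error;		/* 0 or a negated errno, e.g. -ENOENT */
	out.len = sizeof(out) + (error ? 0 : size);

	iov[0].iov_base = &out;
	iov[0].iov_len = sizeof(out);
	iov[1].iov_base = (void *) data;
	iov[1].iov_len = error ? 0 : size;

	return writev(fuse_fd, iov, error ? 1 : 2) < 0 ? -1 : 0;
}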
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
unsigned mask = POLLOUT | POLLWRNORM;
struct fuse_conn *fc = fuse_get_conn(file);
poll_wait(file, &fc->waitq, wait);
spin_lock(&fc->lock);
else if (!list_empty(&fc->pending))
mask |= POLLIN | POLLRDNORM;
spin_unlock(&fc->lock);
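
/*
 * Userspace example of the poll support above (illustrative, not part
 * of the original file): a daemon multiplexing /dev/fuse with other
 * descriptors only read()s when POLLIN is reported, matching the
 * POLLIN | POLLRDNORM set in fuse_dev_poll() when requests are pending.
 */
#include <poll.h>

static int example_fuse_fd_ready(int fuse_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	return n > 0 && (pfd.revents & POLLIN);
}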
* Abort all requests on the given list (pending or processing)
*
* This function releases and reacquires fc->lock
static void end_requests(struct fuse_conn *fc, struct list_head *head)
while (!list_empty(head)) {
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
request_end(fc, req);
spin_lock(&fc->lock);
* Abort requests under I/O
*
* The requests are set to interrupted and finished, and the request
* waiter is woken up. This will make request_wait_answer() wait
* until the request is unlocked and then return.
*
* If the request is asynchronous, then the end function needs to be
* called after waiting for the request to be unlocked (if it was
* locked).
static void end_io_requests(struct fuse_conn *fc)
while (!list_empty(&fc->io)) {
struct fuse_req *req =
list_entry(fc->io.next, struct fuse_req, list);
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->interrupted = 1;
req->out.h.error = -ECONNABORTED;
req->state = FUSE_REQ_FINISHED;
list_del_init(&req->list);
wake_up(&req->waitq);
/* The end function will consume this reference */
__fuse_get_request(req);
spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
spin_lock(&fc->lock);
* Abort all requests.
*
* Emergency exit in case of a malicious or accidental deadlock, or
* just a hung filesystem.
*
* The same effect is usually achievable through killing the
* filesystem daemon and all users of the filesystem. The exception
* is the combination of an asynchronous request and the tricky
* deadlock (see Documentation/filesystems/fuse.txt).
*
* During the aborting, progression of requests from the pending and
* processing lists onto the io list, and progression of new requests
* onto the pending list is prevented by fc->connected being false.
*
* Progression of requests under I/O to the processing list is
* prevented by the req->interrupted flag being true for these
* requests. For this reason requests on the io list must be aborted
* first.
void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&fc->lock);
end_requests(fc, &fc->pending);
end_requests(fc, &fc->processing);
wake_up_all(&fc->waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
spin_unlock(&fc->lock);
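
/*
 * Illustrative sketch (not from the original file): triggering the
 * abort above from userspace.  Documentation/filesystems/fuse.txt
 * describes an "abort" attribute under /sys/fs/fuse/connections/<N>/;
 * the exact path and the connection number are assumptions here, so
 * treat this as a sketch rather than a guaranteed interface.
 */
#include <stdio.h>

static int example_abort_connection(const char *conn_dir)
{
	/* conn_dir: e.g. "/sys/fs/fuse/connections/42" */
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/abort", conn_dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs("1", f);		/* the write ends up in fuse_abort_conn() */
	fclose(f);
	return 0;
}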
static int fuse_dev_release(struct inode *inode, struct file *file)
struct fuse_conn *fc = fuse_get_conn(file);
spin_lock(&fc->lock);
end_requests(fc, &fc->pending);
end_requests(fc, &fc->processing);
spin_unlock(&fc->lock);
fasync_helper(-1, file, 0, &fc->fasync);
kobject_put(&fc->kobj);

static int fuse_dev_fasync(int fd, struct file *file, int on)
struct fuse_conn *fc = fuse_get_conn(file);
/* No locking - fasync_helper does its own locking */
return fasync_helper(fd, file, on, &fc->fasync);
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,

static struct miscdevice fuse_miscdevice = {
	.fops		= &fuse_dev_operations,
int __init fuse_dev_init(void)
fuse_req_cachep = kmem_cache_create("fuse_request",
sizeof(struct fuse_req),
if (!fuse_req_cachep)
err = misc_register(&fuse_miscdevice);
goto out_cache_clean;
kmem_cache_destroy(fuse_req_cachep);

void fuse_dev_cleanup(void)
misc_deregister(&fuse_miscdevice);
kmem_cache_destroy(fuse_req_cachep);