/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/aio.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
        /*
         * Lockless access is OK, because file->private_data is set
         * once during mount and is valid until the file is released.
         */
        return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
                              struct fuse_page_desc *page_descs,
                              unsigned npages)
{
        memset(req, 0, sizeof(*req));
        memset(pages, 0, sizeof(*pages) * npages);
        memset(page_descs, 0, sizeof(*page_descs) * npages);
        INIT_LIST_HEAD(&req->list);
        INIT_LIST_HEAD(&req->intr_entry);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
        req->pages = pages;
        req->page_descs = page_descs;
        req->max_pages = npages;
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
        if (req) {
                struct page **pages;
                struct fuse_page_desc *page_descs;

                if (npages <= FUSE_REQ_INLINE_PAGES) {
                        pages = req->inline_pages;
                        page_descs = req->inline_page_descs;
                } else {
                        pages = kmalloc(sizeof(struct page *) * npages, flags);
                        page_descs = kmalloc(sizeof(struct fuse_page_desc) *
                                             npages, flags);
                }

                if (!pages || !page_descs) {
                        kfree(pages);
                        kfree(page_descs);
                        kmem_cache_free(fuse_req_cachep, req);
                        return NULL;
                }

                fuse_request_init(req, pages, page_descs, npages);
        }
        return req;
}
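
/*
 * Requests covering at most FUSE_REQ_INLINE_PAGES pages reuse the
 * arrays embedded in struct fuse_req itself, so the common small
 * request costs only the slab allocation above.  Illustrative sketch
 * (the page counts are arbitrary examples):
 *
 *      req = fuse_request_alloc(1);    // small: uses req->inline_pages
 *      req = fuse_request_alloc(32);   // large: kmallocs separate arrays
 */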

struct fuse_req *fuse_request_alloc(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
        return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
        if (req->pages != req->inline_pages) {
                kfree(req->pages);
                kfree(req->page_descs);
        }
        kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
        req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
        req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
        req->in.h.pid = current->pid;
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
        struct fuse_req *req;
        sigset_t oldset;
        int intr;
        int err;

        atomic_inc(&fc->num_waiting);
        block_sigs(&oldset);
        intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
        restore_sigs(&oldset);
        err = -EINTR;
        if (intr)
                goto out;

        err = -ENOTCONN;
        if (!fc->connected)
                goto out;

        req = fuse_request_alloc(npages);
        err = -ENOMEM;
        if (!req)
                goto out;

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;

 out:
        atomic_dec(&fc->num_waiting);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
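
/*
 * Typical caller pattern, as a sketch only (opcode and argument setup
 * vary per operation; see the callers elsewhere in fs/fuse/):
 *
 *      req = fuse_get_req(fc, 0);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *      req->in.h.opcode = FUSE_GETATTR;
 *      ... fill in req->in.args[] ...
 *      fuse_request_send(fc, req);
 *      err = req->out.h.error;
 *      fuse_put_request(fc, req);
 */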

/*
 * Return the request stored in fuse_file->reserved_req.  It may
 * currently be in use; if so, wait for it to become available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
                                         struct file *file)
{
        struct fuse_req *req = NULL;
        struct fuse_file *ff = file->private_data;

        do {
                wait_event(fc->reserved_req_waitq, ff->reserved_req);
                spin_lock(&fc->lock);
                if (ff->reserved_req) {
                        req = ff->reserved_req;
                        ff->reserved_req = NULL;
                        req->stolen_file = get_file(file);
                }
                spin_unlock(&fc->lock);
        } while (!req);

        return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct file *file = req->stolen_file;
        struct fuse_file *ff = file->private_data;

        spin_lock(&fc->lock);
        fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
        BUG_ON(ff->reserved_req);
        ff->reserved_req = req;
        wake_up_all(&fc->reserved_req_waitq);
        spin_unlock(&fc->lock);
        fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
                                             struct file *file)
{
        struct fuse_req *req;

        atomic_inc(&fc->num_waiting);
        wait_event(fc->blocked_waitq, !fc->blocked);
        req = fuse_request_alloc(0);
        if (!req)
                req = get_reserved_req(fc, file);

        fuse_req_init_context(req);
        req->waiting = 1;
        return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count)) {
                if (req->waiting)
                        atomic_dec(&fc->num_waiting);

                if (req->stolen_file)
                        put_reserved_req(fc, req);
                else
                        fuse_request_free(req);
        }
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

static u64 fuse_get_unique(struct fuse_conn *fc)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;

        return fc->reqctr;
}
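
/*
 * Zero must never be handed out: a fuse_out_header carrying
 * unique == 0 is treated as a notification rather than a reply on the
 * device write path, so a valid request ID has to be non-zero.
 */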

static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        if (!req->waiting) {
                req->waiting = 1;
                atomic_inc(&fc->num_waiting);
        }
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
                       u64 nodeid, u64 nlookup)
{
        forget->forget_one.nodeid = nodeid;
        forget->forget_one.nlookup = nlookup;

        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->forget_list_tail->next = forget;
                fc->forget_list_tail = forget;
                wake_up(&fc->waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        } else {
                kfree(forget);
        }
        spin_unlock(&fc->lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
        while (fc->active_background < fc->max_background &&
               !list_empty(&fc->bg_queue)) {
                struct fuse_req *req;

                req = list_entry(fc->bg_queue.next, struct fuse_req, list);
                list_del(&req->list);
                fc->active_background++;
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
        }
}
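
/*
 * Background requests are throttled here: at most fc->max_background
 * of them are queued to the pending list at once.  Once the limit is
 * hit, fc->blocked stalls new fuse_get_req() callers until
 * request_end() below drops num_background back under the limit.
 */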

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, and the reference to the
 * request is released.
 *
 * Called with fc->lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
        void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
        req->end = NULL;
        list_del(&req->list);
        list_del(&req->intr_entry);
        req->state = FUSE_REQ_FINISHED;
        if (req->background) {
                if (fc->num_background == fc->max_background) {
                        fc->blocked = 0;
                        wake_up_all(&fc->blocked_waitq);
                }
                if (fc->num_background == fc->congestion_threshold &&
                    fc->connected && fc->bdi_initialized) {
                        clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                        clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
                }
                fc->num_background--;
                fc->active_background--;
                flush_bg_queue(fc);
        }
        spin_unlock(&fc->lock);
        wake_up(&req->waitq);
        if (end)
                end(fc, req);
        fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (signal_pending(current))
                return;

        spin_unlock(&fc->lock);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);
}

static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
        list_add_tail(&req->intr_entry, &fc->interrupts);
        wake_up(&fc->waitq);
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
                wait_answer_interruptible(fc, req);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                req->interrupted = 1;
                if (req->state == FUSE_REQ_SENT)
                        queue_interrupt(fc, req);
        }

        if (!req->force) {
                sigset_t oldset;

                /* Only fatal signals may interrupt this */
                block_sigs(&oldset);
                wait_answer_interruptible(fc, req);
                restore_sigs(&oldset);

                if (req->aborted)
                        goto aborted;
                if (req->state == FUSE_REQ_FINISHED)
                        return;

                /* Request is not yet in userspace, bail out */
                if (req->state == FUSE_REQ_PENDING) {
                        list_del(&req->list);
                        __fuse_put_request(req);
                        req->out.h.error = -EINTR;
                        return;
                }
        }

        /*
         * Either request is already in userspace, or it was forced.
         * Wait it out.
         */
        spin_unlock(&fc->lock);
        wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
        spin_lock(&fc->lock);

        if (!req->aborted)
                return;

 aborted:
        BUG_ON(req->state != FUSE_REQ_FINISHED);
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fc->lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fc->lock);
        }
}
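
/*
 * Note the three waiting modes above, in decreasing leniency: fully
 * interruptible (any signal queues a FUSE_INTERRUPT for userspace),
 * then killable only (a fatal signal yields -EINTR if the request is
 * still pending), and finally uninterruptible once the request has
 * reached userspace or was forced.
 */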

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                req->in.h.unique = fuse_get_unique(fc);
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fc->lock);
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        __fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
                                            struct fuse_req *req)
{
        req->background = 1;
        fc->num_background++;
        if (fc->num_background == fc->max_background)
                fc->blocked = 1;
        if (fc->num_background == fc->congestion_threshold &&
            fc->bdi_initialized) {
                set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
                set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
        }
        list_add_tail(&req->list, &fc->bg_queue);
        flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fuse_request_send_nowait_locked(fc, req);
                spin_unlock(&fc->lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
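
/*
 * Sketch of a background caller (my_end_callback is a placeholder
 * name, not part of this file):
 *
 *      req->end = my_end_callback;     // runs from request_end()
 *      fuse_request_send_background(fc, req);
 *      // do not touch req afterwards; completion is via the callback
 */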

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
                                          struct fuse_req *req, u64 unique)
{
        int err = -ENODEV;

        req->isreply = 0;
        req->in.h.unique = unique;
        spin_lock(&fc->lock);
        if (fc->connected) {
                queue_request(fc, req);
                err = 0;
        }
        spin_unlock(&fc->lock);

        return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
                                         struct fuse_req *req)
{
        req->isreply = 1;
        fuse_request_send_nowait_locked(fc, req);
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_forget_in inarg;

        memset(&inarg, 0, sizeof(inarg));
        inarg.nlookup = 1;
        req = fuse_get_req_nofail_nopages(fc, file);
        req->in.h.opcode = FUSE_FORGET;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->isreply = 0;
        __fuse_request_send(fc, req);
        /* ignore errors */
        fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fc->lock);
                if (req->aborted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fc->lock);
        }
        return err;
}

/*
 * Unlock request.  If it was aborted while it was locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (req) {
                spin_lock(&fc->lock);
                req->locked = 0;
                if (req->aborted)
                        wake_up(&req->waitq);
                spin_unlock(&fc->lock);
        }
}

struct fuse_copy_state {
        struct fuse_conn *fc;
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        struct pipe_buffer *pipebufs;
        struct pipe_buffer *currbuf;
        struct pipe_inode_info *pipe;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
        unsigned move_pages:1;
};
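
/*
 * fuse_copy_state drives the page-at-a-time copy loop below: it walks
 * either an iovec (plain read/write on the device) or an array of
 * pipe buffers (splice), and exposes the currently mapped window
 * through cs->buf/cs->len so that fuse_copy_do() stays agnostic of
 * the source.
 */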

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
                           int write,
                           const struct iovec *iov, unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->fc = fc;
        cs->write = write;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;

                if (!cs->write) {
                        buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
                } else {
                        kunmap(buf->page);
                        buf->len = PAGE_SIZE - cs->len;
                }
                cs->currbuf = NULL;
                cs->mapaddr = NULL;
        } else if (cs->mapaddr) {
                kunmap(cs->pg);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pageful of userspace buffer, map it to kernel address
 * space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);
        if (cs->pipebufs) {
                struct pipe_buffer *buf = cs->pipebufs;

                if (!cs->write) {
                        err = buf->ops->confirm(cs->pipe, buf);
                        if (err)
                                return err;

                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
                        cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
                        struct page *page;

                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;

                        page = alloc_page(GFP_HIGHUSER);
                        if (!page)
                                return -ENOMEM;

                        buf->page = page;
                        buf->offset = 0;
                        buf->len = 0;

                        cs->currbuf = buf;
                        cs->mapaddr = kmap(page);
                        cs->buf = cs->mapaddr;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
                }
        } else {
                if (!cs->seglen) {
                        BUG_ON(!cs->nr_segs);
                        cs->seglen = cs->iov[0].iov_len;
                        cs->addr = (unsigned long) cs->iov[0].iov_base;
                        cs->iov++;
                        cs->nr_segs--;
                }
                err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
                offset = cs->addr % PAGE_SIZE;
                cs->mapaddr = kmap(cs->pg);
                cs->buf = cs->mapaddr + offset;
                cs->len = min(PAGE_SIZE - offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }

        return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

static int fuse_check_page(struct page *page)
{
        if (page_mapcount(page) ||
            page->mapping != NULL ||
            page_count(page) != 1 ||
            (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
             ~(1 << PG_locked |
               1 << PG_referenced |
               1 << PG_uptodate |
               1 << PG_lru |
               1 << PG_active |
               1 << PG_reclaim))) {
                printk(KERN_WARNING "fuse: trying to steal weird page\n");
                printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
                return 1;
        }
        return 0;
}
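
/*
 * These checks ensure a page about to be stolen is "ordinary" memory:
 * unmapped, not in any mapping, singly referenced and free of
 * state-carrying flags, so fuse_try_move_page() below can insert it
 * into the page cache without inheriting someone else's state.
 */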

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
        int err;
        struct page *oldpage = *pagep;
        struct page *newpage;
        struct pipe_buffer *buf = cs->pipebufs;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        err = buf->ops->confirm(cs->pipe, buf);
        if (err)
                return err;

        BUG_ON(!cs->nr_segs);
        cs->currbuf = buf;
        cs->len = buf->len;
        cs->pipebufs++;
        cs->nr_segs--;

        if (cs->len != PAGE_SIZE)
                goto out_fallback;

        if (buf->ops->steal(cs->pipe, buf) != 0)
                goto out_fallback;

        newpage = buf->page;

        if (WARN_ON(!PageUptodate(newpage)))
                return -EIO;

        ClearPageMappedToDisk(newpage);

        if (fuse_check_page(newpage) != 0)
                goto out_fallback_unlock;

        /*
         * This is a new and locked page, it shouldn't be mapped or
         * have any special flags on it
         */
        if (WARN_ON(page_mapped(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(page_has_private(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
                goto out_fallback_unlock;
        if (WARN_ON(PageMlocked(oldpage)))
                goto out_fallback_unlock;

        err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
        if (err) {
                unlock_page(newpage);
                return err;
        }

        page_cache_get(newpage);

        if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                lru_cache_add_file(newpage);

        err = 0;
        spin_lock(&cs->fc->lock);
        if (cs->req->aborted)
                err = -ENOENT;
        else
                *pagep = newpage;
        spin_unlock(&cs->fc->lock);

        if (err) {
                unlock_page(newpage);
                page_cache_release(newpage);
                return err;
        }

        unlock_page(oldpage);
        page_cache_release(oldpage);
        cs->len = 0;

        return 0;

out_fallback_unlock:
        unlock_page(newpage);
out_fallback:
        cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
        cs->buf = cs->mapaddr + buf->offset;

        err = lock_request(cs->fc, cs->req);
        if (err)
                return err;

        return 1;
}
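
/*
 * This is the zero-copy path used when the device write comes in via
 * splice with SPLICE_F_MOVE: instead of copying PAGE_SIZE bytes, the
 * pipe buffer's page itself replaces the request's page in the page
 * cache.  Any failure returns 1 to fall back to the ordinary copy in
 * fuse_copy_page().
 */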

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
                         unsigned offset, unsigned count)
{
        struct pipe_buffer *buf;

        if (cs->nr_segs == cs->pipe->buffers)
                return -EIO;

        unlock_request(cs->fc, cs->req);
        fuse_copy_finish(cs);

        buf = cs->pipebufs;
        page_cache_get(page);
        buf->page = page;
        buf->offset = offset;
        buf->len = count;

        cs->pipebufs++;
        cs->nr_segs++;
        cs->len = 0;

        return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                          unsigned offset, unsigned count, int zeroing)
{
        int err;
        struct page *page = *pagep;

        if (page && zeroing && count < PAGE_SIZE)
                clear_highpage(page);

        while (count) {
                if (cs->write && cs->pipebufs && page) {
                        return fuse_ref_page(cs, page, offset, count);
                } else if (!cs->len) {
                        if (cs->move_pages && page &&
                            offset == 0 && count == PAGE_SIZE) {
                                err = fuse_try_move_page(cs, pagep);
                                if (err <= 0)
                                        return err;
                        } else {
                                err = fuse_copy_fill(cs);
                                if (err)
                                        return err;
                        }
                }
                if (page) {
                        void *mapaddr = kmap_atomic(page);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                int err;
                unsigned offset = req->page_descs[i].offset;
                unsigned count = min(nbytes, req->page_descs[i].length);

                err = fuse_copy_page(cs, &req->pages[i], offset, count,
                                     zeroing);
                if (err)
                        return err;

                nbytes -= count;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                if (!cs->len) {
                        int err = fuse_copy_fill(cs);
                        if (err)
                                return err;
                }
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++)  {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

static int forget_pending(struct fuse_conn *fc)
{
        return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
        return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
                forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->connected && !request_pending(fc)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fc->lock);
                schedule();
                spin_lock(&fc->lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
                               size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
        struct fuse_in_header ih;
        struct fuse_interrupt_in arg;
        unsigned reqsize = sizeof(ih) + sizeof(arg);
        int err;

        list_del_init(&req->intr_entry);
        req->intr_unique = fuse_get_unique(fc);
        memset(&ih, 0, sizeof(ih));
        memset(&arg, 0, sizeof(arg));
        ih.len = reqsize;
        ih.opcode = FUSE_INTERRUPT;
        ih.unique = req->intr_unique;
        arg.unique = req->in.h.unique;

        spin_unlock(&fc->lock);
        if (nbytes < reqsize)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
                                               unsigned max,
                                               unsigned *countp)
{
        struct fuse_forget_link *head = fc->forget_list_head.next;
        struct fuse_forget_link **newhead = &head;
        unsigned count;

        for (count = 0; *newhead != NULL && count < max; count++)
                newhead = &(*newhead)->next;

        fc->forget_list_head.next = *newhead;
        *newhead = NULL;
        if (fc->forget_list_head.next == NULL)
                fc->forget_list_tail = &fc->forget_list_head;

        if (countp != NULL)
                *countp = count;

        return head;
}
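
/*
 * Forgets travel on their own singly-linked list (fc->forget_list_*)
 * rather than on fc->pending: they need no reply and can be batched,
 * and dequeue_forget() splices off up to 'max' of them in one go for
 * FUSE_BATCH_FORGET.
 */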

static int fuse_read_single_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs,
                                   size_t nbytes)
__releases(fc->lock)
{
        int err;
        struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
        struct fuse_forget_in arg = {
                .nlookup = forget->forget_one.nlookup,
        };
        struct fuse_in_header ih = {
                .opcode = FUSE_FORGET,
                .nodeid = forget->forget_one.nodeid,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        spin_unlock(&fc->lock);
        kfree(forget);
        if (nbytes < ih.len)
                return -EINVAL;

        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));
        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
                                   struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
        int err;
        unsigned max_forgets;
        unsigned count;
        struct fuse_forget_link *head;
        struct fuse_batch_forget_in arg = { .count = 0 };
        struct fuse_in_header ih = {
                .opcode = FUSE_BATCH_FORGET,
                .unique = fuse_get_unique(fc),
                .len = sizeof(ih) + sizeof(arg),
        };

        if (nbytes < ih.len) {
                spin_unlock(&fc->lock);
                return -EINVAL;
        }

        max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
        head = dequeue_forget(fc, max_forgets, &count);
        spin_unlock(&fc->lock);

        arg.count = count;
        ih.len += count * sizeof(struct fuse_forget_one);
        err = fuse_copy_one(cs, &ih, sizeof(ih));
        if (!err)
                err = fuse_copy_one(cs, &arg, sizeof(arg));

        while (head) {
                struct fuse_forget_link *forget = head;

                if (!err) {
                        err = fuse_copy_one(cs, &forget->forget_one,
                                            sizeof(forget->forget_one));
                }
                head = forget->next;
                kfree(forget);
        }

        fuse_copy_finish(cs);

        if (err)
                return err;

        return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
                            size_t nbytes)
__releases(fc->lock)
{
        if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
                return fuse_read_single_forget(fc, cs, nbytes);
        else
                return fuse_read_batch_forget(fc, cs, nbytes);
}
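
/*
 * FUSE_BATCH_FORGET only exists from protocol minor version 16 on, so
 * older userspace is fed single FUSE_FORGET messages even when several
 * forgets are queued.
 */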

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to the userspace buffer.
 * If no reply is needed (FORGET), or the request has been aborted, or
 * there was an error during the copying, then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set the
 * 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
                                struct fuse_copy_state *cs, size_t nbytes)
{
        int err;
        struct fuse_req *req;
        struct fuse_in *in;
        unsigned reqsize;

 restart:
        spin_lock(&fc->lock);
        err = -EAGAIN;
        if ((file->f_flags & O_NONBLOCK) && fc->connected &&
            !request_pending(fc))
                goto err_unlock;

        request_wait(fc);
        err = -ENODEV;
        if (!fc->connected)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (!request_pending(fc))
                goto err_unlock;

        if (!list_empty(&fc->interrupts)) {
                req = list_entry(fc->interrupts.next, struct fuse_req,
                                 intr_entry);
                return fuse_read_interrupt(fc, cs, nbytes, req);
        }

        if (forget_pending(fc)) {
                if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
                        return fuse_read_forget(fc, cs, nbytes);

                if (fc->forget_batch <= -8)
                        fc->forget_batch = 16;
        }

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_move(&req->list, &fc->io);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (nbytes < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fc->lock);
        cs->req = req;
        err = fuse_copy_one(cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(cs);
        spin_lock(&fc->lock);
        req->locked = 0;
        if (req->aborted) {
                request_end(fc, req);
                return -ENODEV;
        }
        if (err) {
                req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_move_tail(&req->list, &fc->processing);
                if (req->interrupted)
                        queue_interrupt(fc, req);
                spin_unlock(&fc->lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fc->lock);
        return err;
}
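
/*
 * Seen from userspace this is one iteration of the daemon's event
 * loop: a read() on the device blocks in request_wait() and returns
 * exactly one message.  Rough sketch of the consumer side (buffer
 * sizing and dispatch() are placeholders; libfuse does this for real):
 *
 *      struct fuse_in_header *ih = (struct fuse_in_header *) buf;
 *      ssize_t n = read(fuse_fd, buf, bufsize);
 *      if (n >= sizeof(*ih))
 *              dispatch(ih->opcode, ih->unique, buf + sizeof(*ih));
 */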

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct file *file = iocb->ki_filp;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 1, iov, nr_segs);

        return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
                                   struct pipe_buffer *buf)
{
        return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = fuse_dev_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};
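
/*
 * ->steal unconditionally returns 1 ("cannot steal"): pages spliced
 * out of the device may be shared with the page cache, so downstream
 * pipe consumers must copy them instead of taking ownership.
 */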

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
                                    struct pipe_inode_info *pipe,
                                    size_t len, unsigned int flags)
{
        int ret;
        int page_nr = 0;
        int do_wakeup = 0;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(in);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        fuse_copy_init(&cs, fc, 1, NULL, 0);
        cs.pipebufs = bufs;
        cs.pipe = pipe;
        ret = fuse_dev_do_read(fc, in, &cs, len);
        if (ret < 0)
                goto out;

        ret = 0;
        pipe_lock(pipe);

        if (!pipe->readers) {
                send_sig(SIGPIPE, current, 0);
                if (!ret)
                        ret = -EPIPE;
                goto out_unlock;
        }

        if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
                ret = -EIO;
                goto out_unlock;
        }

        while (page_nr < cs.nr_segs) {
                int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                struct pipe_buffer *buf = pipe->bufs + newbuf;

                buf->page = bufs[page_nr].page;
                buf->offset = bufs[page_nr].offset;
                buf->len = bufs[page_nr].len;
                buf->ops = &fuse_dev_pipe_buf_ops;

                pipe->nrbufs++;
                page_nr++;
                ret += buf->len;

                if (pipe->files)
                        do_wakeup = 1;
        }

out_unlock:
        pipe_unlock(pipe);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

out:
        for (; page_nr < cs.nr_segs; page_nr++)
                page_cache_release(bufs[page_nr].page);

        kfree(bufs);
        return ret;
}
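
/*
 * Illustrative userspace counterpart (fd names are assumptions):
 *
 *      splice(fuse_fd, NULL, pipe_wr, NULL, len, SPLICE_F_MOVE);
 *
 * moves the request into a pipe without passing through a user buffer,
 * from where it can be spliced on into a file or socket.
 */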

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
                            struct fuse_copy_state *cs)
{
        struct fuse_notify_poll_wakeup_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        fuse_copy_finish(cs);
        return fuse_notify_poll_wakeup(fc, &outarg);

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_inode_out outarg;
        int err = -EINVAL;

        if (size != sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;
        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
                                               outarg.off, outarg.len);
        }
        up_read(&fc->killsb);
        return err;

err:
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
                                   struct fuse_copy_state *cs)
{
        struct fuse_notify_inval_entry_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
                              struct fuse_copy_state *cs)
{
        struct fuse_notify_delete_out outarg;
        int err = -ENOMEM;
        char *buf;
        struct qstr name;

        buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
        if (!buf)
                goto err;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto err;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto err;

        err = -ENAMETOOLONG;
        if (outarg.namelen > FUSE_NAME_MAX)
                goto err;

        err = -EINVAL;
        if (size != sizeof(outarg) + outarg.namelen + 1)
                goto err;

        name.name = buf;
        name.len = outarg.namelen;
        err = fuse_copy_one(cs, buf, outarg.namelen + 1);
        if (err)
                goto err;
        fuse_copy_finish(cs);
        buf[outarg.namelen] = 0;
        name.hash = full_name_hash(name.name, name.len);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb)
                err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
                                               outarg.child, &name);
        up_read(&fc->killsb);
        kfree(buf);
        return err;

err:
        kfree(buf);
        fuse_copy_finish(cs);
        return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
                             struct fuse_copy_state *cs)
{
        struct fuse_notify_store_out outarg;
        struct inode *inode;
        struct address_space *mapping;
        u64 nodeid;
        int err;
        pgoff_t index;
        unsigned int offset;
        unsigned int num;
        loff_t file_size;
        loff_t end;

        err = -EINVAL;
        if (size < sizeof(outarg))
                goto out_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto out_finish;

        err = -EINVAL;
        if (size - sizeof(outarg) != outarg.size)
                goto out_finish;

        nodeid = outarg.nodeid;

        down_read(&fc->killsb);

        err = -ENOENT;
        if (!fc->sb)
                goto out_up_killsb;

        inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
        if (!inode)
                goto out_up_killsb;

        mapping = inode->i_mapping;
        index = outarg.offset >> PAGE_CACHE_SHIFT;
        offset = outarg.offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);
        end = outarg.offset + outarg.size;
        if (end > file_size) {
                file_size = end;
                fuse_write_update_size(inode, file_size);
        }

        num = outarg.size;
        while (num) {
                struct page *page;
                unsigned int this_num;

                err = -ENOMEM;
                page = find_or_create_page(mapping, index,
                                           mapping_gfp_mask(mapping));
                if (!page)
                        goto out_iput;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                err = fuse_copy_page(cs, &page, offset, this_num, 0);
                if (!err && offset == 0 && (num != 0 || file_size == end))
                        SetPageUptodate(page);
                unlock_page(page);
                page_cache_release(page);

                if (err)
                        goto out_iput;

                num -= this_num;
                offset = 0;
                index++;
        }

        err = 0;

out_iput:
        iput(inode);
out_up_killsb:
        up_read(&fc->killsb);
out_finish:
        fuse_copy_finish(cs);
        return err;
}
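
/*
 * FUSE_NOTIFY_STORE lets the filesystem push data into the kernel's
 * page cache without any read request having been issued, growing
 * i_size first when the stored range extends past the current EOF.
 */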

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
        release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                         struct fuse_notify_retrieve_out *outarg)
{
        int err;
        struct address_space *mapping = inode->i_mapping;
        struct fuse_req *req;
        pgoff_t index;
        loff_t file_size;
        unsigned int num;
        unsigned int offset;
        size_t total_len = 0;
        int num_pages;

        offset = outarg->offset & ~PAGE_CACHE_MASK;
        file_size = i_size_read(inode);

        num = outarg->size;
        if (outarg->offset > file_size)
                num = 0;
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;

        num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

        req = fuse_get_req(fc, num_pages);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->in.h.opcode = FUSE_NOTIFY_REPLY;
        req->in.h.nodeid = outarg->nodeid;
        req->in.numargs = 2;
        req->in.argpages = 1;
        req->page_descs[0].offset = offset;
        req->end = fuse_retrieve_end;

        index = outarg->offset >> PAGE_CACHE_SHIFT;

        while (num && req->num_pages < num_pages) {
                struct page *page;
                unsigned int this_num;

                page = find_get_page(mapping, index);
                if (!page)
                        break;

                this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = this_num;
                req->num_pages++;

                offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
        req->in.args[0].size = sizeof(req->misc.retrieve_in);
        req->in.args[0].value = &req->misc.retrieve_in;
        req->in.args[1].size = total_len;

        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
        if (err)
                fuse_retrieve_end(fc, req);

        return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
                                struct fuse_copy_state *cs)
{
        struct fuse_notify_retrieve_out outarg;
        struct inode *inode;
        int err;

        err = -EINVAL;
        if (size != sizeof(outarg))
                goto copy_finish;

        err = fuse_copy_one(cs, &outarg, sizeof(outarg));
        if (err)
                goto copy_finish;

        fuse_copy_finish(cs);

        down_read(&fc->killsb);
        err = -ENOENT;
        if (fc->sb) {
                u64 nodeid = outarg.nodeid;

                inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
                if (inode) {
                        err = fuse_retrieve(fc, inode, &outarg);
                        iput(inode);
                }
        }
        up_read(&fc->killsb);

        return err;

copy_finish:
        fuse_copy_finish(cs);
        return err;
}
1692
1693 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1694                        unsigned int size, struct fuse_copy_state *cs)
1695 {
1696         switch (code) {
1697         case FUSE_NOTIFY_POLL:
1698                 return fuse_notify_poll(fc, size, cs);
1699
1700         case FUSE_NOTIFY_INVAL_INODE:
1701                 return fuse_notify_inval_inode(fc, size, cs);
1702
1703         case FUSE_NOTIFY_INVAL_ENTRY:
1704                 return fuse_notify_inval_entry(fc, size, cs);
1705
1706         case FUSE_NOTIFY_STORE:
1707                 return fuse_notify_store(fc, size, cs);
1708
1709         case FUSE_NOTIFY_RETRIEVE:
1710                 return fuse_notify_retrieve(fc, size, cs);
1711
1712         case FUSE_NOTIFY_DELETE:
1713                 return fuse_notify_delete(fc, size, cs);
1714
1715         default:
1716                 fuse_copy_finish(cs);
1717                 return -EINVAL;
1718         }
1719 }
1720
1721 /* Look up a request on the processing list by its unique or interrupt ID */
1722 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1723 {
1724         struct list_head *entry;
1725
1726         list_for_each(entry, &fc->processing) {
1727                 struct fuse_req *req;
1728                 req = list_entry(entry, struct fuse_req, list);
1729                 if (req->in.h.unique == unique || req->intr_unique == unique)
1730                         return req;
1731         }
1732         return NULL;
1733 }
1734
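/*
 * Check the reply size against the expected out arguments and copy them
 * in.  If the last argument is variable length (out->argvar), a shorter
 * reply shrinks it: e.g. a READ reply may legitimately carry fewer bytes
 * than were requested.
 */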
1735 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1736                          unsigned nbytes)
1737 {
1738         unsigned reqsize = sizeof(struct fuse_out_header);
1739
1740         if (out->h.error)
1741                 return nbytes != reqsize ? -EINVAL : 0;
1742
1743         reqsize += len_args(out->numargs, out->args);
1744
1745         if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1746                 return -EINVAL;
1747         else if (reqsize > nbytes) {
1748                 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1749                 unsigned diffsize = reqsize - nbytes;
1750                 if (diffsize > lastarg->size)
1751                         return -EINVAL;
1752                 lastarg->size -= diffsize;
1753         }
1754         return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1755                               out->page_zeroing);
1756 }
1757
1758 /*
1759  * Write a single reply to a request.  First the header is copied from
1760  * the write buffer.  The request is then looked up on the processing
1761  * list by the unique ID found in the header.  If found, the request is
1762  * removed from the list, the rest of the buffer is copied to it, and
1763  * the request is finished by calling request_end().
1764  */
1765 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1766                                  struct fuse_copy_state *cs, size_t nbytes)
1767 {
1768         int err;
1769         struct fuse_req *req;
1770         struct fuse_out_header oh;
1771
1772         if (nbytes < sizeof(struct fuse_out_header))
1773                 return -EINVAL;
1774
1775         err = fuse_copy_one(cs, &oh, sizeof(oh));
1776         if (err)
1777                 goto err_finish;
1778
1779         err = -EINVAL;
1780         if (oh.len != nbytes)
1781                 goto err_finish;
1782
1783         /*
1784          * A zero oh.unique indicates an unsolicited notification
1785          * message; oh.error then carries the notification code.
1786          */
1787         if (!oh.unique) {
1788                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1789                 return err ? err : nbytes;
1790         }
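        /*
         * For reference, such a notification reuses this header format:
         * oh.unique == 0 and oh.error set to a positive FUSE_NOTIFY_*
         * code (e.g. FUSE_NOTIFY_INVAL_INODE, followed by a struct
         * fuse_notify_inval_inode_out payload), not a negative errno.
         */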
1791
1792         err = -EINVAL;
1793         if (oh.error <= -1000 || oh.error > 0)
1794                 goto err_finish;
1795
1796         spin_lock(&fc->lock);
1797         err = -ENOENT;
1798         if (!fc->connected)
1799                 goto err_unlock;
1800
1801         req = request_find(fc, oh.unique);
1802         if (!req)
1803                 goto err_unlock;
1804
1805         if (req->aborted) {
1806                 spin_unlock(&fc->lock);
1807                 fuse_copy_finish(cs);
1808                 spin_lock(&fc->lock);
1809                 request_end(fc, req);
1810                 return -ENOENT;
1811         }
1812         /* Is it an interrupt reply? */
1813         if (req->intr_unique == oh.unique) {
1814                 err = -EINVAL;
1815                 if (nbytes != sizeof(struct fuse_out_header))
1816                         goto err_unlock;
1817
1818                 if (oh.error == -ENOSYS)
1819                         fc->no_interrupt = 1;
1820                 else if (oh.error == -EAGAIN)
1821                         queue_interrupt(fc, req);
1822
1823                 spin_unlock(&fc->lock);
1824                 fuse_copy_finish(cs);
1825                 return nbytes;
1826         }
1827
1828         req->state = FUSE_REQ_WRITING;
1829         list_move(&req->list, &fc->io);
1830         req->out.h = oh;
1831         req->locked = 1;
1832         cs->req = req;
1833         if (!req->out.page_replace)
1834                 cs->move_pages = 0;
1835         spin_unlock(&fc->lock);
1836
1837         err = copy_out_args(cs, &req->out, nbytes);
1838         fuse_copy_finish(cs);
1839
1840         spin_lock(&fc->lock);
1841         req->locked = 0;
1842         if (!err) {
1843                 if (req->aborted)
1844                         err = -ENOENT;
1845         } else if (!req->aborted)
1846                 req->out.h.error = -EIO;
1847         request_end(fc, req);
1848
1849         return err ? err : nbytes;
1850
1851  err_unlock:
1852         spin_unlock(&fc->lock);
1853  err_finish:
1854         fuse_copy_finish(cs);
1855         return err;
1856 }
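/*
 * Example (not part of this file): the reply format fuse_dev_do_write()
 * parses, as a minimal userspace sketch.  Assumes <linux/fuse.h> and an
 * already-open /dev/fuse descriptor; fuse_fd, req_unique and payload
 * are hypothetical stand-ins.
 */
#include <linux/fuse.h>
#include <stdint.h>
#include <sys/uio.h>
#include <unistd.h>

static ssize_t reply_to_request(int fuse_fd, uint64_t req_unique,
                                const void *payload, uint32_t payload_len)
{
        struct fuse_out_header oh = {
                .len    = sizeof(oh) + payload_len, /* must equal the total write size */
                .error  = 0,                        /* 0 or a negative errno > -1000 */
                .unique = req_unique,               /* copied from the request header */
        };
        struct iovec iov[2] = {
                { .iov_base = &oh,             .iov_len = sizeof(oh) },
                { .iov_base = (void *)payload, .iov_len = payload_len },
        };

        /* One writev() per reply: header first, then the out arguments. */
        return writev(fuse_fd, iov, 2);
}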
1857
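/* The .aio_write entry point; plain write(2) reaches it via do_sync_write(). */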
1858 static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1859                               unsigned long nr_segs, loff_t pos)
1860 {
1861         struct fuse_copy_state cs;
1862         struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1863         if (!fc)
1864                 return -EPERM;
1865
1866         fuse_copy_init(&cs, fc, 0, iov, nr_segs);
1867
1868         return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1869 }
1870
1871 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1872                                      struct file *out, loff_t *ppos,
1873                                      size_t len, unsigned int flags)
1874 {
1875         unsigned nbuf;
1876         unsigned idx;
1877         struct pipe_buffer *bufs;
1878         struct fuse_copy_state cs;
1879         struct fuse_conn *fc;
1880         size_t rem;
1881         ssize_t ret;
1882
1883         fc = fuse_get_conn(out);
1884         if (!fc)
1885                 return -EPERM;
1886
1887         bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1888         if (!bufs)
1889                 return -ENOMEM;
1890
1891         pipe_lock(pipe);
1892         nbuf = 0;
1893         rem = 0;
1894         for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1895                 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1896
1897         ret = -EINVAL;
1898         if (rem < len) {
1899                 pipe_unlock(pipe);
1900                 goto out;
1901         }
1902
1903         rem = len;
1904         while (rem) {
1905                 struct pipe_buffer *ibuf;
1906                 struct pipe_buffer *obuf;
1907
1908                 BUG_ON(nbuf >= pipe->buffers);
1909                 BUG_ON(!pipe->nrbufs);
1910                 ibuf = &pipe->bufs[pipe->curbuf];
1911                 obuf = &bufs[nbuf];
1912
1913                 if (rem >= ibuf->len) {
1914                         *obuf = *ibuf;
1915                         ibuf->ops = NULL;
1916                         pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1917                         pipe->nrbufs--;
1918                 } else {
1919                         ibuf->ops->get(pipe, ibuf);
1920                         *obuf = *ibuf;
1921                         obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1922                         obuf->len = rem;
1923                         ibuf->offset += obuf->len;
1924                         ibuf->len -= obuf->len;
1925                 }
1926                 nbuf++;
1927                 rem -= obuf->len;
1928         }
1929         pipe_unlock(pipe);
1930
1931         fuse_copy_init(&cs, fc, 0, NULL, nbuf);
1932         cs.pipebufs = bufs;
1933         cs.pipe = pipe;
1934
1935         if (flags & SPLICE_F_MOVE)
1936                 cs.move_pages = 1;
1937
1938         ret = fuse_dev_do_write(fc, &cs, len);
1939
1940         for (idx = 0; idx < nbuf; idx++) {
1941                 struct pipe_buffer *buf = &bufs[idx];
1942                 buf->ops->release(pipe, buf);
1943         }
1944 out:
1945         kfree(bufs);
1946         return ret;
1947 }
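/*
 * Example (not part of this file): how a daemon might feed the splice
 * path above.  A sketch under stated assumptions: the pipe already
 * holds a complete reply (fuse_out_header + data), and pipe_rd/fuse_fd
 * are hypothetical descriptors.  SPLICE_F_MOVE asks fuse_dev_do_write()
 * to steal the pipe pages instead of copying them.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_reply(int pipe_rd, int fuse_fd, size_t reply_len)
{
        return splice(pipe_rd, NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
}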
1948
1949 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1950 {
1951         unsigned mask = POLLOUT | POLLWRNORM;
1952         struct fuse_conn *fc = fuse_get_conn(file);
1953         if (!fc)
1954                 return POLLERR;
1955
1956         poll_wait(file, &fc->waitq, wait);
1957
1958         spin_lock(&fc->lock);
1959         if (!fc->connected)
1960                 mask = POLLERR;
1961         else if (request_pending(fc))
1962                 mask |= POLLIN | POLLRDNORM;
1963         spin_unlock(&fc->lock);
1964
1965         return mask;
1966 }
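/*
 * Example (not part of this file): a daemon event loop built on the
 * poll semantics above.  handle_one_request() is a hypothetical helper
 * that reads one request from /dev/fuse and writes the reply.
 */
#include <poll.h>

void handle_one_request(int fuse_fd);

static void daemon_loop(int fuse_fd)
{
        struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

        for (;;) {
                if (poll(&pfd, 1, -1) < 0)
                        break;
                if (pfd.revents & POLLERR)
                        break;  /* connection aborted or device released */
                if (pfd.revents & POLLIN)
                        handle_one_request(fuse_fd);
        }
}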
1967
1968 /*
1969  * Abort all requests on the given list (pending or processing)
1970  *
1971  * This function releases and reacquires fc->lock
1972  */
1973 static void end_requests(struct fuse_conn *fc, struct list_head *head)
1974 __releases(fc->lock)
1975 __acquires(fc->lock)
1976 {
1977         while (!list_empty(head)) {
1978                 struct fuse_req *req;
1979                 req = list_entry(head->next, struct fuse_req, list);
1980                 req->out.h.error = -ECONNABORTED;
1981                 request_end(fc, req);
1982                 spin_lock(&fc->lock);
1983         }
1984 }
1985
1986 /*
1987  * Abort requests under I/O
1988  *
1989  * Each request is marked aborted and finished, and its waiter is
1990  * woken up.  This makes request_wait_answer() wait until the request
1991  * is unlocked, and then return.
1992  *
1993  * If the request is asynchronous, the end callback must be called
1994  * only after waiting for the request to become unlocked (if it was
1995  * locked).
1996  */
1997 static void end_io_requests(struct fuse_conn *fc)
1998 __releases(fc->lock)
1999 __acquires(fc->lock)
2000 {
2001         while (!list_empty(&fc->io)) {
2002                 struct fuse_req *req =
2003                         list_entry(fc->io.next, struct fuse_req, list);
2004                 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
2005
2006                 req->aborted = 1;
2007                 req->out.h.error = -ECONNABORTED;
2008                 req->state = FUSE_REQ_FINISHED;
2009                 list_del_init(&req->list);
2010                 wake_up(&req->waitq);
2011                 if (end) {
2012                         req->end = NULL;
2013                         __fuse_get_request(req);
2014                         spin_unlock(&fc->lock);
2015                         wait_event(req->waitq, !req->locked);
2016                         end(fc, req);
2017                         fuse_put_request(fc, req);
2018                         spin_lock(&fc->lock);
2019                 }
2020         }
2021 }
2022
2023 static void end_queued_requests(struct fuse_conn *fc)
2024 __releases(fc->lock)
2025 __acquires(fc->lock)
2026 {
2027         fc->max_background = UINT_MAX;
2028         flush_bg_queue(fc);
2029         end_requests(fc, &fc->pending);
2030         end_requests(fc, &fc->processing);
2031         while (forget_pending(fc))
2032                 kfree(dequeue_forget(fc, 1, NULL));
2033 }
2034
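/*
 * Wake up everyone waiting on files polled through this connection, so
 * they can notice that the connection is gone.
 */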
2035 static void end_polls(struct fuse_conn *fc)
2036 {
2037         struct rb_node *p;
2038
2039         p = rb_first(&fc->polled_files);
2040
2041         while (p) {
2042                 struct fuse_file *ff;
2043                 ff = rb_entry(p, struct fuse_file, polled_node);
2044                 wake_up_interruptible_all(&ff->poll_wait);
2045
2046                 p = rb_next(p);
2047         }
2048 }
2049
2050 /*
2051  * Abort all requests.
2052  *
2053  * Emergency exit in case of a malicious or accidental deadlock, or
2054  * just a hung filesystem.
2055  *
2056  * The same effect is usually achievable through killing the
2057  * filesystem daemon and all users of the filesystem.  The exception
2058  * is the combination of an asynchronous request and the tricky
2059  * deadlock (see Documentation/filesystems/fuse.txt).
2060  *
2061  * During the abort, fc->connected being false prevents requests from
2062  * progressing from the pending and processing lists onto the io list,
2063  * and prevents new requests from entering the pending list.
2064  *
2065  * Progression of requests under I/O to the processing list is
2066  * prevented by the req->aborted flag being true for these requests.
2067  * For this reason, requests on the io list must be aborted first.
2068  */
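/*
 * Example (not part of this file): user space reaches this abort
 * through the fusectl filesystem.  A sketch, assuming fusectl is
 * mounted at the conventional /sys/fs/fuse/connections and "dev" is
 * the connection's device number.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int abort_fuse_conn(unsigned int dev)
{
        char path[64];
        int fd;

        snprintf(path, sizeof(path), "/sys/fs/fuse/connections/%u/abort", dev);
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        /* Writing anything here ends up in fuse_abort_conn(). */
        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }
        return close(fd);
}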
2069 void fuse_abort_conn(struct fuse_conn *fc)
2070 {
2071         spin_lock(&fc->lock);
2072         if (fc->connected) {
2073                 fc->connected = 0;
2074                 fc->blocked = 0;
2075                 end_io_requests(fc);
2076                 end_queued_requests(fc);
2077                 end_polls(fc);
2078                 wake_up_all(&fc->waitq);
2079                 wake_up_all(&fc->blocked_waitq);
2080                 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
2081         }
2082         spin_unlock(&fc->lock);
2083 }
2084 EXPORT_SYMBOL_GPL(fuse_abort_conn);
2085
2086 int fuse_dev_release(struct inode *inode, struct file *file)
2087 {
2088         struct fuse_conn *fc = fuse_get_conn(file);
2089         if (fc) {
2090                 spin_lock(&fc->lock);
2091                 fc->connected = 0;
2092                 fc->blocked = 0;
2093                 end_queued_requests(fc);
2094                 end_polls(fc);
2095                 wake_up_all(&fc->blocked_waitq);
2096                 spin_unlock(&fc->lock);
2097                 fuse_conn_put(fc);
2098         }
2099
2100         return 0;
2101 }
2102 EXPORT_SYMBOL_GPL(fuse_dev_release);
2103
2104 static int fuse_dev_fasync(int fd, struct file *file, int on)
2105 {
2106         struct fuse_conn *fc = fuse_get_conn(file);
2107         if (!fc)
2108                 return -EPERM;
2109
2110         /* No locking - fasync_helper does its own locking */
2111         return fasync_helper(fd, file, on, &fc->fasync);
2112 }
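/*
 * Example (not part of this file): how a daemon arms this fasync path.
 * A minimal sketch assuming an open /dev/fuse descriptor; once O_ASYNC
 * is set, kill_fasync() (e.g. from fuse_abort_conn() above) delivers
 * SIGIO to the daemon.
 */
#include <fcntl.h>
#include <unistd.h>

static int arm_sigio(int fuse_fd)
{
        int flags;

        if (fcntl(fuse_fd, F_SETOWN, getpid()) < 0)
                return -1;
        flags = fcntl(fuse_fd, F_GETFL);
        if (flags < 0)
                return -1;
        /* Setting O_ASYNC reaches fuse_dev_fasync() via fasync_helper(). */
        return fcntl(fuse_fd, F_SETFL, flags | O_ASYNC);
}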
2113
2114 const struct file_operations fuse_dev_operations = {
2115         .owner          = THIS_MODULE,
2116         .llseek         = no_llseek,
2117         .read           = do_sync_read,
2118         .aio_read       = fuse_dev_read,
2119         .splice_read    = fuse_dev_splice_read,
2120         .write          = do_sync_write,
2121         .aio_write      = fuse_dev_write,
2122         .splice_write   = fuse_dev_splice_write,
2123         .poll           = fuse_dev_poll,
2124         .release        = fuse_dev_release,
2125         .fasync         = fuse_dev_fasync,
2126 };
2127 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2128
2129 static struct miscdevice fuse_miscdevice = {
2130         .minor = FUSE_MINOR,
2131         .name  = "fuse",
2132         .fops = &fuse_dev_operations,
2133 };
2134
2135 int __init fuse_dev_init(void)
2136 {
2137         int err = -ENOMEM;
2138         fuse_req_cachep = kmem_cache_create("fuse_request",
2139                                             sizeof(struct fuse_req),
2140                                             0, 0, NULL);
2141         if (!fuse_req_cachep)
2142                 goto out;
2143
2144         err = misc_register(&fuse_miscdevice);
2145         if (err)
2146                 goto out_cache_clean;
2147
2148         return 0;
2149
2150  out_cache_clean:
2151         kmem_cache_destroy(fuse_req_cachep);
2152  out:
2153         return err;
2154 }
2155
2156 void fuse_dev_cleanup(void)
2157 {
2158         misc_deregister(&fuse_miscdevice);
2159         kmem_cache_destroy(fuse_req_cachep);
2160 }