git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-davem' into for-next
author Al Viro <viro@zeniv.linux.org.uk>
Sun, 12 Apr 2015 02:27:19 +0000 (22:27 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Sun, 12 Apr 2015 02:27:19 +0000 (22:27 -0400)
41 files changed:
arch/arc/kernel/process.c
arch/c6x/kernel/process.c
arch/frv/kernel/signal.c
arch/hexagon/kernel/process.c
arch/m32r/kernel/signal.c
arch/metag/include/asm/processor.h
arch/microblaze/kernel/signal.c
arch/nios2/kernel/process.c
arch/openrisc/kernel/process.c
arch/sh/kernel/signal_32.c
arch/sh/kernel/signal_64.c
arch/xtensa/kernel/signal.c
block/blk-map.c
block/scsi_ioctl.c
drivers/scsi/sg.c
fs/aio.c
fs/dcache.c
fs/namei.c
fs/ntfs/Makefile
fs/ntfs/file.c
fs/open.c
fs/read_write.c
fs/splice.c
fs/stat.c
include/linux/fs.h
include/linux/security.h
include/linux/uio.h
lib/iov_iter.c
mm/process_vm_access.c
security/apparmor/lsm.c
security/capability.c
security/keys/compat.c
security/keys/internal.h
security/keys/keyctl.c
security/security.c
security/selinux/hooks.c
security/smack/smack_lsm.c
security/tomoyo/common.h
security/tomoyo/file.c
security/tomoyo/realpath.c
security/tomoyo/tomoyo.c

index 98c00a2d4dd9a57f1c503ac2ebb6d63a3f1a76b4..f46efd14059d302712df70442604c19f1a8f2fe6 100644 (file)
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -155,8 +155,6 @@ int copy_thread(unsigned long clone_flags,
  */
 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long usp)
 {
-       set_fs(USER_DS); /* user space */
-
        regs->sp = usp;
        regs->ret = pc;
 
index 57d2ea8d19773828d620c98b8593f069ff32cd23..3ae9f5a166a0584034dea8fb41ea645ccf88aeea 100644 (file)
--- a/arch/c6x/kernel/process.c
+++ b/arch/c6x/kernel/process.c
@@ -101,7 +101,6 @@ void start_thread(struct pt_regs *regs, unsigned int pc, unsigned long usp)
         */
        usp -= 8;
 
-       set_fs(USER_DS);
        regs->pc  = pc;
        regs->sp  = usp;
        regs->tsr |= 0x40; /* set user mode */
index 336713ab47454fa2afd8e603d53255f5d91acaa0..85ca6727ca075c8ce47ca73f801ed612cdeeb86d 100644 (file)
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -176,8 +176,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set)
        struct sigframe __user *frame;
        int rsig, sig = ksig->sig;
 
-       set_fs(USER_DS);
-
        frame = get_sigframe(ksig, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
@@ -257,8 +255,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set)
        struct rt_sigframe __user *frame;
        int rsig, sig = ksig->sig;
 
-       set_fs(USER_DS);
-
        frame = get_sigframe(ksig, sizeof(*frame));
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
index 0a0dd5c05b46af8fda112b2ab9cc606a08d6d5a5..a9ebd471823a6644a6773ed99d780c5c620f3e56 100644 (file)
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -37,8 +37,6 @@
  */
 void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
-       /* Set to run with user-mode data segmentation */
-       set_fs(USER_DS);
        /* We want to zero all data-containing registers. Is this overkill? */
        memset(regs, 0, sizeof(*regs));
        /* We might want to also zero all Processor registers here */
index 7736c6660a1580562bbbed37ece6aae5a61ac99a..8c25e0c8f6a5c752ba9c201de8292e4dd0c5120b 100644 (file)
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -214,8 +214,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        regs->r2 = (unsigned long)&frame->uc;
        regs->bpc = (unsigned long)ksig->ka.sa.sa_handler;
 
-       set_fs(USER_DS);
-
 #if DEBUG_SIG
        printk("SIG deliver (%s:%d): sp=%p pc=%p\n",
                current->comm, current->pid, frame, regs->pc);
index 13272fd5a5baec8e3b1a4de778a6982abf0adae7..0838ca69976466bbfc3c3854fecf91566afd6a9b 100644 (file)
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -111,7 +111,6 @@ struct thread_struct {
  */
 #define start_thread(regs, pc, usp) do {                                  \
        unsigned int *argc = (unsigned int *) bprm->exec;                  \
-       set_fs(USER_DS);                                                   \
        current->thread.int_depth = 1;                                     \
        /* Force this process down to user land */                         \
        regs->ctx.SaveMask = TBICTX_PRIV_BIT;                              \
index a1cbaf90e2ea47215e8bfce77d7d8d113f9fd110..20ccd4e2baa54c88f4fbcdd13c1a407ee66ffc82 100644 (file)
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -236,8 +236,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        /* Offset to handle microblaze rtid r14, 0 */
        regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
 
-       set_fs(USER_DS);
-
 #ifdef DEBUG_SIG
        pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
                current->comm, current->pid, frame, regs->pc);
index 0e075b5ad2a54298c99ea668848b523c12449b15..2f8c74f93e705a08e28f2c7a9e6ba9da754ff187 100644 (file)
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -94,7 +94,6 @@ void show_regs(struct pt_regs *regs)
 
 void flush_thread(void)
 {
-       set_fs(USER_DS);
 }
 
 int copy_thread(unsigned long clone_flags,
index 386af258591dbe7084867f88b79c3eef980ce15b..7095dfe7666ba3dd55a0807ffd7d09b00af3ccc2 100644 (file)
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -197,7 +197,6 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
 {
        unsigned long sr = mfspr(SPR_SR) & ~SPR_SR_SM;
 
-       set_fs(USER_DS);
        memset(regs, 0, sizeof(struct pt_regs));
 
        regs->pc = pc;
index 0b34f2a704fe1d2fdcf03b49fe37d73a7725ecf7..97292890b51bc4d36121cfa35fde08c79e3260f5 100644 (file)
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -329,8 +329,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        if (err)
                return -EFAULT;
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
@@ -408,8 +406,6 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        if (err)
                return -EFAULT;
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
index 71993c6a7d94b0f6da895f78d96176de5aa31e14..0462995d4d7f66ed9531948aaf0d3ef9bdd63b63 100644 (file)
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -457,8 +457,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
 
        regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
 
-       set_fs(USER_DS);
-
        /* Broken %016Lx */
        pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
                 signal, current->comm, current->pid, frame,
@@ -547,8 +545,6 @@ static int setup_rt_frame(struct ksignal *kig, sigset_t *set,
        regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
        regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
 
-       set_fs(USER_DS);
-
        pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
                 signal, current->comm, current->pid, frame,
                 regs->pc >> 32, regs->pc & 0xffffffff,
index 3d733ba16f28a2db8f6f4c91c61170a44ca7fd83..6b3790445cbed4c5d2ab7fb31cdc6bb6d3db25f8 100644 (file)
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -405,11 +405,6 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        regs->areg[8] = (unsigned long) &frame->uc;
        regs->threadptr = tp;
 
-       /* Set access mode to USER_DS.  Nomenclature is outdated, but
-        * functionality is used in uaccess.h
-        */
-       set_fs(USER_DS);
-
 #if DEBUG_SIG
        printk("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
                current->comm, current->pid, signal, frame, regs->pc);
index b8d2725324a6b88391db4c8a5daf7a9c1309dd86..da310a1054299720d1b809d4ac3fd27af02b1e19 100644 (file)
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -124,10 +124,10 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 {
        struct iovec iov;
        struct iov_iter i;
+       int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);
 
-       iov.iov_base = ubuf;
-       iov.iov_len = len;
-       iov_iter_init(&i, rq_data_dir(rq), &iov, 1, len);
+       if (unlikely(ret < 0))
+               return ret;
 
        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
 }
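For reference, after this hunk blk_rq_map_user() collapses into a thin wrapper around the new iov_iter helpers. The sketch below shows the resulting function as it would sit inside block/blk-map.c (the usual block-layer includes are assumed, and the full parameter list is reconstructed from context rather than copied from the tree, so treat it as an approximation):

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	/* Build a one-segment iterator directly over the user buffer. */
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	/* All real mapping work is delegated to the iov_iter-based variant. */
	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}

The point of the change is that the single-buffer path no longer hand-rolls an iovec; every caller now funnels through blk_rq_map_user_iov().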
index e1f71c3961934b9ed9adbf01247d196b05801750..55b6f15dac900af77a5ad7038cd98f3133d816a8 100644 (file)
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -335,16 +335,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
                struct iov_iter i;
                struct iovec *iov = NULL;
 
-               ret = rw_copy_check_uvector(-1, hdr->dxferp, hdr->iovec_count,
-                                           0, NULL, &iov);
-               if (ret < 0) {
-                       kfree(iov);
+               ret = import_iovec(rq_data_dir(rq),
+                                  hdr->dxferp, hdr->iovec_count,
+                                  0, &iov, &i);
+               if (ret < 0)
                        goto out_free_cdb;
-               }
 
                /* SG_IO howto says that the shorter of the two wins */
-               iov_iter_init(&i, rq_data_dir(rq), iov, hdr->iovec_count,
-                             min_t(unsigned, ret, hdr->dxfer_len));
+               iov_iter_truncate(&i, hdr->dxfer_len);
 
                ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
                kfree(iov);
index d383f84869aa45475bb25f149123092a0f3beb0c..9d7b7db75e4b96b6fbb33bf24b91c205add07d79 100644 (file)
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1745,17 +1745,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
        }
 
        if (iov_count) {
-               int size = sizeof(struct iovec) * iov_count;
-               struct iovec *iov;
+               struct iovec *iov = NULL;
                struct iov_iter i;
 
-               iov = memdup_user(hp->dxferp, size);
-               if (IS_ERR(iov))
-                       return PTR_ERR(iov);
+               res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
+               if (res < 0)
+                       return res;
 
-               iov_iter_init(&i, rw, iov, iov_count,
-                             min_t(size_t, hp->dxfer_len,
-                                   iov_length(iov, iov_count)));
+               iov_iter_truncate(&i, hp->dxfer_len);
 
                res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
                kfree(iov);
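This hunk and the scsi_ioctl.c one above converge on the same caller pattern: import_iovec() copies in and validates the user iovec array, iov_iter_truncate() caps the total length, the iterator is handed to blk_rq_map_user_iov(), and the array is freed unconditionally. The callers start with iov = NULL, and import_iovec() hands back either a kmalloc()ed array or NULL, so the kfree() is always safe. A condensed, hypothetical sketch of that pattern follows; the helper name and its parameters are illustrative, not taken from the tree:

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/uio.h>

static int map_user_iovec(struct request_queue *q, struct request *rq,
			  const void __user *uvec, int count,
			  size_t limit, gfp_t gfp)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	/* Copy in and validate the user-supplied iovec array. */
	ret = import_iovec(rq_data_dir(rq), uvec, count, 0, &iov, &i);
	if (ret < 0)
		return ret;

	/* The shorter of the iovec total and the transfer length wins. */
	iov_iter_truncate(&i, limit);

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, gfp);
	kfree(iov);
	return ret;
}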
index 435ca29eca31431649b4df125e933488b8d4073e..3b8467aeb5eeb5ef6061dc10467f40f0d3335801 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1352,48 +1352,19 @@ typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *,
                            unsigned long, loff_t);
 typedef ssize_t (rw_iter_op)(struct kiocb *, struct iov_iter *);
 
-static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
-                                    int rw, char __user *buf,
-                                    unsigned long *nr_segs,
-                                    size_t *len,
-                                    struct iovec **iovec,
-                                    bool compat)
+static int aio_setup_vectored_rw(int rw, char __user *buf, size_t len,
+                                struct iovec **iovec,
+                                bool compat,
+                                struct iov_iter *iter)
 {
-       ssize_t ret;
-
-       *nr_segs = *len;
-
 #ifdef CONFIG_COMPAT
        if (compat)
-               ret = compat_rw_copy_check_uvector(rw,
+               return compat_import_iovec(rw,
                                (struct compat_iovec __user *)buf,
-                               *nr_segs, UIO_FASTIOV, *iovec, iovec);
-       else
+                               len, UIO_FASTIOV, iovec, iter);
 #endif
-               ret = rw_copy_check_uvector(rw,
-                               (struct iovec __user *)buf,
-                               *nr_segs, UIO_FASTIOV, *iovec, iovec);
-       if (ret < 0)
-               return ret;
-
-       /* len now reflect bytes instead of segs */
-       *len = ret;
-       return 0;
-}
-
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
-                                      int rw, char __user *buf,
-                                      unsigned long *nr_segs,
-                                      size_t len,
-                                      struct iovec *iovec)
-{
-       if (unlikely(!access_ok(!rw, buf, len)))
-               return -EFAULT;
-
-       iovec->iov_base = buf;
-       iovec->iov_len = len;
-       *nr_segs = 1;
-       return 0;
+       return import_iovec(rw, (struct iovec __user *)buf,
+                               len, UIO_FASTIOV, iovec, iter);
 }
 
 /*
@@ -1405,7 +1376,6 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
 {
        struct file *file = req->ki_filp;
        ssize_t ret;
-       unsigned long nr_segs;
        int rw;
        fmode_t mode;
        aio_rw_op *rw_op;
@@ -1437,16 +1407,17 @@ rw_common:
                        return -EINVAL;
 
                if (opcode == IOCB_CMD_PREADV || opcode == IOCB_CMD_PWRITEV)
-                       ret = aio_setup_vectored_rw(req, rw, buf, &nr_segs,
-                                               &len, &iovec, compat);
-               else
-                       ret = aio_setup_single_vector(req, rw, buf, &nr_segs,
-                                                 len, iovec);
+                       ret = aio_setup_vectored_rw(rw, buf, len,
+                                               &iovec, compat, &iter);
+               else {
+                       ret = import_single_range(rw, buf, len, iovec, &iter);
+                       iovec = NULL;
+               }
                if (!ret)
-                       ret = rw_verify_area(rw, file, &req->ki_pos, len);
+                       ret = rw_verify_area(rw, file, &req->ki_pos,
+                                            iov_iter_count(&iter));
                if (ret < 0) {
-                       if (iovec != inline_vecs)
-                               kfree(iovec);
+                       kfree(iovec);
                        return ret;
                }
 
@@ -1463,14 +1434,14 @@ rw_common:
                        file_start_write(file);
 
                if (iter_op) {
-                       iov_iter_init(&iter, rw, iovec, nr_segs, len);
                        ret = iter_op(req, &iter);
                } else {
-                       ret = rw_op(req, iovec, nr_segs, req->ki_pos);
+                       ret = rw_op(req, iter.iov, iter.nr_segs, req->ki_pos);
                }
 
                if (rw == WRITE)
                        file_end_write(file);
+               kfree(iovec);
                break;
 
        case IOCB_CMD_FDSYNC:
@@ -1492,9 +1463,6 @@ rw_common:
                return -EINVAL;
        }
 
-       if (iovec != inline_vecs)
-               kfree(iovec);
-
        if (ret != -EIOCBQUEUED) {
                /*
                 * There's no easy way to restart the syscall since other AIO's
index c71e3732e53bcebbffca749e65b7095fd4ff6e7e..d99736a63e3cf6d5da6850e4eee02ecd7ae672e4 100644 (file)
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2690,7 +2690,7 @@ static int __d_unalias(struct inode *inode,
                struct dentry *dentry, struct dentry *alias)
 {
        struct mutex *m1 = NULL, *m2 = NULL;
-       int ret = -EBUSY;
+       int ret = -ESTALE;
 
        /* If alias and dentry share a parent, then no extra locks required */
        if (alias->d_parent == dentry->d_parent)
index c83145af4bfc0ea9bb159002e3545e8a8cd65157..76fb76a0818bc274fc67b2d87b582db6690d62a6 100644 (file)
--- a/fs/namei.c
+++ b/fs/namei.c
  * PATH_MAX includes the nul terminator --RR.
  */
 
-#define EMBEDDED_NAME_MAX      (PATH_MAX - sizeof(struct filename))
+#define EMBEDDED_NAME_MAX      (PATH_MAX - offsetof(struct filename, iname))
 
 struct filename *
 getname_flags(const char __user *filename, int flags, int *empty)
 {
-       struct filename *result, *err;
-       int len;
-       long max;
+       struct filename *result;
        char *kname;
+       int len;
 
        result = audit_reusename(filename);
        if (result)
@@ -136,22 +135,18 @@ getname_flags(const char __user *filename, int flags, int *empty)
        result = __getname();
        if (unlikely(!result))
                return ERR_PTR(-ENOMEM);
-       result->refcnt = 1;
 
        /*
         * First, try to embed the struct filename inside the names_cache
         * allocation
         */
-       kname = (char *)result + sizeof(*result);
+       kname = (char *)result->iname;
        result->name = kname;
-       result->separate = false;
-       max = EMBEDDED_NAME_MAX;
 
-recopy:
-       len = strncpy_from_user(kname, filename, max);
+       len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
        if (unlikely(len < 0)) {
-               err = ERR_PTR(len);
-               goto error;
+               __putname(result);
+               return ERR_PTR(len);
        }
 
        /*
@@ -160,43 +155,49 @@ recopy:
         * names_cache allocation for the pathname, and re-do the copy from
         * userland.
         */
-       if (len == EMBEDDED_NAME_MAX && max == EMBEDDED_NAME_MAX) {
+       if (unlikely(len == EMBEDDED_NAME_MAX)) {
+               const size_t size = offsetof(struct filename, iname[1]);
                kname = (char *)result;
 
-               result = kzalloc(sizeof(*result), GFP_KERNEL);
-               if (!result) {
-                       err = ERR_PTR(-ENOMEM);
-                       result = (struct filename *)kname;
-                       goto error;
+               /*
+                * size is chosen that way we to guarantee that
+                * result->iname[0] is within the same object and that
+                * kname can't be equal to result->iname, no matter what.
+                */
+               result = kzalloc(size, GFP_KERNEL);
+               if (unlikely(!result)) {
+                       __putname(kname);
+                       return ERR_PTR(-ENOMEM);
                }
                result->name = kname;
-               result->separate = true;
-               result->refcnt = 1;
-               max = PATH_MAX;
-               goto recopy;
+               len = strncpy_from_user(kname, filename, PATH_MAX);
+               if (unlikely(len < 0)) {
+                       __putname(kname);
+                       kfree(result);
+                       return ERR_PTR(len);
+               }
+               if (unlikely(len == PATH_MAX)) {
+                       __putname(kname);
+                       kfree(result);
+                       return ERR_PTR(-ENAMETOOLONG);
+               }
        }
 
+       result->refcnt = 1;
        /* The empty path is special. */
        if (unlikely(!len)) {
                if (empty)
                        *empty = 1;
-               err = ERR_PTR(-ENOENT);
-               if (!(flags & LOOKUP_EMPTY))
-                       goto error;
+               if (!(flags & LOOKUP_EMPTY)) {
+                       putname(result);
+                       return ERR_PTR(-ENOENT);
+               }
        }
 
-       err = ERR_PTR(-ENAMETOOLONG);
-       if (unlikely(len >= PATH_MAX))
-               goto error;
-
        result->uptr = filename;
        result->aname = NULL;
        audit_getname(result);
        return result;
-
-error:
-       putname(result);
-       return err;
 }
 
 struct filename *
@@ -216,8 +217,7 @@ getname_kernel(const char * filename)
                return ERR_PTR(-ENOMEM);
 
        if (len <= EMBEDDED_NAME_MAX) {
-               result->name = (char *)(result) + sizeof(*result);
-               result->separate = false;
+               result->name = (char *)result->iname;
        } else if (len <= PATH_MAX) {
                struct filename *tmp;
 
@@ -227,7 +227,6 @@ getname_kernel(const char * filename)
                        return ERR_PTR(-ENOMEM);
                }
                tmp->name = (char *)result;
-               tmp->separate = true;
                result = tmp;
        } else {
                __putname(result);
@@ -249,7 +248,7 @@ void putname(struct filename *name)
        if (--name->refcnt > 0)
                return;
 
-       if (name->separate) {
+       if (name->name != name->iname) {
                __putname(name->name);
                kfree(name);
        } else
@@ -1851,10 +1850,11 @@ static int link_path_walk(const char *name, struct nameidata *nd)
        return err;
 }
 
-static int path_init(int dfd, const char *name, unsigned int flags,
+static int path_init(int dfd, const struct filename *name, unsigned int flags,
                     struct nameidata *nd)
 {
        int retval = 0;
+       const char *s = name->name;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
@@ -1863,7 +1863,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        if (flags & LOOKUP_ROOT) {
                struct dentry *root = nd->root.dentry;
                struct inode *inode = root->d_inode;
-               if (*name) {
+               if (*s) {
                        if (!d_can_lookup(root))
                                return -ENOTDIR;
                        retval = inode_permission(inode, MAY_EXEC);
@@ -1885,7 +1885,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        nd->root.mnt = NULL;
 
        nd->m_seq = read_seqbegin(&mount_lock);
-       if (*name=='/') {
+       if (*s == '/') {
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
                        nd->seq = set_root_rcu(nd);
@@ -1919,7 +1919,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
 
                dentry = f.file->f_path.dentry;
 
-               if (*name) {
+               if (*s) {
                        if (!d_can_lookup(dentry)) {
                                fdput(f);
                                return -ENOTDIR;
@@ -1949,7 +1949,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
        return -ECHILD;
 done:
        current->total_link_count = 0;
-       return link_path_walk(name, nd);
+       return link_path_walk(s, nd);
 }
 
 static void path_cleanup(struct nameidata *nd)
@@ -1972,7 +1972,7 @@ static inline int lookup_last(struct nameidata *nd, struct path *path)
 }
 
 /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */
-static int path_lookupat(int dfd, const char *name,
+static int path_lookupat(int dfd, const struct filename *name,
                                unsigned int flags, struct nameidata *nd)
 {
        struct path path;
@@ -2027,31 +2027,17 @@ static int path_lookupat(int dfd, const char *name,
 static int filename_lookup(int dfd, struct filename *name,
                                unsigned int flags, struct nameidata *nd)
 {
-       int retval = path_lookupat(dfd, name->name, flags | LOOKUP_RCU, nd);
+       int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
        if (unlikely(retval == -ECHILD))
-               retval = path_lookupat(dfd, name->name, flags, nd);
+               retval = path_lookupat(dfd, name, flags, nd);
        if (unlikely(retval == -ESTALE))
-               retval = path_lookupat(dfd, name->name,
-                                               flags | LOOKUP_REVAL, nd);
+               retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
 
        if (likely(!retval))
                audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
        return retval;
 }
 
-static int do_path_lookup(int dfd, const char *name,
-                               unsigned int flags, struct nameidata *nd)
-{
-       struct filename *filename = getname_kernel(name);
-       int retval = PTR_ERR(filename);
-
-       if (!IS_ERR(filename)) {
-               retval = filename_lookup(dfd, filename, flags, nd);
-               putname(filename);
-       }
-       return retval;
-}
-
 /* does lookup, returns the object with parent locked */
 struct dentry *kern_path_locked(const char *name, struct path *path)
 {
@@ -2089,9 +2075,15 @@ out:
 int kern_path(const char *name, unsigned int flags, struct path *path)
 {
        struct nameidata nd;
-       int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
-       if (!res)
-               *path = nd.path;
+       struct filename *filename = getname_kernel(name);
+       int res = PTR_ERR(filename);
+
+       if (!IS_ERR(filename)) {
+               res = filename_lookup(AT_FDCWD, filename, flags, &nd);
+               putname(filename);
+               if (!res)
+                       *path = nd.path;
+       }
        return res;
 }
 EXPORT_SYMBOL(kern_path);
@@ -2108,15 +2100,22 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
                    const char *name, unsigned int flags,
                    struct path *path)
 {
-       struct nameidata nd;
-       int err;
-       nd.root.dentry = dentry;
-       nd.root.mnt = mnt;
+       struct filename *filename = getname_kernel(name);
+       int err = PTR_ERR(filename);
+
        BUG_ON(flags & LOOKUP_PARENT);
-       /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
-       err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd);
-       if (!err)
-               *path = nd.path;
+
+       /* the first argument of filename_lookup() is ignored with LOOKUP_ROOT */
+       if (!IS_ERR(filename)) {
+               struct nameidata nd;
+               nd.root.dentry = dentry;
+               nd.root.mnt = mnt;
+               err = filename_lookup(AT_FDCWD, filename,
+                                     flags | LOOKUP_ROOT, &nd);
+               if (!err)
+                       *path = nd.path;
+               putname(filename);
+       }
        return err;
 }
 EXPORT_SYMBOL(vfs_path_lookup);
@@ -2138,9 +2137,7 @@ static struct dentry *lookup_hash(struct nameidata *nd)
  * @len:       maximum length @len should be interpreted to
  *
  * Note that this routine is purely a helper for filesystem usage and should
- * not be called by generic code.  Also note that by using this function the
- * nameidata argument is passed to the filesystem methods and a filesystem
- * using this helper needs to be prepared for that.
+ * not be called by generic code.
  */
 struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 {
@@ -2341,7 +2338,8 @@ out:
  * Returns 0 and "path" will be valid on success; Returns error otherwise.
  */
 static int
-path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
+path_mountpoint(int dfd, const struct filename *name, struct path *path,
+               unsigned int flags)
 {
        struct nameidata nd;
        int err;
@@ -2370,20 +2368,20 @@ out:
 }
 
 static int
-filename_mountpoint(int dfd, struct filename *s, struct path *path,
+filename_mountpoint(int dfd, struct filename *name, struct path *path,
                        unsigned int flags)
 {
        int error;
-       if (IS_ERR(s))
-               return PTR_ERR(s);
-       error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU);
+       if (IS_ERR(name))
+               return PTR_ERR(name);
+       error = path_mountpoint(dfd, name, path, flags | LOOKUP_RCU);
        if (unlikely(error == -ECHILD))
-               error = path_mountpoint(dfd, s->name, path, flags);
+               error = path_mountpoint(dfd, name, path, flags);
        if (unlikely(error == -ESTALE))
-               error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL);
+               error = path_mountpoint(dfd, name, path, flags | LOOKUP_REVAL);
        if (likely(!error))
-               audit_inode(s, path->dentry, 0);
-       putname(s);
+               audit_inode(name, path->dentry, 0);
+       putname(name);
        return error;
 }
 
@@ -3156,7 +3154,7 @@ static int do_tmpfile(int dfd, struct filename *pathname,
        static const struct qstr name = QSTR_INIT("/", 1);
        struct dentry *dentry, *child;
        struct inode *dir;
-       int error = path_lookupat(dfd, pathname->name,
+       int error = path_lookupat(dfd, pathname,
                                  flags | LOOKUP_DIRECTORY, nd);
        if (unlikely(error))
                return error;
@@ -3229,7 +3227,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
                goto out;
        }
 
-       error = path_init(dfd, pathname->name, flags, nd);
+       error = path_init(dfd, pathname, flags, nd);
        if (unlikely(error))
                goto out;
 
index 36ae529511c49140417cafe6559a167cd17d92e4..2ff263e6d363dba5f9621ad705c5795aaf855326 100644 (file)
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -8,7 +8,7 @@ ntfs-y := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \
 
 ntfs-$(CONFIG_NTFS_RW) += bitmap.o lcnalloc.o logfile.o quota.o usnjrnl.o
 
-ccflags-y := -DNTFS_VERSION=\"2.1.31\"
+ccflags-y := -DNTFS_VERSION=\"2.1.32\"
 ccflags-$(CONFIG_NTFS_DEBUG)   += -DDEBUG
 ccflags-$(CONFIG_NTFS_RW)      += -DNTFS_RW
 
index f16f2d8401febeaf4911491e92f8deac377e8a93..c1da78dad1afb389039660aa41064bb6f948fa76 100644 (file)
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc.
+ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -328,62 +328,168 @@ err_out:
        return err;
 }
 
-/**
- * ntfs_fault_in_pages_readable -
- *
- * Fault a number of userspace pages into pagetables.
- *
- * Unlike include/linux/pagemap.h::fault_in_pages_readable(), this one copes
- * with more than two userspace pages as well as handling the single page case
- * elegantly.
- *
- * If you find this difficult to understand, then think of the while loop being
- * the following code, except that we do without the integer variable ret:
- *
- *     do {
- *             ret = __get_user(c, uaddr);
- *             uaddr += PAGE_SIZE;
- *     } while (!ret && uaddr < end);
- *
- * Note, the final __get_user() may well run out-of-bounds of the user buffer,
- * but _not_ out-of-bounds of the page the user buffer belongs to, and since
- * this is only a read and not a write, and since it is still in the same page,
- * it should not matter and this makes the code much simpler.
- */
-static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
-               int bytes)
+static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
+               size_t *count)
 {
-       const char __user *end;
-       volatile char c;
-
-       /* Set @end to the first byte outside the last page we care about. */
-       end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
-
-       while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
-               ;
-}
-
-/**
- * ntfs_fault_in_pages_readable_iovec -
- *
- * Same as ntfs_fault_in_pages_readable() but operates on an array of iovecs.
- */
-static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
-               size_t iov_ofs, int bytes)
-{
-       do {
-               const char __user *buf;
-               unsigned len;
+       loff_t pos;
+       s64 end, ll;
+       ssize_t err;
+       unsigned long flags;
+       struct inode *vi = file_inode(file);
+       ntfs_inode *base_ni, *ni = NTFS_I(vi);
+       ntfs_volume *vol = ni->vol;
 
-               buf = iov->iov_base + iov_ofs;
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               ntfs_fault_in_pages_readable(buf, len);
-               bytes -= len;
-               iov++;
-               iov_ofs = 0;
-       } while (bytes);
+       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+                       "0x%llx, count 0x%lx.", vi->i_ino,
+                       (unsigned)le32_to_cpu(ni->type),
+                       (unsigned long long)*ppos, (unsigned long)*count);
+       /* We can write back this queue in page reclaim. */
+       current->backing_dev_info = inode_to_bdi(vi);
+       err = generic_write_checks(file, ppos, count, S_ISBLK(vi->i_mode));
+       if (unlikely(err))
+               goto out;
+       /*
+        * All checks have passed.  Before we start doing any writing we want
+        * to abort any totally illegal writes.
+        */
+       BUG_ON(NInoMstProtected(ni));
+       BUG_ON(ni->type != AT_DATA);
+       /* If file is encrypted, deny access, just like NT4. */
+       if (NInoEncrypted(ni)) {
+               /* Only $DATA attributes can be encrypted. */
+               /*
+                * Reminder for later: Encrypted files are _always_
+                * non-resident so that the content can always be encrypted.
+                */
+               ntfs_debug("Denying write access to encrypted file.");
+               err = -EACCES;
+               goto out;
+       }
+       if (NInoCompressed(ni)) {
+               /* Only unnamed $DATA attribute can be compressed. */
+               BUG_ON(ni->name_len);
+               /*
+                * Reminder for later: If resident, the data is not actually
+                * compressed.  Only on the switch to non-resident does
+                * compression kick in.  This is in contrast to encrypted files
+                * (see above).
+                */
+               ntfs_error(vi->i_sb, "Writing to compressed files is not "
+                               "implemented yet.  Sorry.");
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+       if (*count == 0)
+               goto out;
+       base_ni = ni;
+       if (NInoAttr(ni))
+               base_ni = ni->ext.base_ntfs_ino;
+       err = file_remove_suid(file);
+       if (unlikely(err))
+               goto out;
+       /*
+        * Our ->update_time method always succeeds thus file_update_time()
+        * cannot fail either so there is no need to check the return code.
+        */
+       file_update_time(file);
+       pos = *ppos;
+       /* The first byte after the last cluster being written to. */
+       end = (pos + *count + vol->cluster_size_mask) &
+                       ~(u64)vol->cluster_size_mask;
+       /*
+        * If the write goes beyond the allocated size, extend the allocation
+        * to cover the whole of the write, rounded up to the nearest cluster.
+        */
+       read_lock_irqsave(&ni->size_lock, flags);
+       ll = ni->allocated_size;
+       read_unlock_irqrestore(&ni->size_lock, flags);
+       if (end > ll) {
+               /*
+                * Extend the allocation without changing the data size.
+                *
+                * Note we ensure the allocation is big enough to at least
+                * write some data but we do not require the allocation to be
+                * complete, i.e. it may be partial.
+                */
+               ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
+               if (likely(ll >= 0)) {
+                       BUG_ON(pos >= ll);
+                       /* If the extension was partial truncate the write. */
+                       if (end > ll) {
+                               ntfs_debug("Truncating write to inode 0x%lx, "
+                                               "attribute type 0x%x, because "
+                                               "the allocation was only "
+                                               "partially extended.",
+                                               vi->i_ino, (unsigned)
+                                               le32_to_cpu(ni->type));
+                               *count = ll - pos;
+                       }
+               } else {
+                       err = ll;
+                       read_lock_irqsave(&ni->size_lock, flags);
+                       ll = ni->allocated_size;
+                       read_unlock_irqrestore(&ni->size_lock, flags);
+                       /* Perform a partial write if possible or fail. */
+                       if (pos < ll) {
+                               ntfs_debug("Truncating write to inode 0x%lx "
+                                               "attribute type 0x%x, because "
+                                               "extending the allocation "
+                                               "failed (error %d).",
+                                               vi->i_ino, (unsigned)
+                                               le32_to_cpu(ni->type),
+                                               (int)-err);
+                               *count = ll - pos;
+                       } else {
+                               if (err != -ENOSPC)
+                                       ntfs_error(vi->i_sb, "Cannot perform "
+                                                       "write to inode "
+                                                       "0x%lx, attribute "
+                                                       "type 0x%x, because "
+                                                       "extending the "
+                                                       "allocation failed "
+                                                       "(error %ld).",
+                                                       vi->i_ino, (unsigned)
+                                                       le32_to_cpu(ni->type),
+                                                       (long)-err);
+                               else
+                                       ntfs_debug("Cannot perform write to "
+                                                       "inode 0x%lx, "
+                                                       "attribute type 0x%x, "
+                                                       "because there is not "
+                                                       "space left.",
+                                                       vi->i_ino, (unsigned)
+                                                       le32_to_cpu(ni->type));
+                               goto out;
+                       }
+               }
+       }
+       /*
+        * If the write starts beyond the initialized size, extend it up to the
+        * beginning of the write and initialize all non-sparse space between
+        * the old initialized size and the new one.  This automatically also
+        * increments the vfs inode->i_size to keep it above or equal to the
+        * initialized_size.
+        */
+       read_lock_irqsave(&ni->size_lock, flags);
+       ll = ni->initialized_size;
+       read_unlock_irqrestore(&ni->size_lock, flags);
+       if (pos > ll) {
+               /*
+                * Wait for ongoing direct i/o to complete before proceeding.
+                * New direct i/o cannot start as we hold i_mutex.
+                */
+               inode_dio_wait(vi);
+               err = ntfs_attr_extend_initialized(ni, pos);
+               if (unlikely(err < 0))
+                       ntfs_error(vi->i_sb, "Cannot perform write to inode "
+                                       "0x%lx, attribute type 0x%x, because "
+                                       "extending the initialized size "
+                                       "failed (error %d).", vi->i_ino,
+                                       (unsigned)le32_to_cpu(ni->type),
+                                       (int)-err);
+       }
+out:
+       return err;
 }
 
 /**
@@ -420,8 +526,8 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                                        goto err_out;
                                }
                        }
-                       err = add_to_page_cache_lru(*cached_page, mapping, index,
-                                       GFP_KERNEL);
+                       err = add_to_page_cache_lru(*cached_page, mapping,
+                                       index, GFP_KERNEL);
                        if (unlikely(err)) {
                                if (err == -EEXIST)
                                        continue;
@@ -1267,180 +1373,6 @@ rl_not_mapped_enoent:
        return err;
 }
 
-/*
- * Copy as much as we can into the pages and return the number of bytes which
- * were successfully copied.  If a fault is encountered then clear the pages
- * out to (ofs + bytes) and return the number of bytes which were copied.
- */
-static inline size_t ntfs_copy_from_user(struct page **pages,
-               unsigned nr_pages, unsigned ofs, const char __user *buf,
-               size_t bytes)
-{
-       struct page **last_page = pages + nr_pages;
-       char *addr;
-       size_t total = 0;
-       unsigned len;
-       int left;
-
-       do {
-               len = PAGE_CACHE_SIZE - ofs;
-               if (len > bytes)
-                       len = bytes;
-               addr = kmap_atomic(*pages);
-               left = __copy_from_user_inatomic(addr + ofs, buf, len);
-               kunmap_atomic(addr);
-               if (unlikely(left)) {
-                       /* Do it the slow way. */
-                       addr = kmap(*pages);
-                       left = __copy_from_user(addr + ofs, buf, len);
-                       kunmap(*pages);
-                       if (unlikely(left))
-                               goto err_out;
-               }
-               total += len;
-               bytes -= len;
-               if (!bytes)
-                       break;
-               buf += len;
-               ofs = 0;
-       } while (++pages < last_page);
-out:
-       return total;
-err_out:
-       total += len - left;
-       /* Zero the rest of the target like __copy_from_user(). */
-       while (++pages < last_page) {
-               bytes -= len;
-               if (!bytes)
-                       break;
-               len = PAGE_CACHE_SIZE;
-               if (len > bytes)
-                       len = bytes;
-               zero_user(*pages, 0, len);
-       }
-       goto out;
-}
-
-static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
-               const struct iovec *iov, size_t iov_ofs, size_t bytes)
-{
-       size_t total = 0;
-
-       while (1) {
-               const char __user *buf = iov->iov_base + iov_ofs;
-               unsigned len;
-               size_t left;
-
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               left = __copy_from_user_inatomic(vaddr, buf, len);
-               total += len;
-               bytes -= len;
-               vaddr += len;
-               if (unlikely(left)) {
-                       total -= left;
-                       break;
-               }
-               if (!bytes)
-                       break;
-               iov++;
-               iov_ofs = 0;
-       }
-       return total;
-}
-
-static inline void ntfs_set_next_iovec(const struct iovec **iovp,
-               size_t *iov_ofsp, size_t bytes)
-{
-       const struct iovec *iov = *iovp;
-       size_t iov_ofs = *iov_ofsp;
-
-       while (bytes) {
-               unsigned len;
-
-               len = iov->iov_len - iov_ofs;
-               if (len > bytes)
-                       len = bytes;
-               bytes -= len;
-               iov_ofs += len;
-               if (iov->iov_len == iov_ofs) {
-                       iov++;
-                       iov_ofs = 0;
-               }
-       }
-       *iovp = iov;
-       *iov_ofsp = iov_ofs;
-}
-
-/*
- * This has the same side-effects and return value as ntfs_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
- * single-segment behaviour.
- *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
- * atomic and when not atomic.  This is ok because it calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many architectures
- * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
- * makes no difference at all on those architectures.
- */
-static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
-               unsigned nr_pages, unsigned ofs, const struct iovec **iov,
-               size_t *iov_ofs, size_t bytes)
-{
-       struct page **last_page = pages + nr_pages;
-       char *addr;
-       size_t copied, len, total = 0;
-
-       do {
-               len = PAGE_CACHE_SIZE - ofs;
-               if (len > bytes)
-                       len = bytes;
-               addr = kmap_atomic(*pages);
-               copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-                               *iov, *iov_ofs, len);
-               kunmap_atomic(addr);
-               if (unlikely(copied != len)) {
-                       /* Do it the slow way. */
-                       addr = kmap(*pages);
-                       copied = __ntfs_copy_from_user_iovec_inatomic(addr +
-                                       ofs, *iov, *iov_ofs, len);
-                       if (unlikely(copied != len))
-                               goto err_out;
-                       kunmap(*pages);
-               }
-               total += len;
-               ntfs_set_next_iovec(iov, iov_ofs, len);
-               bytes -= len;
-               if (!bytes)
-                       break;
-               ofs = 0;
-       } while (++pages < last_page);
-out:
-       return total;
-err_out:
-       BUG_ON(copied > len);
-       /* Zero the rest of the target like __copy_from_user(). */
-       memset(addr + ofs + copied, 0, len - copied);
-       kunmap(*pages);
-       total += copied;
-       ntfs_set_next_iovec(iov, iov_ofs, copied);
-       while (++pages < last_page) {
-               bytes -= len;
-               if (!bytes)
-                       break;
-               len = PAGE_CACHE_SIZE;
-               if (len > bytes)
-                       len = bytes;
-               zero_user(*pages, 0, len);
-       }
-       goto out;
-}
-
 static inline void ntfs_flush_dcache_pages(struct page **pages,
                unsigned nr_pages)
 {
@@ -1761,86 +1693,83 @@ err_out:
        return err;
 }
 
-static void ntfs_write_failed(struct address_space *mapping, loff_t to)
+/*
+ * Copy as much as we can into the pages and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then clear the pages
+ * out to (ofs + bytes) and return the number of bytes which were copied.
+ */
+static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
+               unsigned ofs, struct iov_iter *i, size_t bytes)
 {
-       struct inode *inode = mapping->host;
+       struct page **last_page = pages + nr_pages;
+       size_t total = 0;
+       struct iov_iter data = *i;
+       unsigned len, copied;
 
-       if (to > inode->i_size) {
-               truncate_pagecache(inode, inode->i_size);
-               ntfs_truncate_vfs(inode);
-       }
+       do {
+               len = PAGE_CACHE_SIZE - ofs;
+               if (len > bytes)
+                       len = bytes;
+               copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
+                               len);
+               total += copied;
+               bytes -= copied;
+               if (!bytes)
+                       break;
+               iov_iter_advance(&data, copied);
+               if (copied < len)
+                       goto err;
+               ofs = 0;
+       } while (++pages < last_page);
+out:
+       return total;
+err:
+       /* Zero the rest of the target like __copy_from_user(). */
+       len = PAGE_CACHE_SIZE - copied;
+       do {
+               if (len > bytes)
+                       len = bytes;
+               zero_user(*pages, copied, len);
+               bytes -= len;
+               copied = 0;
+               len = PAGE_CACHE_SIZE;
+       } while (++pages < last_page);
+       goto out;
 }
 
 /**
- * ntfs_file_buffered_write -
- *
- * Locking: The vfs is holding ->i_mutex on the inode.
+ * ntfs_perform_write - perform buffered write to a file
+ * @file:      file to write to
+ * @i:         iov_iter with data to write
+ * @pos:       byte offset in file at which to begin writing to
  */
-static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs,
-               loff_t pos, loff_t *ppos, size_t count)
+static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
+               loff_t pos)
 {
-       struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *vi = mapping->host;
        ntfs_inode *ni = NTFS_I(vi);
        ntfs_volume *vol = ni->vol;
        struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
        struct page *cached_page = NULL;
-       char __user *buf = NULL;
-       s64 end, ll;
        VCN last_vcn;
        LCN lcn;
-       unsigned long flags;
-       size_t bytes, iov_ofs = 0;      /* Offset in the current iovec. */
-       ssize_t status, written;
+       size_t bytes;
+       ssize_t status, written = 0;
        unsigned nr_pages;
-       int err;
 
-       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
-                       "pos 0x%llx, count 0x%lx.",
-                       vi->i_ino, (unsigned)le32_to_cpu(ni->type),
-                       (unsigned long long)pos, (unsigned long)count);
-       if (unlikely(!count))
-               return 0;
-       BUG_ON(NInoMstProtected(ni));
-       /*
-        * If the attribute is not an index root and it is encrypted or
-        * compressed, we cannot write to it yet.  Note we need to check for
-        * AT_INDEX_ALLOCATION since this is the type of both directory and
-        * index inodes.
-        */
-       if (ni->type != AT_INDEX_ALLOCATION) {
-               /* If file is encrypted, deny access, just like NT4. */
-               if (NInoEncrypted(ni)) {
-                       /*
-                        * Reminder for later: Encrypted files are _always_
-                        * non-resident so that the content can always be
-                        * encrypted.
-                        */
-                       ntfs_debug("Denying write access to encrypted file.");
-                       return -EACCES;
-               }
-               if (NInoCompressed(ni)) {
-                       /* Only unnamed $DATA attribute can be compressed. */
-                       BUG_ON(ni->type != AT_DATA);
-                       BUG_ON(ni->name_len);
-                       /*
-                        * Reminder for later: If resident, the data is not
-                        * actually compressed.  Only on the switch to non-
-                        * resident does compression kick in.  This is in
-                        * contrast to encrypted files (see above).
-                        */
-                       ntfs_error(vi->i_sb, "Writing to compressed files is "
-                                       "not implemented yet.  Sorry.");
-                       return -EOPNOTSUPP;
-               }
-       }
+       ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
+                       "0x%llx, count 0x%lx.", vi->i_ino,
+                       (unsigned)le32_to_cpu(ni->type),
+                       (unsigned long long)pos,
+                       (unsigned long)iov_iter_count(i));
        /*
         * If a previous ntfs_truncate() failed, repeat it and abort if it
         * fails again.
         */
        if (unlikely(NInoTruncateFailed(ni))) {
+               int err;
+
                inode_dio_wait(vi);
                err = ntfs_truncate(vi);
                if (err || NInoTruncateFailed(ni)) {
@@ -1854,81 +1783,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        return err;
                }
        }
-       /* The first byte after the write. */
-       end = pos + count;
-       /*
-        * If the write goes beyond the allocated size, extend the allocation
-        * to cover the whole of the write, rounded up to the nearest cluster.
-        */
-       read_lock_irqsave(&ni->size_lock, flags);
-       ll = ni->allocated_size;
-       read_unlock_irqrestore(&ni->size_lock, flags);
-       if (end > ll) {
-               /* Extend the allocation without changing the data size. */
-               ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
-               if (likely(ll >= 0)) {
-                       BUG_ON(pos >= ll);
-                       /* If the extension was partial truncate the write. */
-                       if (end > ll) {
-                               ntfs_debug("Truncating write to inode 0x%lx, "
-                                               "attribute type 0x%x, because "
-                                               "the allocation was only "
-                                               "partially extended.",
-                                               vi->i_ino, (unsigned)
-                                               le32_to_cpu(ni->type));
-                               end = ll;
-                               count = ll - pos;
-                       }
-               } else {
-                       err = ll;
-                       read_lock_irqsave(&ni->size_lock, flags);
-                       ll = ni->allocated_size;
-                       read_unlock_irqrestore(&ni->size_lock, flags);
-                       /* Perform a partial write if possible or fail. */
-                       if (pos < ll) {
-                               ntfs_debug("Truncating write to inode 0x%lx, "
-                                               "attribute type 0x%x, because "
-                                               "extending the allocation "
-                                               "failed (error code %i).",
-                                               vi->i_ino, (unsigned)
-                                               le32_to_cpu(ni->type), err);
-                               end = ll;
-                               count = ll - pos;
-                       } else {
-                               ntfs_error(vol->sb, "Cannot perform write to "
-                                               "inode 0x%lx, attribute type "
-                                               "0x%x, because extending the "
-                                               "allocation failed (error "
-                                               "code %i).", vi->i_ino,
-                                               (unsigned)
-                                               le32_to_cpu(ni->type), err);
-                               return err;
-                       }
-               }
-       }
-       written = 0;
-       /*
-        * If the write starts beyond the initialized size, extend it up to the
-        * beginning of the write and initialize all non-sparse space between
-        * the old initialized size and the new one.  This automatically also
-        * increments the vfs inode->i_size to keep it above or equal to the
-        * initialized_size.
-        */
-       read_lock_irqsave(&ni->size_lock, flags);
-       ll = ni->initialized_size;
-       read_unlock_irqrestore(&ni->size_lock, flags);
-       if (pos > ll) {
-               err = ntfs_attr_extend_initialized(ni, pos);
-               if (err < 0) {
-                       ntfs_error(vol->sb, "Cannot perform write to inode "
-                                       "0x%lx, attribute type 0x%x, because "
-                                       "extending the initialized size "
-                                       "failed (error code %i).", vi->i_ino,
-                                       (unsigned)le32_to_cpu(ni->type), err);
-                       status = err;
-                       goto err_out;
-               }
-       }
        /*
         * Determine the number of pages per cluster for non-resident
         * attributes.
@@ -1936,10 +1790,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
        nr_pages = 1;
        if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
                nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
-       /* Finally, perform the actual write. */
        last_vcn = -1;
-       if (likely(nr_segs == 1))
-               buf = iov->iov_base;
        do {
                VCN vcn;
                pgoff_t idx, start_idx;
@@ -1964,10 +1815,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                                vol->cluster_size_bits, false);
                                up_read(&ni->runlist.lock);
                                if (unlikely(lcn < LCN_HOLE)) {
-                                       status = -EIO;
                                        if (lcn == LCN_ENOMEM)
                                                status = -ENOMEM;
-                                       else
+                                       else {
+                                               status = -EIO;
                                                ntfs_error(vol->sb, "Cannot "
                                                        "perform write to "
                                                        "inode 0x%lx, "
@@ -1976,6 +1827,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                                        "is corrupt.",
                                                        vi->i_ino, (unsigned)
                                                        le32_to_cpu(ni->type));
+                                       }
                                        break;
                                }
                                if (lcn == LCN_HOLE) {
@@ -1988,8 +1840,9 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                }
                        }
                }
-               if (bytes > count)
-                       bytes = count;
+               if (bytes > iov_iter_count(i))
+                       bytes = iov_iter_count(i);
+again:
                /*
                 * Bring in the user page(s) that we will copy from _first_.
                 * Otherwise there is a nasty deadlock on copying from the same
@@ -1998,10 +1851,10 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                 * pages being swapped out between us bringing them into memory
                 * and doing the actual copying.
                 */
-               if (likely(nr_segs == 1))
-                       ntfs_fault_in_pages_readable(buf, bytes);
-               else
-                       ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
+               if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
                /* Get and lock @do_pages starting at index @start_idx. */
                status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
                                pages, &cached_page);
@@ -2017,56 +1870,57 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        status = ntfs_prepare_pages_for_non_resident_write(
                                        pages, do_pages, pos, bytes);
                        if (unlikely(status)) {
-                               loff_t i_size;
-
                                do {
                                        unlock_page(pages[--do_pages]);
                                        page_cache_release(pages[do_pages]);
                                } while (do_pages);
-                               /*
-                                * The write preparation may have instantiated
-                                * allocated space outside i_size.  Trim this
-                                * off again.  We can ignore any errors in this
-                                * case as we will just be waisting a bit of
-                                * allocated space, which is not a disaster.
-                                */
-                               i_size = i_size_read(vi);
-                               if (pos + bytes > i_size) {
-                                       ntfs_write_failed(mapping, pos + bytes);
-                               }
                                break;
                        }
                }
                u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
-               if (likely(nr_segs == 1)) {
-                       copied = ntfs_copy_from_user(pages + u, do_pages - u,
-                                       ofs, buf, bytes);
-                       buf += copied;
-               } else
-                       copied = ntfs_copy_from_user_iovec(pages + u,
-                                       do_pages - u, ofs, &iov, &iov_ofs,
-                                       bytes);
+               copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
+                                       i, bytes);
                ntfs_flush_dcache_pages(pages + u, do_pages - u);
-               status = ntfs_commit_pages_after_write(pages, do_pages, pos,
-                               bytes);
-               if (likely(!status)) {
-                       written += copied;
-                       count -= copied;
-                       pos += copied;
-                       if (unlikely(copied != bytes))
-                               status = -EFAULT;
+               status = 0;
+               if (likely(copied == bytes)) {
+                       status = ntfs_commit_pages_after_write(pages, do_pages,
+                                       pos, bytes);
+                       if (!status)
+                               status = bytes;
                }
                do {
                        unlock_page(pages[--do_pages]);
                        page_cache_release(pages[do_pages]);
                } while (do_pages);
-               if (unlikely(status))
+               if (unlikely(status < 0))
                        break;
-               balance_dirty_pages_ratelimited(mapping);
+               copied = status;
                cond_resched();
-       } while (count);
-err_out:
-       *ppos = pos;
+               if (unlikely(!copied)) {
+                       size_t sc;
+
+                       /*
+                        * We failed to copy anything.  Fall back to single
+                        * segment length write.
+                        *
+                        * This is needed to avoid possible livelock in the
+                        * case that all segments in the iov cannot be copied
+                        * at once without a pagefault.
+                        */
+                       sc = iov_iter_single_seg_count(i);
+                       if (bytes > sc)
+                               bytes = sc;
+                       goto again;
+               }
+               iov_iter_advance(i, copied);
+               pos += copied;
+               written += copied;
+               balance_dirty_pages_ratelimited(mapping);
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
+       } while (iov_iter_count(i));
        if (cached_page)
                page_cache_release(cached_page);
        ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
@@ -2076,59 +1930,56 @@ err_out:
 }
 
 /**
- * ntfs_file_aio_write_nolock -
+ * ntfs_file_write_iter_nolock - write data to a file
+ * @iocb:      IO state structure (file, offset, etc.)
+ * @from:      iov_iter with data to write
+ *
+ * Basically the same as __generic_file_write_iter() except that it ends
+ * up calling ntfs_perform_write() instead of generic_perform_write() and that
+ * O_DIRECT is not implemented.
  */
-static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
+static ssize_t ntfs_file_write_iter_nolock(struct kiocb *iocb,
+               struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       loff_t pos;
-       size_t count;           /* after file limit checks */
-       ssize_t written, err;
+       loff_t pos = iocb->ki_pos;
+       ssize_t written = 0;
+       ssize_t err;
+       size_t count = iov_iter_count(from);
 
-       count = iov_length(iov, nr_segs);
-       pos = *ppos;
-       /* We can write back this queue in page reclaim. */
-       current->backing_dev_info = inode_to_bdi(inode);
-       written = 0;
-       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
-       if (err)
-               goto out;
-       if (!count)
-               goto out;
-       err = file_remove_suid(file);
-       if (err)
-               goto out;
-       err = file_update_time(file);
-       if (err)
-               goto out;
-       written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
-                       count);
-out:
+       err = ntfs_prepare_file_for_write(file, &pos, &count);
+       if (count && !err) {
+               iov_iter_truncate(from, count);
+               written = ntfs_perform_write(file, from, pos);
+               if (likely(written >= 0))
+                       iocb->ki_pos = pos + written;
+       }
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
 
 /**
- * ntfs_file_aio_write -
+ * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
+ * @iocb:      IO state structure
+ * @from:      iov_iter with data to write
+ *
+ * Basically the same as generic_file_write_iter() except that it ends up
+ * calling ntfs_file_write_iter_nolock() instead of
+ * __generic_file_write_iter().
  */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
+       struct inode *vi = file_inode(file);
        ssize_t ret;
 
-       BUG_ON(iocb->ki_pos != pos);
-
-       mutex_lock(&inode->i_mutex);
-       ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
-       mutex_unlock(&inode->i_mutex);
+       mutex_lock(&vi->i_mutex);
+       ret = ntfs_file_write_iter_nolock(iocb, from);
+       mutex_unlock(&vi->i_mutex);
        if (ret > 0) {
-               int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+               ssize_t err;
+
+               err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
@@ -2196,37 +2047,17 @@ static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 #endif /* NTFS_RW */
 
 const struct file_operations ntfs_file_ops = {
-       .llseek         = generic_file_llseek,   /* Seek inside file. */
-       .read           = new_sync_read,         /* Read from file. */
-       .read_iter      = generic_file_read_iter, /* Async read from file. */
+       .llseek         = generic_file_llseek,
+       .read           = new_sync_read,
+       .read_iter      = generic_file_read_iter,
 #ifdef NTFS_RW
-       .write          = do_sync_write,         /* Write to file. */
-       .aio_write      = ntfs_file_aio_write,   /* Async write to file. */
-       /*.release      = ,*/                    /* Last file is closed.  See
-                                                   fs/ext2/file.c::
-                                                   ext2_release_file() for
-                                                   how to use this to discard
-                                                   preallocated space for
-                                                   write opened files. */
-       .fsync          = ntfs_file_fsync,       /* Sync a file to disk. */
-       /*.aio_fsync    = ,*/                    /* Sync all outstanding async
-                                                   i/o operations on a
-                                                   kiocb. */
+       .write          = new_sync_write,
+       .write_iter     = ntfs_file_write_iter,
+       .fsync          = ntfs_file_fsync,
 #endif /* NTFS_RW */
-       /*.ioctl        = ,*/                    /* Perform function on the
-                                                   mounted filesystem. */
-       .mmap           = generic_file_mmap,     /* Mmap file. */
-       .open           = ntfs_file_open,        /* Open file. */
-       .splice_read    = generic_file_splice_read /* Zero-copy data send with
-                                                   the data source being on
-                                                   the ntfs partition.  We do
-                                                   not need to care about the
-                                                   data destination. */
-       /*.sendpage     = ,*/                    /* Zero-copy data send with
-                                                   the data destination being
-                                                   on the ntfs partition.  We
-                                                   do not need to care about
-                                                   the data source. */
+       .mmap           = generic_file_mmap,
+       .open           = ntfs_file_open,
+       .splice_read    = generic_file_splice_read,
 };
 
 const struct inode_operations ntfs_file_inode_ops = {
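The rewritten write loop above has the same shape as generic_perform_write(): fault the source pages in, copy into the page cache, and advance the iterator only by what was actually copied. A minimal sketch of that shape, with copy_into_page_cache() as a hypothetical stand-in for the page-cache work the real code spreads across __ntfs_grab_cache_pages() and ntfs_commit_pages_after_write():

/* Illustrative only; copy_into_page_cache() is a hypothetical helper. */
static ssize_t example_perform_write(struct address_space *mapping,
				     struct iov_iter *i, loff_t pos)
{
	ssize_t written = 0;

	while (iov_iter_count(i)) {
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE,
				     iov_iter_count(i));
		size_t copied;
again:
		/* Touch the source pages first so the copy below cannot
		 * fault while the destination pages are held locked. */
		if (iov_iter_fault_in_multipages_readable(i, bytes)) {
			written = written ? written : -EFAULT;
			break;
		}
		copied = copy_into_page_cache(mapping, pos, i, bytes);
		if (unlikely(!copied)) {
			/* Nothing copied: retry with at most one segment
			 * to avoid livelocking on an unfaultable iovec. */
			bytes = min(bytes, iov_iter_single_seg_count(i));
			goto again;
		}
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
		balance_dirty_pages_ratelimited(mapping);
	}
	return written;
}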
index 33f9cbf2610b39498d416cb8c142fb5ebe4cc790..6a83c47d59040df871d2dac261d71fd32bec5587 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
        uid = make_kuid(current_user_ns(), user);
        gid = make_kgid(current_user_ns(), group);
 
+retry_deleg:
        newattrs.ia_valid =  ATTR_CTIME;
        if (user != (uid_t) -1) {
                if (!uid_valid(uid))
@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
        if (!S_ISDIR(inode->i_mode))
                newattrs.ia_valid |=
                        ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-retry_deleg:
        mutex_lock(&inode->i_mutex);
        error = security_path_chown(path, uid, gid);
        if (!error)
@@ -988,9 +988,6 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
                return ERR_PTR(err);
        if (flags & O_CREAT)
                return ERR_PTR(-EINVAL);
-       if (!filename && (flags & O_DIRECTORY))
-               if (!dentry->d_inode->i_op->lookup)
-                       return ERR_PTR(-ENOTDIR);
        return do_file_open_root(dentry, mnt, filename, &op);
 }
 EXPORT_SYMBOL(file_open_root);
index 99a6ef946d0182711542b77f17b22bc84cfe7526..69128b3786469b807f87763a51c819f4ad076503 100644 (file)
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -695,25 +695,23 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
 }
 EXPORT_SYMBOL(iov_shorten);
 
-static ssize_t do_iter_readv_writev(struct file *filp, int rw, const struct iovec *iov,
-               unsigned long nr_segs, size_t len, loff_t *ppos, iter_fn_t fn)
+static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter,
+               loff_t *ppos, iter_fn_t fn)
 {
        struct kiocb kiocb;
-       struct iov_iter iter;
        ssize_t ret;
 
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
 
-       iov_iter_init(&iter, rw, iov, nr_segs, len);
-       ret = fn(&kiocb, &iter);
+       ret = fn(&kiocb, iter);
        BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
 }
 
-static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
-               unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
+static ssize_t do_sync_readv_writev(struct file *filp, struct iov_iter *iter,
+               loff_t *ppos, iov_fn_t fn)
 {
        struct kiocb kiocb;
        ssize_t ret;
@@ -721,30 +719,23 @@ static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
        init_sync_kiocb(&kiocb, filp);
        kiocb.ki_pos = *ppos;
 
-       ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
+       ret = fn(&kiocb, iter->iov, iter->nr_segs, kiocb.ki_pos);
        BUG_ON(ret == -EIOCBQUEUED);
        *ppos = kiocb.ki_pos;
        return ret;
 }
 
 /* Do it by hand, with file-ops */
-static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
-               unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
+static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
+               loff_t *ppos, io_fn_t fn)
 {
-       struct iovec *vector = iov;
        ssize_t ret = 0;
 
-       while (nr_segs > 0) {
-               void __user *base;
-               size_t len;
+       while (iov_iter_count(iter)) {
+               struct iovec iovec = iov_iter_iovec(iter);
                ssize_t nr;
 
-               base = vector->iov_base;
-               len = vector->iov_len;
-               vector++;
-               nr_segs--;
-
-               nr = fn(filp, base, len, ppos);
+               nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos);
 
                if (nr < 0) {
                        if (!ret)
@@ -752,8 +743,9 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
                        break;
                }
                ret += nr;
-               if (nr != len)
+               if (nr != iovec.iov_len)
                        break;
+               iov_iter_advance(iter, nr);
        }
 
        return ret;
@@ -844,17 +836,20 @@ static ssize_t do_readv_writev(int type, struct file *file,
        size_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
+       struct iov_iter iter;
        ssize_t ret;
        io_fn_t fn;
        iov_fn_t fnv;
        iter_fn_t iter_fn;
 
-       ret = rw_copy_check_uvector(type, uvector, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
-       if (ret <= 0)
-               goto out;
+       ret = import_iovec(type, uvector, nr_segs,
+                          ARRAY_SIZE(iovstack), &iov, &iter);
+       if (ret < 0)
+               return ret;
 
-       tot_len = ret;
+       tot_len = iov_iter_count(&iter);
+       if (!tot_len)
+               goto out;
        ret = rw_verify_area(type, file, pos, tot_len);
        if (ret < 0)
                goto out;
@@ -872,20 +867,17 @@ static ssize_t do_readv_writev(int type, struct file *file,
        }
 
        if (iter_fn)
-               ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
-                                               pos, iter_fn);
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
        else if (fnv)
-               ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
-                                               pos, fnv);
+               ret = do_sync_readv_writev(file, &iter, pos, fnv);
        else
-               ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+               ret = do_loop_readv_writev(file, &iter, pos, fn);
 
        if (type != READ)
                file_end_write(file);
 
 out:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        if ((ret + (type == READ)) > 0) {
                if (type == READ)
                        fsnotify_access(file);
@@ -1024,17 +1016,20 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        compat_ssize_t tot_len;
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
+       struct iov_iter iter;
        ssize_t ret;
        io_fn_t fn;
        iov_fn_t fnv;
        iter_fn_t iter_fn;
 
-       ret = compat_rw_copy_check_uvector(type, uvector, nr_segs,
-                                              UIO_FASTIOV, iovstack, &iov);
-       if (ret <= 0)
-               goto out;
+       ret = compat_import_iovec(type, uvector, nr_segs,
+                                 UIO_FASTIOV, &iov, &iter);
+       if (ret < 0)
+               return ret;
 
-       tot_len = ret;
+       tot_len = iov_iter_count(&iter);
+       if (!tot_len)
+               goto out;
        ret = rw_verify_area(type, file, pos, tot_len);
        if (ret < 0)
                goto out;
@@ -1052,20 +1047,17 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        }
 
        if (iter_fn)
-               ret = do_iter_readv_writev(file, type, iov, nr_segs, tot_len,
-                                               pos, iter_fn);
+               ret = do_iter_readv_writev(file, &iter, pos, iter_fn);
        else if (fnv)
-               ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
-                                               pos, fnv);
+               ret = do_sync_readv_writev(file, &iter, pos, fnv);
        else
-               ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);
+               ret = do_loop_readv_writev(file, &iter, pos, fn);
 
        if (type != READ)
                file_end_write(file);
 
 out:
-       if (iov != iovstack)
-               kfree(iov);
+       kfree(iov);
        if ((ret + (type == READ)) > 0) {
                if (type == READ)
                        fsnotify_access(file);
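Both the native and compat paths above now share one calling convention: import_iovec() validates the user vector, initializes the iov_iter, and leaves *iov either NULL (the on-stack array was used) or pointing at a kmalloc()ed array, so a single unconditional kfree() is always correct. A minimal sketch of a caller, with do_the_io() as a hypothetical stand-in for the actual dispatch:

static ssize_t example_readv(struct file *file,
			     const struct iovec __user *uvec,
			     unsigned long nr_segs, loff_t *pos)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	if (iov_iter_count(&iter))
		ret = do_the_io(file, &iter, pos);	/* hypothetical */

	kfree(iov);	/* NULL when iovstack was used, so always safe */
	return ret;
}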
index 4bbfa95b5bfea8b20aa557cff35c2efd610289da..41cbb16299e0949984eb284887c22f77fff0390f 100644 (file)
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1533,34 +1533,29 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
-       ssize_t count;
 
        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;
 
-       ret = rw_copy_check_uvector(READ, uiov, nr_segs,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
-       if (ret <= 0)
-               goto out;
-
-       count = ret;
-       iov_iter_init(&iter, READ, iov, nr_segs, count);
+       ret = import_iovec(READ, uiov, nr_segs,
+                          ARRAY_SIZE(iovstack), &iov, &iter);
+       if (ret < 0)
+               return ret;
 
+       sd.total_len = iov_iter_count(&iter);
        sd.len = 0;
-       sd.total_len = count;
        sd.flags = flags;
        sd.u.data = &iter;
        sd.pos = 0;
 
-       pipe_lock(pipe);
-       ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
-       pipe_unlock(pipe);
-
-out:
-       if (iov != iovstack)
-               kfree(iov);
+       if (sd.total_len) {
+               pipe_lock(pipe);
+               ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
+               pipe_unlock(pipe);
+       }
 
+       kfree(iov);
        return ret;
 }
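The path above is what vmsplice(2) on the read end of a pipe exercises: pipe contents are copied into the caller's iovec. A small self-contained userspace demonstration (error handling trimmed for brevity):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	char a[6] = "", b[8] = "";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) - 1 },
		{ .iov_base = b, .iov_len = sizeof(b) - 1 },
	};
	ssize_t n;

	pipe(pfd);
	write(pfd[1], "hello world!", 12);
	n = vmsplice(pfd[0], iov, 2, 0);	/* pipe -> user memory */
	printf("vmsplice copied %zd bytes: \"%s\" + \"%s\"\n", n, a, b);
	return 0;
}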
 
index ae0c3cef9927e64fb1f21ccf1848155825fc79ef..19636af5e75cc16614f790519c6111599d906830 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -66,7 +66,7 @@ int vfs_getattr(struct path *path, struct kstat *stat)
 {
        int retval;
 
-       retval = security_inode_getattr(path->mnt, path->dentry);
+       retval = security_inode_getattr(path);
        if (retval)
                return retval;
        return vfs_getattr_nosec(path, stat);
index fdce1ddf230cb95a413593978a04c84c02961b3c..20fe15fe236da26c060bb9c9dc2be7871ac16ae3 100644 (file)
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2167,7 +2167,7 @@ struct filename {
        const __user char       *uptr;  /* original userland pointer */
        struct audit_names      *aname;
        int                     refcnt;
-       bool                    separate; /* should "name" be freed? */
+       const char              iname[];
 };
 
 extern long vfs_truncate(struct path *, loff_t);
index 25a079a7c3b32a1fc0088cd48d5fe6ec71d2ff62..18264ea9e314153488f9726b530993658c4cea25 100644 (file)
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1556,7 +1556,7 @@ struct security_operations {
        int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
        int (*inode_permission) (struct inode *inode, int mask);
        int (*inode_setattr)    (struct dentry *dentry, struct iattr *attr);
-       int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+       int (*inode_getattr) (const struct path *path);
        int (*inode_setxattr) (struct dentry *dentry, const char *name,
                               const void *value, size_t size, int flags);
        void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
@@ -1842,7 +1842,7 @@ int security_inode_readlink(struct dentry *dentry);
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
 int security_inode_permission(struct inode *inode, int mask);
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+int security_inode_getattr(const struct path *path);
 int security_inode_setxattr(struct dentry *dentry, const char *name,
                            const void *value, size_t size, int flags);
 void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -2258,8 +2258,7 @@ static inline int security_inode_setattr(struct dentry *dentry,
        return 0;
 }
 
-static inline int security_inode_getattr(struct vfsmount *mnt,
-                                         struct dentry *dentry)
+static inline int security_inode_getattr(const struct path *path)
 {
        return 0;
 }
index 1f4a37f1f025827c9a561a8e5a856680062227cc..15f11fb9fff6feb5197f9501777e018f172dda4b 100644 (file)
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,6 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
index fc6e33f6b7f3376b365c1b04409eb23580b729e2..75232ad0a5e7ead00e5d8396ed34763d84a0685c 100644 (file)
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -317,6 +317,32 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 }
 EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
+/*
+ * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+ * bytes.  For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+ * because it is an invalid address).
+ */
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+{
+       size_t skip = i->iov_offset;
+       const struct iovec *iov;
+       int err;
+       struct iovec v;
+
+       if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+               iterate_iovec(i, bytes, v, iov, skip, ({
+                       err = fault_in_multipages_readable(v.iov_base,
+                                       v.iov_len);
+                       if (unlikely(err))
+                       return err;
+               0;}))
+       }
+       return 0;
+}
+EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+
 void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
index b1597690530ce84644d8e405dab02740298706cd..e88d071648c2dece38b25d3fc8e57091d1fcd1d1 100644 (file)
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -257,22 +257,18 @@ static ssize_t process_vm_rw(pid_t pid,
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc;
+       int dir = vm_write ? WRITE : READ;
 
        if (flags != 0)
                return -EINVAL;
 
        /* Check iovecs */
-       if (vm_write)
-               rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l);
-       else
-               rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
-                                          iovstack_l, &iov_l);
-       if (rc <= 0)
+       rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+       if (rc < 0)
+               return rc;
+       if (!iov_iter_count(&iter))
                goto free_iovecs;
 
-       iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
-
        rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
                                   iovstack_r, &iov_r);
        if (rc <= 0)
@@ -283,8 +279,7 @@ static ssize_t process_vm_rw(pid_t pid,
 free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
-       if (iov_l != iovstack_l)
-               kfree(iov_l);
+       kfree(iov_l);
 
        return rc;
 }
@@ -320,21 +315,16 @@ compat_process_vm_rw(compat_pid_t pid,
        struct iovec *iov_r = iovstack_r;
        struct iov_iter iter;
        ssize_t rc = -EFAULT;
+       int dir = vm_write ? WRITE : READ;
 
        if (flags != 0)
                return -EINVAL;
 
-       if (vm_write)
-               rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
-                                                 UIO_FASTIOV, iovstack_l,
-                                                 &iov_l);
-       else
-               rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
-                                                 UIO_FASTIOV, iovstack_l,
-                                                 &iov_l);
-       if (rc <= 0)
+       rc = compat_import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
+       if (rc < 0)
+               return rc;
+       if (!iov_iter_count(&iter))
                goto free_iovecs;
-       iov_iter_init(&iter, vm_write ? WRITE : READ, iov_l, liovcnt, rc);
        rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
                                          &iov_r);
@@ -346,8 +336,7 @@ compat_process_vm_rw(compat_pid_t pid,
 free_iovecs:
        if (iov_r != iovstack_r)
                kfree(iov_r);
-       if (iov_l != iovstack_l)
-               kfree(iov_l);
+       kfree(iov_l);
        return rc;
 }
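The refactoring above only changes how the local iovec is carried inside the kernel; the process_vm_readv(2)/process_vm_writev(2) ABI is unchanged. A small self-contained example that reads from the calling process's own address space:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[] = "copied across address spaces";
	char dst[sizeof(src)];
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };
	ssize_t n;

	n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("read %zd bytes: %s\n", n, dst);
	return 0;
}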
 
index 107db88b1d5f9d1d5dda20c0636f229738fec8bd..dd56bffd6500e078b4aa7d4b64c8e04c91802541 100644 (file)
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -364,12 +364,12 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
        return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
 }
 
-static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int apparmor_inode_getattr(const struct path *path)
 {
-       if (!mediated_filesystem(dentry))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
-       return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
+       return common_perm_mnt_dentry(OP_GETATTR, path->mnt, path->dentry,
                                      AA_MAY_META_READ);
 }
 
index 58a1600c149bfaf9f11b1f60eb124f2a1c52024d..0d03fcc489a49ee3221b1369ca2c1ff931c691cd 100644 (file)
--- a/security/capability.c
+++ b/security/capability.c
@@ -225,7 +225,7 @@ static int cap_inode_setattr(struct dentry *dentry, struct iattr *iattr)
        return 0;
 }
 
-static int cap_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int cap_inode_getattr(const struct path *path)
 {
        return 0;
 }
index 347896548ad3159a152186a4c1a27cdf92f1f4ad..25430a3aa7f7b9d6e6b4d10ae9bc72c8669c00fe 100644 (file)
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -31,30 +31,21 @@ static long compat_keyctl_instantiate_key_iov(
        key_serial_t ringid)
 {
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+       struct iov_iter from;
        long ret;
 
-       if (!_payload_iov || !ioc)
-               goto no_payload;
+       if (!_payload_iov)
+               ioc = 0;
 
-       ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                          ARRAY_SIZE(iovstack),
-                                          iovstack, &iov);
+       ret = compat_import_iovec(WRITE, _payload_iov, ioc,
+                                 ARRAY_SIZE(iovstack), &iov,
+                                 &from);
        if (ret < 0)
-               goto err;
-       if (ret == 0)
-               goto no_payload_free;
-
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
-       if (iov != iovstack)
-               kfree(iov);
-       return ret;
+               return ret;
 
-no_payload_free:
-       if (iov != iovstack)
-               kfree(iov);
-no_payload:
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+       ret = keyctl_instantiate_key_common(id, &from, ringid);
+       kfree(iov);
+       return ret;
 }
 
 /*
index 200e37867336a3c2903437e97f591fbc302e15b7..5105c2c2da75b0e13dec1196be67c88f4d789e72 100644 (file)
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -243,9 +243,10 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
                                       unsigned, key_serial_t);
 extern long keyctl_invalidate_key(key_serial_t);
 
+struct iov_iter;
 extern long keyctl_instantiate_key_common(key_serial_t,
-                                         const struct iovec *,
-                                         unsigned, size_t, key_serial_t);
+                                         struct iov_iter *,
+                                         key_serial_t);
 #ifdef CONFIG_PERSISTENT_KEYRINGS
 extern long keyctl_get_persistent(uid_t, key_serial_t);
 extern unsigned persistent_keyring_expiry;
index 4743d71e4aa6dd12f2456a5f00496c1222775c6a..0b9ec78a7a7ad2b14af1ef0407e051e6dcef29ff 100644 (file)
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -997,21 +997,6 @@ static int keyctl_change_reqkey_auth(struct key *key)
        return commit_creds(new);
 }
 
-/*
- * Copy the iovec data from userspace
- */
-static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
-                                unsigned ioc)
-{
-       for (; ioc > 0; ioc--) {
-               if (copy_from_user(buffer, iov->iov_base, iov->iov_len) != 0)
-                       return -EFAULT;
-               buffer += iov->iov_len;
-               iov++;
-       }
-       return 0;
-}
-
 /*
  * Instantiate a key with the specified payload and link the key into the
  * destination keyring if one is given.
@@ -1022,20 +1007,21 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
  * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key_common(key_serial_t id,
-                                  const struct iovec *payload_iov,
-                                  unsigned ioc,
-                                  size_t plen,
+                                  struct iov_iter *from,
                                   key_serial_t ringid)
 {
        const struct cred *cred = current_cred();
        struct request_key_auth *rka;
        struct key *instkey, *dest_keyring;
+       size_t plen = from ? iov_iter_count(from) : 0;
        void *payload;
        long ret;
-       bool vm = false;
 
        kenter("%d,,%zu,%d", id, plen, ringid);
 
+       if (!plen)
+               from = NULL;
+
        ret = -EINVAL;
        if (plen > 1024 * 1024 - 1)
                goto error;
@@ -1054,20 +1040,19 @@ long keyctl_instantiate_key_common(key_serial_t id,
        /* pull the payload in if one was supplied */
        payload = NULL;
 
-       if (payload_iov) {
+       if (from) {
                ret = -ENOMEM;
                payload = kmalloc(plen, GFP_KERNEL);
                if (!payload) {
                        if (plen <= PAGE_SIZE)
                                goto error;
-                       vm = true;
                        payload = vmalloc(plen);
                        if (!payload)
                                goto error;
                }
 
-               ret = copy_from_user_iovec(payload, payload_iov, ioc);
-               if (ret < 0)
+               ret = -EFAULT;
+               if (copy_from_iter(payload, plen, from) != plen)
                        goto error2;
        }
 
@@ -1089,10 +1074,7 @@ long keyctl_instantiate_key_common(key_serial_t id,
                keyctl_change_reqkey_auth(NULL);
 
 error2:
-       if (!vm)
-               kfree(payload);
-       else
-               vfree(payload);
+       kvfree(payload);
 error:
        return ret;
 }
@@ -1112,15 +1094,19 @@ long keyctl_instantiate_key(key_serial_t id,
                            key_serial_t ringid)
 {
        if (_payload && plen) {
-               struct iovec iov[1] = {
-                       [0].iov_base = (void __user *)_payload,
-                       [0].iov_len  = plen
-               };
+               struct iovec iov;
+               struct iov_iter from;
+               int ret;
 
-               return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+               ret = import_single_range(WRITE, (void __user *)_payload, plen,
+                                         &iov, &from);
+               if (unlikely(ret))
+                       return ret;
+
+               return keyctl_instantiate_key_common(id, &from, ringid);
        }
 
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+       return keyctl_instantiate_key_common(id, NULL, ringid);
 }
 
 /*
@@ -1138,29 +1124,19 @@ long keyctl_instantiate_key_iov(key_serial_t id,
                                key_serial_t ringid)
 {
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+       struct iov_iter from;
        long ret;
 
-       if (!_payload_iov || !ioc)
-               goto no_payload;
+       if (!_payload_iov)
+               ioc = 0;
 
-       ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
-                                   ARRAY_SIZE(iovstack), iovstack, &iov);
+       ret = import_iovec(WRITE, _payload_iov, ioc,
+                                   ARRAY_SIZE(iovstack), &iov, &from);
        if (ret < 0)
-               goto err;
-       if (ret == 0)
-               goto no_payload_free;
-
-       ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-err:
-       if (iov != iovstack)
-               kfree(iov);
+               return ret;
+       ret = keyctl_instantiate_key_common(id, &from, ringid);
+       kfree(iov);
        return ret;
-
-no_payload_free:
-       if (iov != iovstack)
-               kfree(iov);
-no_payload:
-       return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
 }
 
 /*
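Both instantiate paths above now feed a single iov_iter into the common helper. A minimal sketch of that pattern, assuming a hypothetical use_payload() consumer; as in the code above, kvfree() frees either a kmalloc()ed or a vmalloc()ed buffer:

static long example_pull_payload(struct iov_iter *from)
{
	size_t plen = iov_iter_count(from);
	void *payload;
	long ret;

	if (plen > 1024 * 1024 - 1)
		return -EINVAL;

	payload = kmalloc(plen, GFP_KERNEL);
	if (!payload) {
		payload = vmalloc(plen);
		if (!payload)
			return -ENOMEM;
	}

	if (copy_from_iter(payload, plen, from) != plen) {
		ret = -EFAULT;
		goto out;
	}
	ret = use_payload(payload, plen);	/* hypothetical consumer */
out:
	kvfree(payload);	/* handles both kmalloc() and vmalloc() */
	return ret;
}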
index 1f475aa53288617cee142d4858428d523fad628d..730ac65a573722238142ad0d978abc26f87287cd 100644 (file)
--- a/security/security.c
+++ b/security/security.c
@@ -608,11 +608,11 @@ int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
 }
 EXPORT_SYMBOL_GPL(security_inode_setattr);
 
-int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+int security_inode_getattr(const struct path *path)
 {
-       if (unlikely(IS_PRIVATE(dentry->d_inode)))
+       if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
                return 0;
-       return security_ops->inode_getattr(mnt, dentry);
+       return security_ops->inode_getattr(path);
 }
 
 int security_inode_setxattr(struct dentry *dentry, const char *name,
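Each LSM below is converted to the new prototype in the same mechanical way. A minimal sketch of a converted hook (illustrative only, not one of the in-tree modules; example_check_access() is hypothetical):

static int example_inode_getattr(const struct path *path)
{
	struct inode *inode = path->dentry->d_inode;

	/* Both the mount and the dentry are now available without the
	 * caller having to assemble a temporary struct path. */
	if (IS_PRIVATE(inode))
		return 0;
	return example_check_access(path->mnt, inode);	/* hypothetical */
}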
index 7e392edaab97bee0f66825b195562c3aa879c05a..c318b304ee2f5be7001d6e26e30811bbbbd83859 100644 (file)
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1622,7 +1622,7 @@ static inline int dentry_has_perm(const struct cred *cred,
    the path to help the auditing code to more easily generate the
    pathname if needed. */
 static inline int path_has_perm(const struct cred *cred,
-                               struct path *path,
+                               const struct path *path,
                                u32 av)
 {
        struct inode *inode = path->dentry->d_inode;
@@ -2953,15 +2953,9 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
        return dentry_has_perm(cred, dentry, av);
 }
 
-static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int selinux_inode_getattr(const struct path *path)
 {
-       const struct cred *cred = current_cred();
-       struct path path;
-
-       path.dentry = dentry;
-       path.mnt = mnt;
-
-       return path_has_perm(cred, &path, FILE__GETATTR);
+       return path_has_perm(current_cred(), path, FILE__GETATTR);
 }
 
 static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
index c934311812f1a777093c44a89543dcae924b8568..1511965549b8232fdd4d3469166023c2d140f908 100644 (file)
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1034,19 +1034,16 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int smack_inode_getattr(const struct path *path)
 {
        struct smk_audit_info ad;
-       struct path path;
+       struct inode *inode = path->dentry->d_inode;
        int rc;
 
-       path.dentry = dentry;
-       path.mnt = mnt;
-
        smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
-       smk_ad_setfield_u_fs_path(&ad, path);
-       rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
-       rc = smk_bu_inode(dentry->d_inode, MAY_READ, rc);
+       smk_ad_setfield_u_fs_path(&ad, *path);
+       rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad);
+       rc = smk_bu_inode(inode, MAY_READ, rc);
        return rc;
 }
 
index b897d4862016ce51ba737cee7f86f07d95a28c65..f9c9fb1d56b4bde70d43a21cc22a71e37245dfbc 100644 (file)
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -945,7 +945,7 @@ char *tomoyo_encode2(const char *str, int str_len);
 char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
                      va_list args);
 char *tomoyo_read_token(struct tomoyo_acl_param *param);
-char *tomoyo_realpath_from_path(struct path *path);
+char *tomoyo_realpath_from_path(const struct path *path);
 char *tomoyo_realpath_nofollow(const char *pathname);
 const char *tomoyo_get_exe(void);
 const char *tomoyo_yesno(const unsigned int value);
@@ -978,7 +978,7 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
                      struct path *path2);
 int tomoyo_path_number_perm(const u8 operation, struct path *path,
                            unsigned long number);
-int tomoyo_path_perm(const u8 operation, struct path *path,
+int tomoyo_path_perm(const u8 operation, const struct path *path,
                     const char *target);
 unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
 unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
index c151a1869597f8155a0296f89fafa61cc65f447d..2367b100cc62daccafa80932e4740385612746e9 100644 (file)
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -145,7 +145,7 @@ static void tomoyo_add_slash(struct tomoyo_path_info *buf)
  *
  * Returns true on success, false otherwise.
  */
-static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path)
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
 {
        buf->name = tomoyo_realpath_from_path(path);
        if (buf->name) {
@@ -782,7 +782,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
  *
  * Returns 0 on success, negative value otherwise.
  */
-int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
+int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target)
 {
        struct tomoyo_request_info r;
        struct tomoyo_obj_info obj = {
index bed745c8b1a30d47a173fd7d96322aebb2d09c9c..1e0d480ff6a6b653cce9c4af266f5a8420a58c52 100644 (file)
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -89,7 +89,7 @@ char *tomoyo_encode(const char *str)
  *
  * If dentry is a directory, trailing '/' is appended.
  */
-static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
+static char *tomoyo_get_absolute_path(const struct path *path, char * const buffer,
                                      const int buflen)
 {
        char *pos = ERR_PTR(-ENOMEM);
@@ -216,7 +216,7 @@ out:
  *
  * Returns the buffer.
  */
-static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
+static char *tomoyo_get_socket_name(const struct path *path, char * const buffer,
                                    const int buflen)
 {
        struct inode *inode = path->dentry->d_inode;
@@ -247,7 +247,7 @@ static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
  * These functions use kzalloc(), so the caller must call kfree()
  * if these functions didn't return NULL.
  */
-char *tomoyo_realpath_from_path(struct path *path)
+char *tomoyo_realpath_from_path(const struct path *path)
 {
        char *buf = NULL;
        char *name = NULL;
index f0b756e27fed6b143f823d6a7708a408ea630bd8..57c88d52ffa52c3a7e799cba86a07027a5926aae 100644 (file)
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -144,10 +144,9 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
  *
  * Returns 0 on success, negative value otherwise.
  */
-static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+static int tomoyo_inode_getattr(const struct path *path)
 {
-       struct path path = { mnt, dentry };
-       return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
+       return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL);
 }
 
 /**