git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - fs/inode.c
Merge tag 'nfs-for-4.13-3' of git://git.linux-nfs.org/projects/anna/linux-nfs
[karo-tx-linux.git] / fs / inode.c
index db5914783a7130d77725502cb4182c05ff7775c2..50370599e37104708b95ede3a627052cc0d910d3 100644 (file)
@@ -146,6 +146,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
+       inode->i_write_hint = WRITE_LIFE_NOT_SET;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
@@ -1891,11 +1892,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
-       prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+       prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
-       finish_wait(wq, &wait.wait);
+       finish_wait(wq, &wait.wq_entry);
        spin_lock(&inode_hash_lock);
 }
 
@@ -1914,8 +1915,6 @@ __setup("ihash_entries=", set_ihash_entries);
  */
 void __init inode_init_early(void)
 {
-       unsigned int loop;
-
        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
@@ -1927,20 +1926,15 @@ void __init inode_init_early(void)
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
-                                       HASH_EARLY,
+                                       HASH_EARLY | HASH_ZERO,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);
-
-       for (loop = 0; loop < (1U << i_hash_shift); loop++)
-               INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
 
 void __init inode_init(void)
 {
-       unsigned int loop;
-
        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
@@ -1958,14 +1952,11 @@ void __init inode_init(void)
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
-                                       0,
+                                       HASH_ZERO,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);
-
-       for (loop = 0; loop < (1U << i_hash_shift); loop++)
-               INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }
 
 void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
@@ -2023,7 +2014,7 @@ bool inode_owner_or_capable(const struct inode *inode)
                return true;
 
        ns = current_user_ns();
-       if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
+       if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
                return true;
        return false;
 }
@@ -2038,11 +2029,11 @@ static void __inode_dio_wait(struct inode *inode)
        DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
 
        do {
-               prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&inode->i_dio_count))
                        schedule();
        } while (atomic_read(&inode->i_dio_count));
-       finish_wait(wq, &q.wait);
+       finish_wait(wq, &q.wq_entry);
 }
 
 /**