aio: percpu ioctx refcount
author    Kent Overstreet <koverstreet@google.com>
          Sat, 23 Mar 2013 02:33:15 +0000 (13:33 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 26 Mar 2013 05:11:48 +0000 (16:11 +1100)
This just converts the ioctx refcount to the new generic dynamic percpu
refcount code.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
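
The percpu_ref API this patch moves to folds the old "dead" flag into the
refcount itself: percpu_ref_kill() marks the ref dead and returns true only
for the first caller to kill it (replacing atomic_xchg(&ctx->dead, 1)), and
percpu_ref_put() returns true once a killed ref drops to zero, which is when
free_ioctx() runs. As a rough illustration of those semantics only, the
userspace sketch below collapses the per-CPU counters into one atomic word
with a dead bit; the real <linux/percpu-refcount.h> shards the count across
CPUs and uses RCU, so treat this as a model of the lifecycle, not the kernel
implementation.

/*
 * Userspace model of the percpu_ref lifecycle used by this patch.
 * Illustration only: the real implementation shards the count per CPU
 * and relies on RCU; this collapses it to a single atomic word so the
 * kill/put/dead semantics are easy to follow. Names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_DEAD_BIAS (1u << 31)	/* high bit marks a killed ref */

struct ref_model {
	atomic_uint count;
};

static void ref_init(struct ref_model *ref)
{
	atomic_init(&ref->count, 1);	/* the initial reference */
}

static void ref_get(struct ref_model *ref)
{
	atomic_fetch_add(&ref->count, 1);
}

/* True once a killed ref drops its last reference. */
static bool ref_put(struct ref_model *ref)
{
	unsigned int prev = atomic_fetch_sub(&ref->count, 1);

	/* Like the kernel API, only report zero after a kill. */
	return (prev & REF_DEAD_BIAS) && (prev & ~REF_DEAD_BIAS) == 1;
}

/* Marks the ref dead; true only for the caller that did the kill. */
static bool ref_kill(struct ref_model *ref)
{
	return !(atomic_fetch_or(&ref->count, REF_DEAD_BIAS) & REF_DEAD_BIAS);
}

static bool ref_dead(struct ref_model *ref)
{
	return atomic_load(&ref->count) & REF_DEAD_BIAS;
}

int main(void)
{
	struct ref_model users;

	ref_init(&users);		/* as in ioctx_alloc() */
	ref_get(&users);		/* the extra ref ioctx_alloc() takes */

	if (ref_kill(&users))		/* kill_ioctx(): first killer wins */
		printf("killed, dead=%d\n", ref_dead(&users));

	if (ref_put(&users))		/* one ref still held: no free yet */
		printf("unexpected free\n");
	if (ref_put(&users))		/* last put: free_ioctx() would run */
		printf("last ref dropped\n");
	return 0;
}

In the patch itself, ioctx_alloc() replaces atomic_set(&ctx->users, 2) with
percpu_ref_init() plus a single percpu_ref_get() taken under rcu_read_lock(),
since gets on a live percpu ref happen inside an RCU read-side critical
section.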
fs/aio.c

index afb795ffd540a6ca03a9b81c5f55e454411c03ee..50e619d125672b37da96a09ea90b25da3ab80632 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -36,6 +36,7 @@
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
 #include <linux/compat.h>
+#include <linux/percpu-refcount.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -65,8 +66,7 @@ struct kioctx_cpu {
 };
 
 struct kioctx {
-       atomic_t                users;
-       atomic_t                dead;
+       struct percpu_ref       users;
 
        /* This needs improving */
        unsigned long           user_id;
@@ -368,7 +368,7 @@ static void free_ioctx(struct kioctx *ctx)
 
 static void put_ioctx(struct kioctx *ctx)
 {
-       if (unlikely(atomic_dec_and_test(&ctx->users)))
+       if (percpu_ref_put(&ctx->users))
                free_ioctx(ctx);
 }
 
@@ -409,8 +409,11 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        ctx->max_reqs = nr_events;
 
-       atomic_set(&ctx->users, 2);
-       atomic_set(&ctx->dead, 0);
+       percpu_ref_init(&ctx->users);
+       rcu_read_lock();
+       percpu_ref_get(&ctx->users);
+       rcu_read_unlock();
+
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->completion_lock);
        mutex_init(&ctx->ring_lock);
@@ -482,7 +485,7 @@ static void kill_ioctx_rcu(struct rcu_head *head)
  */
 static void kill_ioctx(struct kioctx *ctx)
 {
-       if (!atomic_xchg(&ctx->dead, 1)) {
+       if (percpu_ref_kill(&ctx->users)) {
                hlist_del_rcu(&ctx->list);
                /* Between hlist_del_rcu() and dropping the initial ref */
                synchronize_rcu();
@@ -528,12 +531,6 @@ void exit_aio(struct mm_struct *mm)
        struct hlist_node *n;
 
        hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
-               if (1 != atomic_read(&ctx->users))
-                       printk(KERN_DEBUG
-                               "exit_aio:ioctx still alive: %d %d %d\n",
-                               atomic_read(&ctx->users),
-                               atomic_read(&ctx->dead),
-                               atomic_read(&ctx->reqs_available));
                /*
                 * We don't need to bother with munmap() here -
                 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -544,7 +541,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               if (!atomic_xchg(&ctx->dead, 1)) {
+               if (percpu_ref_kill(&ctx->users)) {
                        hlist_del_rcu(&ctx->list);
                        call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
                }
@@ -655,7 +652,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 
        hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
                if (ctx->user_id == ctx_id) {
-                       atomic_inc(&ctx->users);
+                       percpu_ref_get(&ctx->users);
                        ret = ctx;
                        break;
                }
@@ -868,7 +865,7 @@ static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
        if (ret > 0)
                *i += ret;
 
-       if (unlikely(atomic_read(&ctx->dead)))
+       if (unlikely(percpu_ref_dead(&ctx->users)))
                ret = -EINVAL;
 
        if (!*i)