aio_free_ring(ctx);
return -EAGAIN;
}
- up_write(&mm->mmap_sem);
-
- mm_populate(ctx->mmap_base, populate);
pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
+
+ /* We must do this while still holding mmap_sem for write, as we
+ * need to be protected against userspace attempting to mremap()
+ * or munmap() the ring buffer.
+ */
ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
1, 0, ctx->ring_pages, NULL);
+
+ /* Dropping the reference here is safe as the page cache will hold
+ * onto the pages for us. It is also required so that page migration
+ * can unmap the pages and get the right reference count.
+ */
for (i = 0; i < ctx->nr_pages; i++)
put_page(ctx->ring_pages[i]);
+ up_write(&mm->mmap_sem);
+
if (unlikely(ctx->nr_pages != nr_pages)) {
aio_free_ring(ctx);
return -EAGAIN;
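
The hunk above reorders aio_setup_ring(): up_write(&mm->mmap_sem) now happens only after get_user_pages() and the put_page() loop, closing the window in which userspace could mremap() or munmap() the ring between the mmap and the page lookup. The explicit mm_populate() also goes away, since get_user_pages() with write=1 faults the pages in itself. A minimal sketch of the resulting pattern in one piece follows; pin_ring_pages() and its parameters are illustrative names, not from the patch, the locking is folded into one helper for brevity, and the eight-argument get_user_pages() matches this era of the kernel:

#include <linux/mm.h>		/* get_user_pages(), put_page(), mmap_sem */
#include <linux/sched.h>	/* current */

/* Sketch only: assumes 'base' was just mmapped for this mm and that
 * mm == current->mm, as in aio_setup_ring().
 */
static long pin_ring_pages(struct mm_struct *mm, unsigned long base,
			   int nr_pages, struct page **pages)
{
	long got, i;

	down_write(&mm->mmap_sem);
	/* mmap_sem held for write: userspace cannot mremap()/munmap()
	 * the range, so 'base' stays valid across the lookup; the
	 * write fault also populates the pages.
	 */
	got = get_user_pages(current, mm, base, nr_pages,
			     1 /* write */, 0 /* force */, pages, NULL);

	/* Return the references taken by get_user_pages(); the ring
	 * file's page cache keeps the pages alive, and page migration
	 * needs the refcount to reflect only the mapping and the cache.
	 */
	for (i = 0; i < got; i++)
		put_page(pages[i]);
	up_write(&mm->mmap_sem);

	return got;
}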
@@ ... @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
+ rcu_read_lock();
table = rcu_dereference(mm->ioctx_table);
	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!table->table[i]) {
					ctx->id = i;
					table->table[i] = ctx;
+					rcu_read_unlock();
					spin_unlock(&mm->ioctx_lock);

					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
+		rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		table->nr = new_nr;
		spin_lock(&mm->ioctx_lock);
+		rcu_read_lock();
		old = rcu_dereference(mm->ioctx_table);

		if (!old) {
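
The rcu_read_lock()/rcu_read_unlock() pairs added above look redundant while ioctx_lock is held, but rcu_dereference() must be called inside an RCU read-side critical section; the update-side spinlock alone does not mark one, and lockdep-RCU and sparse flag the bare call. A minimal sketch of the two accepted idioms follows; my_table, my_lock and install_entry*() are illustrative names, not from the patch:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct my_table {
	unsigned nr;
	void *slot[];	/* stand-in for kioctx_table's table[] */
};

static DEFINE_SPINLOCK(my_lock);
static struct my_table __rcu *my_table_ptr;

static void install_entry(void *entry, unsigned id)
{
	struct my_table *t;

	spin_lock(&my_lock);
	rcu_read_lock();	/* what the patch adds: open a read-side
				 * section so rcu_dereference() is legal */
	t = rcu_dereference(my_table_ptr);
	t->slot[id] = entry;
	rcu_read_unlock();
	spin_unlock(&my_lock);
}

static void install_entry_alt(void *entry, unsigned id)
{
	struct my_table *t;

	spin_lock(&my_lock);
	/* alternative idiom: document that my_lock, not RCU,
	 * protects this load */
	t = rcu_dereference_protected(my_table_ptr,
				      lockdep_is_held(&my_lock));
	t->slot[id] = entry;
	spin_unlock(&my_lock);
}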
@@ ... @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
		struct kioctx_table *table;

		spin_lock(&mm->ioctx_lock);
+ rcu_read_lock();
		table = rcu_dereference(mm->ioctx_table);

		WARN_ON(ctx != table->table[ctx->id]);
table->table[ctx->id] = NULL;
+ rcu_read_unlock();
		spin_unlock(&mm->ioctx_lock);

		/* percpu_ref_kill() will do the necessary call_rcu() */
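
For context, the reader this locking discipline protects walks mm->ioctx_table under rcu_read_lock() alone, with no spinlock: that is why the writers above must publish with rcu_assign_pointer(), NULL the slot before unlocking, and defer the actual free to a grace period via percpu_ref_kill()'s call_rcu(). A simplified sketch along the lines of this era's lookup_ioctx() follows; lookup_ioctx_by_id() is an illustrative name, and the ring->id read from userspace plus error handling are omitted:

#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>

static struct kioctx *lookup_ioctx_by_id(struct mm_struct *mm, unsigned id)
{
	struct kioctx_table *table;
	struct kioctx *ctx = NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	if (table && id < table->nr) {
		ctx = table->table[id];
		/* take a reference before leaving the read-side section:
		 * kill_ioctx() may NULL this slot at any time, and the
		 * kioctx is only freed after a grace period
		 */
		if (ctx)
			percpu_ref_get(&ctx->users);
	}
	rcu_read_unlock();
	return ctx;
}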