/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
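
/*
 * On-demand paging (ODP) umem support: instead of pinning all of an MR's
 * pages at registration time, pages are pulled in from the driver's
 * page-fault path and torn down from MMU-notifier callbacks. The
 * accounting helpers below keep the fault path and invalidations from
 * racing with each other.
 */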
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                int notifiers_count = item->odp_data->notifiers_count++;

                if (notifiers_count == 0)
                        /* Initialize the completion object for waiting on
                         * notifiers. Since notifier_count is zero, no one
                         * should be waiting right now. */
                        reinit_completion(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}
static void ib_umem_notifier_end_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                /*
                 * This sequence increase will notify the QP page fault that
                 * the page that is going to be mapped in the spte could have
                 * been freed.
                 */
                ++item->odp_data->notifiers_seq;
                if (--item->odp_data->notifiers_count == 0)
                        complete_all(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}
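
/*
 * The two helpers above are the writer side of a seqcount-like protocol.
 * The fault path snapshots notifiers_seq before pulling in pages, and must
 * recheck it (together with notifiers_count) under umem_mutex before
 * publishing new mappings. The matching reader-side check is
 * ib_umem_mmu_notifier_retry() in <rdma/ib_umem_odp.h>, whose core reads
 * roughly (a sketch, not the authoritative copy):
 *
 *      if (unlikely(item->odp_data->notifiers_count))
 *              return 1;       // an invalidation is in flight - retry
 *      if (item->odp_data->notifiers_seq != mmu_seq)
 *              return 1;       // invalidated since the snapshot - retry
 *      return 0;
 */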
/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
        atomic_inc(&context->notifier_count);
}
/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
        int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

        if (zero_notifiers &&
            !list_empty(&context->no_private_counters)) {
                /* No currently running mmu notifiers. Now is the chance to
                 * add private accounting to all previously added umems. */
                struct ib_umem_odp *odp_data, *next;

                /* Prevent concurrent mmu notifiers from working on the
                 * no_private_counters list. */
                down_write(&context->umem_rwsem);

                /* Read the notifier_count again, with the umem_rwsem
                 * semaphore taken for write. */
                if (!atomic_read(&context->notifier_count)) {
                        list_for_each_entry_safe(odp_data, next,
                                                 &context->no_private_counters,
                                                 no_private_counters) {
                                mutex_lock(&odp_data->umem_mutex);
                                odp_data->mn_counters_active = true;
                                list_del(&odp_data->no_private_counters);
                                complete_all(&odp_data->notifier_completion);
                                mutex_unlock(&odp_data->umem_mutex);
                        }
                }

                up_write(&context->umem_rwsem);
        }
}
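
/*
 * Note on no_private_counters: a umem registered while a notifier was
 * running cannot trust its per-umem counters yet, so it is parked on the
 * context-wide list and all of its faults are delayed. Once the last
 * notifier drains (above), every parked umem is promoted to private
 * accounting and its waiters are released.
 */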
static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
                                               u64 end, void *cookie) {
        /*
         * Increase the number of notifiers running, to
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(item);
        item->odp_data->dying = 1;
        /* Make sure that the fact the umem is dying is out before we release
         * all pending page faults. */
        smp_wmb();
        complete_all(&item->odp_data->notifier_completion);
        item->context->invalidate_range(item, ib_umem_start(item),
                                        ib_umem_end(item));
        return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
                                      ULLONG_MAX,
                                      ib_umem_notifier_release_trampoline,
                                      NULL);
        up_read(&context->umem_rwsem);
}
static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
                                      u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, start + PAGE_SIZE);
        ib_umem_notifier_end_account(item);
        return 0;
}
static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
                                      address + PAGE_SIZE,
                                      invalidate_page_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}
static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                             u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, end);
        return 0;
}
static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_start_trampoline, NULL);
        up_read(&context->umem_rwsem);
}
static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}
static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_end_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}
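
/*
 * An invalidate_range_start/..._end pair brackets each invalidation:
 * _start bumps notifiers_count on every overlapping umem (stalling new
 * faults) and calls the driver back to tear down device mappings; _end
 * bumps notifiers_seq and releases waiters. A fault that raced with the
 * pair sees either a nonzero count or a changed seq, and retries.
 */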
static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
        .invalidate_page            = ib_umem_notifier_invalidate_page,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
                                  unsigned long addr,
                                  size_t size)
{
        struct ib_umem *umem;
        struct ib_umem_odp *odp_data;
        int pages = size >> PAGE_SHIFT;
        int ret;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context    = context;
        umem->length     = size;
        umem->address    = addr;
        umem->page_size  = PAGE_SIZE;
        umem->writable   = 1;

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data) {
                ret = -ENOMEM;
                goto out_umem;
        }
        odp_data->umem = umem;

        mutex_init(&odp_data->umem_mutex);
        init_completion(&odp_data->notifier_completion);

        odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
        if (!odp_data->page_list) {
                ret = -ENOMEM;
                goto out_odp_data;
        }

        odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
        if (!odp_data->dma_list) {
                ret = -ENOMEM;
                goto out_page_list;
        }

        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)))
                odp_data->mn_counters_active = true;
        else
                list_add(&odp_data->no_private_counters,
                         &context->no_private_counters);
        up_write(&context->umem_rwsem);

        umem->odp_data = odp_data;

        return umem;

out_page_list:
        vfree(odp_data->page_list);
out_odp_data:
        kfree(odp_data);
out_umem:
        kfree(umem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
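
/*
 * Caller sketch (illustrative only, not taken from an in-tree driver): a
 * device supporting implicit ODP might carve fixed-size child umems out of
 * a large address range on demand, e.g.
 *
 *      umem = ib_alloc_odp_umem(ctx, fault_addr & ~(CHUNK_SIZE - 1),
 *                               CHUNK_SIZE);
 *      if (IS_ERR(umem))
 *              return PTR_ERR(umem);
 *
 * where CHUNK_SIZE is a hypothetical per-driver constant.
 */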
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
{
        int ret_val;
        struct pid *our_pid;
        struct mm_struct *mm = get_task_mm(current);

        if (!mm)
                return -EINVAL;

        /* Prevent creating ODP MRs in child processes */
        rcu_read_lock();
        our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        put_pid(our_pid);
        if (context->tgid != our_pid) {
                ret_val = -EINVAL;
                goto out_mm;
        }

        umem->hugetlb = 0;
        umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
        if (!umem->odp_data) {
                ret_val = -ENOMEM;
                goto out_mm;
        }
        umem->odp_data->umem = umem;

        mutex_init(&umem->odp_data->umem_mutex);
        init_completion(&umem->odp_data->notifier_completion);

        if (ib_umem_num_pages(umem)) {
                umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
                                            sizeof(*umem->odp_data->page_list));
                if (!umem->odp_data->page_list) {
                        ret_val = -ENOMEM;
                        goto out_odp_data;
                }

                umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
                                          sizeof(*umem->odp_data->dma_list));
                if (!umem->odp_data->dma_list) {
                        ret_val = -ENOMEM;
                        goto out_page_list;
                }
        }

        /*
         * When using MMU notifiers, we will get a
         * notification before the "current" task (and MM) is
         * destroyed. We use the umem_rwsem semaphore to synchronize.
         */
        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)) ||
            context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
                         &context->no_private_counters);
        downgrade_write(&context->umem_rwsem);

        if (context->odp_mrs_count == 1) {
                /*
                 * Note that at this point, no MMU notifier is running
                 * for this context!
                 */
                atomic_set(&context->notifier_count, 0);
                INIT_HLIST_NODE(&context->mn.hlist);
                context->mn.ops = &ib_umem_notifiers;
                /*
                 * Lock-dep detects a false positive for mmap_sem vs.
                 * umem_rwsem, due to not grasping downgrade_write correctly.
                 */
                lockdep_off();
                ret_val = mmu_notifier_register(&context->mn, mm);
                lockdep_on();
                if (ret_val) {
                        pr_err("Failed to register mmu_notifier %d\n", ret_val);
                        ret_val = -EBUSY;
                        goto out_mutex;
                }
        }

        up_read(&context->umem_rwsem);

        /*
         * Note that doing an mmput can cause a notifier for the relevant mm.
         * If the notifier is called while we hold the umem_rwsem, this will
         * cause a deadlock. Therefore, we release the reference only after we
         * released the semaphore.
         */
        mmput(mm);
        return 0;

out_mutex:
        up_read(&context->umem_rwsem);
        vfree(umem->odp_data->dma_list);
out_page_list:
        vfree(umem->odp_data->page_list);
out_odp_data:
        kfree(umem->odp_data);
out_mm:
        mmput(mm);
        return ret_val;
}
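
/*
 * In-tree, ib_umem_odp_get() is reached from ib_umem_get() when the MR was
 * registered with IB_ACCESS_ON_DEMAND; it pairs with ib_umem_odp_release()
 * below, which ib_umem_release() invokes for ODP umems.
 */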
void ib_umem_odp_release(struct ib_umem *umem)
{
        struct ib_ucontext *context = umem->context;

        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
                                    ib_umem_end(umem));

        down_write(&context->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_remove(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        context->odp_mrs_count--;
        if (!umem->odp_data->mn_counters_active) {
                list_del(&umem->odp_data->no_private_counters);
                complete_all(&umem->odp_data->notifier_completion);
        }

        /*
         * Downgrade the lock to a read lock. This ensures that the notifiers
         * (who lock the mutex for reading) will be able to finish, and we
         * will be able to eventually obtain the mmu notifiers SRCU. Note
         * that since we are doing it atomically, no other user could register
         * and unregister while we do the check.
         */
        downgrade_write(&context->umem_rwsem);
        if (!context->odp_mrs_count) {
                struct task_struct *owning_process = NULL;
                struct mm_struct *owning_mm        = NULL;

                owning_process = get_pid_task(context->tgid,
                                              PIDTYPE_PID);
                if (owning_process == NULL)
                        /*
                         * The process is already dead, notifiers were removed
                         * already.
                         */
                        goto out;

                owning_mm = get_task_mm(owning_process);
                if (owning_mm == NULL)
                        /*
                         * The process' mm is already dead, notifiers were
                         * removed already.
                         */
                        goto out_put_task;
                mmu_notifier_unregister(&context->mn, owning_mm);

                mmput(owning_mm);

out_put_task:
                put_task_struct(owning_process);
        }
out:
        up_read(&context->umem_rwsem);

        vfree(umem->odp_data->dma_list);
        vfree(umem->odp_data->page_list);
        kfree(umem->odp_data);
        kfree(umem);
}
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem *umem,
                int page_index,
                u64 base_virt_addr,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int stored_page = 0;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
         * early if we have a notifier running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem->odp_data->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, PAGE_SIZE,
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
                umem->odp_data->page_list[page_index] = page;
                umem->npages++;
                stored_page = 1;
        } else if (umem->odp_data->page_list[page_index] == page) {
                umem->odp_data->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem->odp_data->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);

        if (remove_existing_mapping && umem->context->invalidate_range) {
                invalidate_page_trampoline(
                        umem,
                        base_virt_addr + (page_index * PAGE_SIZE),
                        base_virt_addr + ((page_index + 1) * PAGE_SIZE),
                        NULL);
                ret = -EAGAIN;
        }

        return ret;
}
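
/*
 * Note on the dma_list encoding used above: DMA addresses are page aligned,
 * so the low bits of each dma_list entry are reused to carry the access
 * bits (e.g. ODP_WRITE_ALLOWED_BIT). ib_umem_odp_unmap_dma_pages() strips
 * them with ODP_DMA_ADDR_MASK before handing the address back to the DMA
 * API.
 */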
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages is updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                              u64 access_mask, unsigned long current_seq)
{
        struct task_struct *owning_process  = NULL;
        struct mm_struct   *owning_mm       = NULL;
        struct page       **local_page_list = NULL;
        u64 off;
        int j, k, ret = 0, start_idx, npages = 0;
        u64 base_virt_addr;
        unsigned int flags = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem) ||
            user_virt + bcnt > ib_umem_end(umem))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        off = user_virt & (~PAGE_MASK);
        user_virt = user_virt & PAGE_MASK;
        base_virt_addr = user_virt;
        bcnt += off; /* Charge for the first page offset as well. */

        owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (owning_process == NULL) {
                ret = -EINVAL;
                goto out_no_task;
        }

        owning_mm = get_task_mm(owning_process);
        if (owning_mm == NULL) {
                ret = -EINVAL;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages =
                        min_t(size_t, ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
                              PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                user_virt, gup_num_pages,
                                flags, local_page_list, NULL, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0)
                        break;

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                user_virt += npages << PAGE_SHIFT;
                mutex_lock(&umem->odp_data->umem_mutex);
                for (j = 0; j < npages; ++j) {
                        ret = ib_umem_odp_map_dma_single_page(
                                umem, k, base_virt_addr, local_page_list[j],
                                access_mask, current_seq);
                        if (ret < 0)
                                break;
                        k++;
                }
                mutex_unlock(&umem->odp_data->umem_mutex);

                if (ret < 0) {
                        /* Release left over pages when handling errors. */
                        for (++j; j < npages; ++j)
                                put_page(local_page_list[j]);
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        put_task_struct(owning_process);
out_no_task:
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
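
/*
 * Typical caller pattern (a sketch, loosely modeled on how a driver
 * page-fault handler is expected to drive this API; names like io_virt are
 * illustrative, not a verbatim copy of any in-tree driver):
 *
 * retry:
 *      seq = umem->odp_data->notifiers_seq;
 *      smp_rmb();      // pair with the seq increment in end_account
 *      npages = ib_umem_odp_map_dma_pages(umem, io_virt, bcnt,
 *                                         access_mask, seq);
 *      if (npages == -EAGAIN)
 *              goto retry;     // raced with an invalidation
 *
 * followed by updating the device page tables from dma_list under
 * umem_mutex, once ib_umem_mmu_notifier_retry(umem, seq) reports no race.
 */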
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
                                 u64 bound)
{
        int idx;
        u64 addr;
        struct ib_device *dev = umem->context->device;

        virt  = max_t(u64, virt,  ib_umem_start(umem));
        bound = min_t(u64, bound, ib_umem_end(umem));
        /* Note that during the run of this function, the
         * notifiers_count of the MR is > 0, preventing any racing
         * faults from completing. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
        for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
                        dma_addr_t dma = umem->odp_data->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        /* on demand pinning support */
                        if (!umem->context->invalidate_range)
                                put_page(page);
                        umem->odp_data->page_list[idx] = NULL;
                        umem->odp_data->dma_list[idx] = 0;
                }
        }
        mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
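
/*
 * Driver-side usage sketch (illustrative): an invalidate_range callback is
 * expected to first zap the device translations covering [start, end) so
 * the HW stops using the pages, and only then release the DMA mappings:
 *
 *      static void my_invalidate_range(struct ib_umem *umem,
 *                                      unsigned long start,
 *                                      unsigned long end)
 *      {
 *              // ... zap device page-table entries for [start, end) ...
 *              ib_umem_odp_unmap_dma_pages(umem, start, end);
 *      }
 *
 * my_invalidate_range is a hypothetical name; the in-tree consumer of this
 * API is the mlx5 driver.
 */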