/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "mlx5_ib.h"

#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

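/*
 * Zap the MTT entries covering [start, end) of an ODP umem and unmap the
 * underlying DMA pages, so the device faults again on its next access.
 * Called from the ib_umem_odp MMU notifier path.
 */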
void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end)
{
        struct mlx5_ib_mr *mr;
        const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
                                    sizeof(struct mlx5_mtt)) - 1;
        u64 idx = 0, blk_start_idx = 0;
        int in_block = 0;
        u64 addr;

        if (!umem || !umem->odp_data) {
                pr_err("invalidation called on NULL umem or non-ODP umem\n");
                return;
        }

        mr = umem->odp_data->private;
        if (!mr || !mr->ibmr.pd)
                return;

        start = max_t(u64, ib_umem_start(umem), start);
        end = min_t(u64, ib_umem_end(umem), end);

        /*
         * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
         * while we are doing the invalidation, no page fault will attempt to
         * overwrite the same MTTs. Concurrent invalidations might race us,
         * but they will write 0s as well, so no difference in the end result.
         */
        for (addr = start; addr < end; addr += (u64)umem->page_size) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
                /*
                 * Strive to write the MTTs in chunks, but avoid overwriting
                 * non-existing MTTs. The heuristic here can be improved to
                 * estimate the cost of another UMR vs. the cost of a bigger
                 * UMR.
                 */
                if (umem->odp_data->dma_list[idx] &
                    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
                        if (!in_block) {
                                blk_start_idx = idx;
                                in_block = 1;
                        }
                } else {
                        u64 umr_offset = idx & umr_block_mask;

                        if (in_block && umr_offset == 0) {
                                mlx5_ib_update_xlt(mr, blk_start_idx,
                                                   idx - blk_start_idx, 0,
                                                   MLX5_IB_UPD_XLT_ZAP |
                                                   MLX5_IB_UPD_XLT_ATOMIC);
                                in_block = 0;
                        }
                }
        }
        if (in_block)
                mlx5_ib_update_xlt(mr, blk_start_idx,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
         * needed.
         */
        ib_umem_odp_unmap_dma_pages(umem, start, end);
}

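/*
 * Fill dev->odp_caps and dev->odp_max_size from the device's reported
 * on-demand paging capabilities.
 */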
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{
        struct ib_odp_caps *caps = &dev->odp_caps;

        memset(caps, 0, sizeof(*caps));

        if (!MLX5_CAP_GEN(dev->mdev, pg))
                return;

        caps->general_caps = IB_ODP_SUPPORT;

        if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
                dev->odp_max_size = U64_MAX;
        else
                dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

        if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
}

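/*
 * Look up the MR that owns @key. Returns NULL if the mkey is not found,
 * does not match @key, or the MR is no longer live.
 */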
static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
                                                   u32 key)
{
        u32 base_key = mlx5_base_mkey(key);
        struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
        struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

        if (!mmkey || mmkey->key != key || !mr->live)
                return NULL;

        return container_of(mmkey, struct mlx5_ib_mr, mmkey);
}

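/*
 * Ask the device to resume the faulting WQ. A non-zero @error resolves the
 * fault with an error, so the WQE completes in error instead of retrying.
 */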
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
                                      struct mlx5_pagefault *pfault,
                                      int error)
{
        int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
                     pfault->wqe.wq_num : pfault->token;
        int ret = mlx5_core_page_fault_resume(dev->mdev, pfault->token,
                                              wq_num, pfault->type, error);

        if (ret)
                mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
                            wq_num);
}

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev,
                                         u32 key, u64 io_virt, size_t bcnt,
                                         u32 *bytes_committed,
                                         u32 *bytes_mapped)
{
        int srcu_key;
        unsigned int current_seq;
        u64 start_idx;
        int npages = 0, ret = 0;
        struct mlx5_ib_mr *mr;
        u64 access_mask = ODP_READ_ALLOWED_BIT;

        srcu_key = srcu_read_lock(&mib_dev->mr_srcu);
        mr = mlx5_ib_odp_find_mr_lkey(mib_dev, key);
        /*
         * If we didn't find the MR, it means the MR was closed while we were
         * handling the ODP event. In this case we return -EFAULT so that the
         * QP will be closed.
         */
        if (!mr || !mr->ibmr.pd) {
                pr_err("Failed to find relevant mr for lkey=0x%06x, probably the MR was destroyed\n",
                       key);
                ret = -EFAULT;
                goto srcu_unlock;
        }
        if (!mr->umem->odp_data) {
                pr_debug("skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
                         key);
                if (bytes_mapped)
                        *bytes_mapped +=
                                (bcnt - *bytes_committed);
                goto srcu_unlock;
        }

        current_seq = ACCESS_ONCE(mr->umem->odp_data->notifiers_seq);
        /*
         * Ensure the sequence number is valid for some time before we call
         * gup.
         */
        smp_rmb();

        /*
         * Avoid branches - this code will perform correctly
         * in all iterations (in iteration 2 and above,
         * bytes_committed == 0).
         */
        io_virt += *bytes_committed;
        bcnt -= *bytes_committed;

        start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;

        if (mr->umem->writable)
                access_mask |= ODP_WRITE_ALLOWED_BIT;

        npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt,
                                           access_mask, current_seq);
        if (npages < 0) {
                ret = npages;
                goto srcu_unlock;
        }

        if (npages > 0) {
                mutex_lock(&mr->umem->odp_data->umem_mutex);
                if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
                        /*
                         * No need to check whether the MTTs really belong to
                         * this MR, since ib_umem_odp_map_dma_pages already
                         * checks this.
                         */
                        ret = mlx5_ib_update_xlt(mr, start_idx, npages,
                                                 PAGE_SHIFT,
                                                 MLX5_IB_UPD_XLT_ATOMIC);
                } else {
                        ret = -EAGAIN;
                }
                mutex_unlock(&mr->umem->odp_data->umem_mutex);
                if (ret < 0) {
                        if (ret != -EAGAIN)
                                pr_err("Failed to update mkey page tables\n");
                        goto srcu_unlock;
                }
                if (bytes_mapped) {
                        u32 new_mappings = npages * PAGE_SIZE -
                                (io_virt - round_down(io_virt, PAGE_SIZE));
                        *bytes_mapped += min_t(u32, new_mappings, bcnt);
                }
        }

srcu_unlock:
        if (ret == -EAGAIN) {
                if (!mr->umem->odp_data->dying) {
                        struct ib_umem_odp *odp_data = mr->umem->odp_data;
                        unsigned long timeout =
                                msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

                        if (!wait_for_completion_timeout(
                                        &odp_data->notifier_completion,
                                        timeout))
                                pr_warn("timeout waiting for mmu notifier completion\n");
                } else {
                        /* The MR is being killed, kill the QP as well. */
                        ret = -EFAULT;
                }
        }
        srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
        *bytes_committed = 0;
        return ret ? ret : npages;
}

/**
 * Parse a series of data segments for page fault handling.
 *
 * @qp the QP on which the fault occurred.
 * @pfault contains page fault information.
 * @wqe points at the first data segment in the WQE.
 * @wqe_end points after the end of the WQE.
 * @bytes_mapped receives the number of bytes that the function was able to
 *               map. This allows the caller to decide intelligently whether
 *               enough memory was mapped to resolve the page fault
 *               successfully (e.g. enough for the next MTU, or the entire
 *               WQE).
 * @total_wqe_bytes receives the total data size of this WQE in bytes (minus
 *                  the committed bytes).
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
                                   struct mlx5_pagefault *pfault,
                                   struct mlx5_ib_qp *qp, void *wqe,
                                   void *wqe_end, u32 *bytes_mapped,
                                   u32 *total_wqe_bytes, int receive_queue)
{
        int ret = 0, npages = 0;
        u64 io_virt;
        u32 key, byte_count;
        size_t bcnt;
        int inline_segment;

        /* Skip SRQ next-WQE segment. */
        if (receive_queue && qp->ibqp.srq)
                wqe += sizeof(struct mlx5_wqe_srq_next_seg);

        if (bytes_mapped)
                *bytes_mapped = 0;
        if (total_wqe_bytes)
                *total_wqe_bytes = 0;

        while (wqe < wqe_end) {
                struct mlx5_wqe_data_seg *dseg = wqe;

                io_virt = be64_to_cpu(dseg->addr);
                key = be32_to_cpu(dseg->lkey);
                byte_count = be32_to_cpu(dseg->byte_count);
                inline_segment = !!(byte_count & MLX5_INLINE_SEG);
                bcnt = byte_count & ~MLX5_INLINE_SEG;

                if (inline_segment) {
                        bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
                        wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
                                     16);
                } else {
                        wqe += sizeof(*dseg);
                }

                /* receive WQE end of sg list. */
                if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
                    io_virt == 0)
                        break;

                if (!inline_segment && total_wqe_bytes) {
                        *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
                                        pfault->bytes_committed);
                }

                /* A zero length data segment designates a length of 2GB. */
                if (bcnt == 0)
                        bcnt = 1U << 31;

                if (inline_segment || bcnt <= pfault->bytes_committed) {
                        pfault->bytes_committed -=
                                min_t(size_t, bcnt,
                                      pfault->bytes_committed);
                        continue;
                }

                ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
                                                    &pfault->bytes_committed,
                                                    bytes_mapped);
                if (ret < 0)
                        break;
                npages += ret;
        }

        return ret < 0 ? ret : npages;
}

/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
        struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
        struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
        u16 wqe_index = pfault->wqe.wqe_index;
        u32 ds, opcode;
        u32 ctrl_wqe_index, ctrl_qpn;
        u32 qpn = qp->trans_qp.base.mqp.qpn;

        ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
        if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
                mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
                            ds, wqe_length);
                return -EFAULT;
        }

        if (ds == 0) {
                mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
                            wqe_index, qpn);
                return -EFAULT;
        }

        ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
                          MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
                         MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
        if (wqe_index != ctrl_wqe_index) {
                mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
                            wqe_index, qpn, ctrl_wqe_index);
                return -EFAULT;
        }

        ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
                   MLX5_WQE_CTRL_QPN_SHIFT;
        if (qpn != ctrl_qpn) {
                mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
                            wqe_index, qpn, ctrl_qpn);
                return -EFAULT;
        }

        *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
        *wqe += sizeof(*ctrl);

        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                switch (opcode) {
                case MLX5_OPCODE_SEND:
                case MLX5_OPCODE_SEND_IMM:
                case MLX5_OPCODE_SEND_INVAL:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_SEND))
                                goto invalid_transport_or_opcode;
                        break;
                case MLX5_OPCODE_RDMA_WRITE:
                case MLX5_OPCODE_RDMA_WRITE_IMM:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_WRITE))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                        break;
                case MLX5_OPCODE_RDMA_READ:
                        if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                              IB_ODP_SUPPORT_READ))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_raddr_seg);
                        break;
                default:
                        goto invalid_transport_or_opcode;
                }
                break;
        case IB_QPT_UD:
                switch (opcode) {
                case MLX5_OPCODE_SEND:
                case MLX5_OPCODE_SEND_IMM:
                        if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
                              IB_ODP_SUPPORT_SEND))
                                goto invalid_transport_or_opcode;
                        *wqe += sizeof(struct mlx5_wqe_datagram_seg);
                        break;
                default:
                        goto invalid_transport_or_opcode;
                }
                break;
        default:
invalid_transport_or_opcode:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
                            qp->ibqp.qp_type, opcode);
                return -EFAULT;
        }

        return 0;
}

/*
 * Parse responder WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler(
        struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
        struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{
        struct mlx5_ib_wq *wq = &qp->rq;
        int wqe_size = 1 << wq->wqe_shift;

        if (qp->ibqp.srq) {
                mlx5_ib_err(dev, "ODP fault on SRQ is not supported\n");
                return -EFAULT;
        }

        if (qp->wq_sig) {
                mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
                return -EFAULT;
        }

        if (wqe_size > wqe_length) {
                mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
                return -EFAULT;
        }

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
                      IB_ODP_SUPPORT_RECV))
                        goto invalid_transport_or_opcode;
                break;
        default:
invalid_transport_or_opcode:
                mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
                            qp->ibqp.qp_type);
                return -EFAULT;
        }

        *wqe_end = *wqe + wqe_size;

        return 0;
}

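/*
 * Translate a work queue number reported in a page fault to its mlx5_ib_qp,
 * or return NULL (with an error log) if no such QP exists.
 */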
static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
                                              u32 wq_num)
{
        struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);

        if (!mqp) {
                mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
                return NULL;
        }

        return to_mibqp(mqp);
}

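/*
 * Handle a WQE-type page fault: read the faulting WQE from user memory,
 * parse it as an initiator or responder WQE, fault in the data segments it
 * references, and resume the QP, with or without error.
 */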
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
                                          struct mlx5_pagefault *pfault)
{
        int ret;
        void *wqe, *wqe_end;
        u32 bytes_mapped, total_wqe_bytes;
        char *buffer = NULL;
        int resume_with_error = 1;
        u16 wqe_index = pfault->wqe.wqe_index;
        int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
        struct mlx5_ib_qp *qp;

        buffer = (char *)__get_free_page(GFP_KERNEL);
        if (!buffer) {
                mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
                goto resolve_page_fault;
        }

        qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
        if (!qp)
                goto resolve_page_fault;

        ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
                                    PAGE_SIZE, &qp->trans_qp.base);
        if (ret < 0) {
                mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
                            ret, wqe_index, pfault->token);
                goto resolve_page_fault;
        }

        wqe = buffer;
        if (requestor)
                ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
        else
                ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
        if (ret < 0)
                goto resolve_page_fault;

        if (wqe >= wqe_end) {
                mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
                goto resolve_page_fault;
        }

        ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
                                      &bytes_mapped, &total_wqe_bytes,
                                      !requestor);
        if (ret == -EAGAIN) {
                resume_with_error = 0;
                goto resolve_page_fault;
        } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
                if (ret != -ENOENT)
                        mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n",
                                    ret);
                goto resolve_page_fault;
        }

        resume_with_error = 0;
resolve_page_fault:
        mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
        mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
                    pfault->token, resume_with_error,
                    pfault->type);
        free_page((unsigned long)buffer);
}

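/* Number of pages spanned by a byte range starting at @address. */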
static int pages_in_range(u64 address, u32 length)
{
        return (ALIGN(address + length, PAGE_SIZE) -
                (address & PAGE_MASK)) >> PAGE_SHIFT;
}

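/*
 * Handle an RDMA-type page fault: fault in the pages needed by the current
 * packet, resume the QP, and, when the reported length was zero, prefetch
 * additional pages of the RDMA range.
 */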
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
                                           struct mlx5_pagefault *pfault)
{
        u64 address;
        u32 length;
        u32 prefetch_len = pfault->bytes_committed;
        int prefetch_activated = 0;
        u32 rkey = pfault->rdma.r_key;
        int ret;

        /* The RDMA responder handler handles the page fault in two parts.
         * First it brings the necessary pages for the current packet
         * (and uses the pfault context), and then (after resuming the QP)
         * prefetches more pages. The second operation cannot use the pfault
         * context and therefore uses the dummy_pfault context allocated on
         * the stack */
        pfault->rdma.rdma_va += pfault->bytes_committed;
        pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
                                        pfault->rdma.rdma_op_len);
        pfault->bytes_committed = 0;

        address = pfault->rdma.rdma_va;
        length  = pfault->rdma.rdma_op_len;

        /* For some operations, the hardware cannot tell the exact message
         * length, and in those cases it reports zero. Use prefetch
         * logic. */
        if (length == 0) {
                prefetch_activated = 1;
                length = pfault->rdma.packet_size;
                prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
        }

        ret = pagefault_single_data_segment(dev, rkey, address, length,
                                            &pfault->bytes_committed, NULL);
        if (ret == -EAGAIN) {
                /* We're racing with an invalidation, don't prefetch */
                prefetch_activated = 0;
        } else if (ret < 0 || pages_in_range(address, length) > ret) {
                mlx5_ib_page_fault_resume(dev, pfault, 1);
                if (ret != -ENOENT)
                        mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
                                     ret, pfault->token, pfault->type);
                return;
        }

        mlx5_ib_page_fault_resume(dev, pfault, 0);
        mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
                    pfault->token, pfault->type,
                    prefetch_activated);

        /* At this point, there might be a new pagefault already arriving in
         * the eq, switch to the dummy pagefault for the rest of the
         * processing. We're still OK with the objects being alive as the
         * work-queue is being fenced. */
        if (prefetch_activated) {
                u32 bytes_committed = 0;

                ret = pagefault_single_data_segment(dev, rkey, address,
                                                    prefetch_len,
                                                    &bytes_committed, NULL);
                if (ret < 0)
                        mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
                                     ret, pfault->token, address,
                                     prefetch_len);
        }
}

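/*
 * Top-level page fault handler called by the mlx5 core driver: dispatch WQE
 * and RDMA faults to their handlers and resume with error on anything else.
 */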
void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
                    struct mlx5_pagefault *pfault)
{
        struct mlx5_ib_dev *dev = context;
        u8 event_subtype = pfault->event_subtype;

        switch (event_subtype) {
        case MLX5_PFAULT_SUBTYPE_WQE:
                mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
                break;
        case MLX5_PFAULT_SUBTYPE_RDMA:
                mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
                break;
        default:
                mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
                            event_subtype);
                mlx5_ib_page_fault_resume(dev, pfault, 1);
        }
}

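/* Per-device ODP setup: initialize the SRCU domain that protects MRs. */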
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
{
        int ret;

        ret = init_srcu_struct(&ibdev->mr_srcu);

        return ret;
}

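/* Per-device ODP teardown: release the SRCU domain. */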
void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
{
        cleanup_srcu_struct(&ibdev->mr_srcu);
}