/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/addrconf.h>
#include <linux/security.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include <rdma/rw.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR]		= "CQ error",
	[IB_EVENT_QP_FATAL]		= "QP fatal error",
	[IB_EVENT_QP_REQ_ERR]		= "QP request error",
	[IB_EVENT_QP_ACCESS_ERR]	= "QP access error",
	[IB_EVENT_COMM_EST]		= "communication established",
	[IB_EVENT_SQ_DRAINED]		= "send queue drained",
	[IB_EVENT_PATH_MIG]		= "path migration successful",
	[IB_EVENT_PATH_MIG_ERR]		= "path migration error",
	[IB_EVENT_DEVICE_FATAL]		= "device fatal error",
	[IB_EVENT_PORT_ACTIVE]		= "port active",
	[IB_EVENT_PORT_ERR]		= "port error",
	[IB_EVENT_LID_CHANGE]		= "LID change",
	[IB_EVENT_PKEY_CHANGE]		= "P_key change",
	[IB_EVENT_SM_CHANGE]		= "SM change",
	[IB_EVENT_SRQ_ERR]		= "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER]	= "client reregister",
	[IB_EVENT_GID_CHANGE]		= "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
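
/*
 * Example (illustrative sketch, not part of this file): a consumer's
 * asynchronous event handler can use ib_event_msg() to log a readable
 * event name. "my_event_handler" is a hypothetical callback registered
 * with ib_register_event_handler().
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("%s: async event: %s\n",
 *			event->device->name, ib_event_msg(event->event));
 *	}
 */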
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS]			= "success",
	[IB_WC_LOC_LEN_ERR]		= "local length error",
	[IB_WC_LOC_QP_OP_ERR]		= "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR]		= "local EE context operation error",
	[IB_WC_LOC_PROT_ERR]		= "local protection error",
	[IB_WC_WR_FLUSH_ERR]		= "WR flushed",
	[IB_WC_MW_BIND_ERR]		= "memory management operation error",
	[IB_WC_BAD_RESP_ERR]		= "bad response error",
	[IB_WC_LOC_ACCESS_ERR]		= "local access error",
	[IB_WC_REM_INV_REQ_ERR]		= "invalid request error",
	[IB_WC_REM_ACCESS_ERR]		= "remote access error",
	[IB_WC_REM_OP_ERR]		= "remote operation error",
	[IB_WC_RETRY_EXC_ERR]		= "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR]	= "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR]	= "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR]	= "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR]		= "operation aborted",
	[IB_WC_INV_EECN_ERR]		= "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR]	= "invalid EE context state",
	[IB_WC_FATAL_ERR]		= "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR]	= "response timeout error",
	[IB_WC_GENERAL_ERR]		= "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
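
/*
 * Example (illustrative): the multiplier is relative to the 2.5 Gb/sec
 * base rate, while ib_rate_to_mbps() reports the raw signalling rate,
 * so fractional-lane rates only round-trip through the mbps helper:
 *
 *	int mult = ib_rate_to_mult(IB_RATE_40_GBPS);	 // 16
 *	int mbps = ib_rate_to_mbps(IB_RATE_40_GBPS);	 // 40000
 *	int edr  = ib_rate_to_mbps(IB_RATE_25_GBPS);	 // 25781
 */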
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
		const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->uobject = NULL;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device	= pd->device;
		mr->pd		= pd;
		mr->uobject	= NULL;
		mr->need_inval	= false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
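
/*
 * Example (illustrative sketch): a kernel consumer typically allocates one
 * PD per device at init time via the ib_alloc_pd() wrapper macro, which
 * supplies KBUILD_MODNAME as the caller string:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */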
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		ah->type    = ah_attr->type;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(rdma_create_ah);
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
{
	const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
	struct iphdr ip4h_checked;
	const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if (ip6h->version != 6)
		return (ip4h->version == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ihl != 5)
		return 6;

	/* Verify checksum.
	 * We can't write on scattered buffers so we need to copy to
	 * temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.check = 0;
	ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->check == ip4h_checked.check)
		return 4;
	return 6;
}
EXPORT_SYMBOL(ib_get_rdma_header_version);
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}
struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;

	if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
	    (is_vlan_dev(gid_attr->ndev) &&
	     vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}
int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
			      enum rdma_network_type net_type,
			      union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.saddr, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.daddr, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
/*
 * This function creates an ah from the incoming packet.
 * The incoming packet's dgid is the GID of the receiving node on which
 * this code is executing, and its sgid is the GID of the sender.
 *
 * When resolving the destination mac address, the arrived dgid is used
 * as sgid and sgid is used as dgid, because sgid holds the destination
 * GID to respond to.
 *
 * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the
 * positions of the dgid and sgid arguments do not match the order of
 * the parameters.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct rdma_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->type = rdma_ah_find_type(device, port_num);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					&sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		int if_index = 0;
		u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
				wc->vlan_id : 0xffff;
		struct net_device *idev;
		struct net_device *resolved_dev;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTOTYPE;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid,
						   ah_attr->roce.dmac,
						   wc->wc_flags & IB_WC_WITH_VLAN ?
						   NULL : &vlan_id,
						   &if_index, &hoplimit);
		if (ret) {
			dev_put(idev);
			return ret;
		}

		resolved_dev = dev_get_by_index(&init_net, if_index);
		rcu_read_lock();
		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
								   resolved_dev))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret)
			return ret;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;
	}

	rdma_ah_set_dlid(ah_attr, wc->slid);
	rdma_ah_set_sl(ah_attr, wc->sl);
	rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
	rdma_ah_set_port_num(ah_attr, port_num);

	if (wc->wc_flags & IB_WC_GRH) {
		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		flow_class = be32_to_cpu(grh->version_tclass_flow);
		rdma_ah_set_grh(ah_attr, &sgid,
				flow_class & 0xFFFFF,
				(u8)gid_index, hoplimit,
				(flow_class >> 20) & 0xFF);
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct rdma_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return rdma_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
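
/*
 * Example (illustrative sketch): a UD service replying to a received
 * datagram can build the response AH straight from the completion. The
 * receive buffer is assumed to begin with the 40-byte GRH, which is
 * present when IB_WC_GRH is set in wc->wc_flags:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	... post the reply send WR using ah ...
 *	rdma_destroy_ah(ah);
 */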
int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	if (ah->type != ah_attr->type)
		return -EINVAL;

	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_modify_ah);

int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(rdma_query_ah);

int rdma_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(rdma_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->srq_type	   = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
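
/*
 * Example (illustrative sketch): a basic (non-XRC) SRQ shared by several
 * receive QPs; the capacities shown are arbitrary:
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 1 },
 *		.srq_type = IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */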
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;
	int err;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	err = ib_open_shared_qp_security(qp, real_qp->device);
	if (err) {
		kfree(qp);
		return ERR_PTR(err);
	}

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
		struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;
	int ret;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	/*
	 * If the caller is using the RDMA API calculate the resources
	 * needed for the RDMA READ/WRITE operations.
	 *
	 * Note that these callers need to pass in a port number.
	 */
	if (qp_init_attr->cap.max_rdma_ctxs)
		rdma_rw_init_qp(device, qp_init_attr);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	ret = ib_create_qp_security(qp, device);
	if (ret) {
		ib_destroy_qp(qp);
		return ERR_PTR(ret);
	}

	qp->device     = device;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	qp->mrs_used = 0;
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd	    = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd    = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
		if (ret) {
			pr_err("failed to init MR pool ret= %d\n", ret);
			ib_destroy_qp(qp);
			return ERR_PTR(ret);
		}
	}

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
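
/*
 * Example (illustrative sketch): creating an RC QP. The capacities are
 * arbitrary, and "cq" is assumed to be a completion queue the caller
 * already created, used here for both send and receive completions:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq,
 *		.recv_cq = cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 1,
 *			.max_recv_sge = 1,
 *		},
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */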
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX	|
						IB_QP_PORT		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN),
				[IB_QPT_XRC_TGT] = (IB_QP_AV		|
						IB_QP_PATH_MTU		|
						IB_QP_DEST_QPN		|
						IB_QP_RQ_PSN		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			},
		},
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT	|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_SQ_PSN		|
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT	|
						IB_QP_SQ_PSN),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_ALT_PATH		|
						IB_QP_PATH_MIG_STATE	|
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV		|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_INI] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_RETRY_CNT		|
						IB_QP_RNR_RETRY		|
						IB_QP_MAX_QP_RD_ATOMIC	|
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC_TGT] = (IB_QP_PORT		|
						IB_QP_AV		|
						IB_QP_TIMEOUT		|
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH		|
						IB_QP_ACCESS_FLAGS	|
						IB_QP_PKEY_INDEX	|
						IB_QP_MIN_RNR_TIMER	|
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX	|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE		|
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE		|
						IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
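
/*
 * Example (illustrative): the RESET->INIT row of qp_state_table above
 * makes this check succeed only when every required attribute for an RC
 * QP is present in the mask:
 *
 *	ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			   IB_QP_ACCESS_FLAGS,
 *			   IB_LINK_LAYER_INFINIBAND);	// returns 1
 */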
int ib_resolve_eth_dmac(struct ib_device *device,
			struct rdma_ah_attr *ah_attr)
{
	int ret = 0;
	struct ib_global_route *grh;

	if (!rdma_is_port_valid(device, rdma_ah_get_port_num(ah_attr)))
		return -EINVAL;

	if (ah_attr->type != RDMA_AH_ATTR_TYPE_ROCE)
		return 0;

	grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw)) {
		rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
				ah_attr->roce.dmac);
	} else {
		union ib_gid sgid;
		struct ib_gid_attr sgid_attr;
		int ifindex;
		int hop_limit;

		ret = ib_query_gid(device,
				   rdma_ah_get_port_num(ah_attr),
				   grh->sgid_index,
				   &sgid, &sgid_attr);

		if (ret || !sgid_attr.ndev) {
			if (!ret)
				ret = -ENXIO;
			goto out;
		}

		ifindex = sgid_attr.ndev->ifindex;

		ret =
		rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
					     ah_attr->roce.dmac,
					     NULL, &ifindex, &hop_limit);

		dev_put(sgid_attr.ndev);

		grh->hop_limit = hop_limit;
	}
out:
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
/**
 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
 * @qp: The QP to modify.
 * @attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 * @udata: pointer to user's input output buffer information
 *
 * It returns 0 on success and an appropriate error code on error.
 */
int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
			    int attr_mask, struct ib_udata *udata)
{
	int ret;

	if (attr_mask & IB_QP_AV) {
		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
		if (ret)
			return ret;
	}
	return ib_security_modify_qp(qp, attr, attr_mask, udata);
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
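
/*
 * Example (illustrative sketch): moving a freshly created RC QP to INIT,
 * the first step of the RESET->INIT->RTR->RTS bring-up; the pkey index
 * and port number are placeholders:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */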
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);
int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	ib_close_shared_qp_security(qp->qp_sec);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}
int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	struct ib_qp_security *sec;
	int ret;

	WARN_ON_ONCE(qp->mrs_used > 0);

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;
	sec  = qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);

	if (!qp->uobject)
		rdma_rw_cleanup_mrs(qp);

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
		if (sec)
			ib_destroy_qp_security_end(sec);
	} else {
		if (sec)
			ib_destroy_qp_security_abort(sec);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
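
/*
 * Example (illustrative sketch): allocating a fast-registration MR able
 * to cover up to 32 pages, to be populated later with ib_map_mr_sg():
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */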
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd     = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD ||
	    lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
	    lid == be16_to_cpu(IB_LID_PERMISSIVE))
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		xrcd->inode = NULL;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and are set to the actual values
 * allocated on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);
/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
/**
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: the lifetime of init_attr->ind_tbl must be at least that of the
 * created ib_rwq_ind_table object; the caller is responsible for
 * allocating and freeing that memory.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
				init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);
/**
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id)) {
		atomic_inc(&qp->usecnt);
		flow_id->qp = qp;
	}
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);

int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints holds and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
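
/*
 * Example (illustrative sketch): mapping a DMA-mapped scatterlist into
 * an MR from ib_alloc_mr() and then registering it with an IB_WR_REG_MR
 * work request. "sg" and "nents" are assumed to come from a prior
 * ib_dma_map_sg() call, and "reg_cqe" is a hypothetical ib_cqe:
 *
 *	int n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
 *	if (n < 0 || n < nents)
 *		return n < 0 ? n : -EINVAL;
 *
 *	struct ib_reg_wr reg_wr = {
 *		.wr.opcode = IB_WR_REG_MR,
 *		.wr.wr_cqe = &reg_cqe,
 *		.mr	   = mr,
 *		.key	   = mr->rkey,
 *		.access	   = IB_ACCESS_LOCAL_WRITE,
 *	};
 *	ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
 */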
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}
/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;
	int ret;

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}
/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);
/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
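
/*
 * Example (illustrative sketch): a typical teardown order for a
 * connected QP, draining both queues so that all flushed completions
 * are reaped before the QP and its CQ go away:
 *
 *	ib_drain_qp(qp);
 *	ib_destroy_qp(qp);
 *	ib_free_cq(cq);
 */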