/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
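/*
 * rxe_verbs.c provides the ib_device verbs callbacks for the soft-RoCE
 * (rxe) driver: device/port queries, PD, AH, SRQ, QP, CQ, MR and multicast
 * verbs, plus registration of the device with the RDMA core.
 */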
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}
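/*
 * Map an ethtool link speed (in Mb/s) onto the closest InfiniBand
 * (speed, width) pair reported through ib_port_attr.
 */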
static void rxe_eth_speed_to_ib_speed(int speed, u8 *active_speed,
				      u8 *active_width)
{
	if (speed <= 1000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
	} else if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}
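/*
 * Report port attributes. The active speed/width are derived from the
 * underlying netdev's ethtool link settings, taken under usdev_lock.
 */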
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	u32 speed;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_number %d\n", port_num);
		return -EINVAL;
	}

	port = &rxe->port;

	/* *attr is already zeroed by the caller, so don't zero it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	if (rxe->ndev->ethtool_ops->get_link_ksettings) {
		struct ethtool_link_ksettings ks;

		rxe->ndev->ethtool_ops->get_link_ksettings(rxe->ndev, &ks);
		speed = ks.base.speed;
	} else if (rxe->ndev->ethtool_ops->get_settings) {
		struct ethtool_cmd cmd;

		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
		speed = cmd.speed;
	} else {
		pr_warn("%s speed is unknown, defaulting to 1000\n",
			rxe->ndev->name);
		speed = 1000;
	}
	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
				  &attr->active_width);
	mutex_unlock(&rxe->usdev_lock);

	return 0;
}
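/*
 * GIDs are kept in the IB core's GID cache; a cache miss (-EAGAIN) is
 * reported to the caller as the zero GID.
 */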
static int rxe_query_gid(struct ib_device *device,
			 u8 port_num, int index, union ib_gid *gid)
{
	int ret;

	if (index > RXE_PORT_GID_TBL_LEN)
		return -EINVAL;

	ret = ib_get_cached_gid(device, port_num, index, gid, NULL);
	if (ret == -EAGAIN) {
		memcpy(gid, &zgid, sizeof(*gid));
		return 0;
	}

	return ret;
}
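/*
 * The core GID cache owns the actual table entries, so add/del only need
 * to bounds-check the index against the rxe GID table size.
 */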
static int rxe_add_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, const union ib_gid *gid,
		       const struct ib_gid_attr *attr, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static int rxe_del_gid(struct ib_device *device, u8 port_num, unsigned int
		       index, void **context)
{
	if (index >= RXE_PORT_GID_TBL_LEN)
		return -EINVAL;
	return 0;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}
static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		dev_warn(device->dma_device, "invalid port_num = %d\n",
			 port_num);
		return -EINVAL;
	}

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dma_device, "invalid index = %d\n",
			 index);
		return -EINVAL;
	}

	*pkey = port->pkey_tbl[index];
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	if (unlikely(port_num != 1)) {
		pr_warn("invalid port_num = %d\n", port_num);
		return -EINVAL;
	}

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}
static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}
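/*
 * Resolve the SGID referenced by the AH attributes and fill in the rxe
 * address vector, including the IP addressing info used on the wire.
 */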
static int rxe_init_av(struct rxe_dev *rxe, struct ib_ah_attr *attr,
		       struct rxe_av *av)
{
	int err;
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

	err = ib_get_cached_gid(&rxe->ib_dev, attr->port_num,
				attr->grh.sgid_index, &sgid,
				&sgid_attr);
	if (err) {
		pr_err("Failed to query sgid. err = %d\n", err);
		return err;
	}

	err = rxe_av_from_attr(rxe, attr->port_num, av, attr);
	if (!err)
		err = rxe_av_fill_ip_info(rxe, av, attr, &sgid_attr, &sgid);

	if (sgid_attr.ndev)
		dev_put(sgid_attr.ndev);
	return err;
}
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	err = rxe_init_av(rxe, attr, &ah->av);
	if (err) {
		rxe_drop_ref(pd);
		rxe_drop_ref(ah);
		return ERR_PTR(err);
	}

	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	return rxe_init_av(rxe, attr, &ah->av);
}

static int rxe_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	rxe_av_to_attr(rxe, &ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}
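/*
 * Copy one receive work request into the next free slot of a receive
 * queue ring. The caller holds the queue's producer lock.
 */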
static int post_one_recv(struct rxe_rq *rq, struct ib_recv_wr *ibwr)
{
	int i;
	u32 length = 0;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue)))
		return -ENOMEM;

	if (unlikely(num_sge > rq->max_sge))
		return -EINVAL;

	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;
}
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		return ERR_PTR(err);

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, udata);
	if (err) {
		rxe_drop_ref(pd);
		rxe_drop_index(srq);
		rxe_drop_ref(srq);
		return ERR_PTR(err);
	}

	return &srq->ibsrq;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		return err;

	return rxe_srq_from_attr(rxe, srq, attr, mask, udata);
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}
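/*
 * QP verbs. Creation validates the init attributes, allocates a QP from
 * the rxe object pool and hands off to rxe_qp_from_init() to build the
 * send and receive queues.
 */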
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		return ERR_PTR(err);

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd);
	if (err) {
		rxe_drop_index(qp);
		rxe_drop_ref(qp);
		return ERR_PTR(err);
	}

	return &qp->ibqp;
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		return err;

	return rxe_qp_from_attr(qp, attr, mask, udata);
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}
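/*
 * Sanity-check a send work request against the send queue limits before
 * it is copied into a send WQE: SGE count, atomic length/alignment and
 * inline data size.
 */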
static int validate_send_wr(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		return -EINVAL;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			return -EINVAL;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			return -EINVAL;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		return -EINVAL;

	return 0;
}
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through to pick up the RDMA fields */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}
static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			if (qp->is_user && copy_from_user(p, (__user void *)
					    (uintptr_t)sge->addr, sge->length))
				return -EFAULT;

			else if (!qp->is_user)
				memcpy(p, (void *)(uintptr_t)sge->addr,
				       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = (mask & WR_ATOMIC_MASK) ?
			atomic_wr(ibwr)->remote_addr :
			rdma_wr(ibwr)->remote_addr;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
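/*
 * Validate a send WR, then copy it into the next free send queue slot
 * under sq_lock and publish it by advancing the producer index.
 */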
static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err_unlock;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err_unlock;

	/* make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}
static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int must_sched;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	/*
	 * Must schedule in the GSI QP case because ib_send_mad() holds an irq
	 * lock and the requester calls ip_local_out_sk(), which takes
	 * spin_lock_bh.
	 */
	must_sched = (qp_type(qp) == IB_QPT_GSI) ||
			(queue_count(qp->sq.queue) > 1);

	rxe_run_task(&qp->req.task, must_sched);

	return err;
}
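/*
 * rxe_post_send() below is the ib_device post_send entry point, reached
 * through ib_post_send(). A minimal sketch of how a kernel ULP might hand
 * an RDMA WRITE to this path (illustrative only; 'qp', 'sge', 'raddr' and
 * 'rkey' are assumed to have been set up elsewhere):
 *
 *	struct ib_rdma_wr wr = {
 *		.wr = {
 *			.wr_id      = MY_COOKIE,	// hypothetical cookie
 *			.opcode     = IB_WR_RDMA_WRITE,
 *			.sg_list    = &sge,
 *			.num_sge    = 1,
 *			.send_flags = IB_SEND_SIGNALED,
 *		},
 *		.remote_addr = raddr,
 *		.rkey        = rkey,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	int ret = ib_post_send(qp, &wr.wr, &bad_wr);
 *
 * For kernel QPs this lands in rxe_post_send_kernel() above, which copies
 * the WR into the send queue and kicks the requester task.
 */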
static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	return err;
}
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector, udata);
	if (err)
		return ERR_PTR(err);

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, udata);
	if (err) {
		rxe_drop_ref(cq);
		return ERR_PTR(err);
	}

	return &cq->ibcq;
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0, udata);
	if (err)
		return err;

	return rxe_cq_resize_queue(cq, cqe, udata);
}
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}
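/*
 * Memory region verbs. MRs come from the rxe object pool; the actual
 * mapping/translation setup is done by the rxe_mem_init_*() helpers.
 */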
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_dma(rxe, pd, access, mr);
	if (err)
		goto err_out;

	return &mr->ibmr;

err_out:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_user(rxe, pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err_out;

	return &mr->ibmr;

err_out:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);

	err = rxe_mem_init_fast(rxe, pd, max_num_sg, mr);
	if (err)
		goto err_out;

	return &mr->ibmr;

err_out:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return ERR_PTR(err);
}
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
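/*
 * Multicast attach/detach: look up (or create) the multicast group for
 * the given MGID and link/unlink this QP as a group member.
 */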
static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}
static ssize_t rxe_show_parent(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR(parent, S_IRUGO, rxe_show_parent, NULL);

static struct device_attribute *rxe_dev_attributes[] = {
	&dev_attr_parent,
};
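/*
 * Fill in the ib_device fields and verbs method table, register the
 * device with the RDMA core, and create the sysfs attributes above.
 */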
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = RXE_NUM_COMP_VECTORS;
	dev->dma_device = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	dev->node_guid = rxe_node_guid(rxe);
	dev->dma_ops = &rxe_dma_mapping_ops;

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;
	dev->query_device = rxe_query_device;
	dev->modify_device = rxe_modify_device;
	dev->query_port = rxe_query_port;
	dev->modify_port = rxe_modify_port;
	dev->get_link_layer = rxe_get_link_layer;
	dev->query_gid = rxe_query_gid;
	dev->get_netdev = rxe_get_netdev;
	dev->add_gid = rxe_add_gid;
	dev->del_gid = rxe_del_gid;
	dev->query_pkey = rxe_query_pkey;
	dev->alloc_ucontext = rxe_alloc_ucontext;
	dev->dealloc_ucontext = rxe_dealloc_ucontext;
	dev->mmap = rxe_mmap;
	dev->get_port_immutable = rxe_port_immutable;
	dev->alloc_pd = rxe_alloc_pd;
	dev->dealloc_pd = rxe_dealloc_pd;
	dev->create_ah = rxe_create_ah;
	dev->modify_ah = rxe_modify_ah;
	dev->query_ah = rxe_query_ah;
	dev->destroy_ah = rxe_destroy_ah;
	dev->create_srq = rxe_create_srq;
	dev->modify_srq = rxe_modify_srq;
	dev->query_srq = rxe_query_srq;
	dev->destroy_srq = rxe_destroy_srq;
	dev->post_srq_recv = rxe_post_srq_recv;
	dev->create_qp = rxe_create_qp;
	dev->modify_qp = rxe_modify_qp;
	dev->query_qp = rxe_query_qp;
	dev->destroy_qp = rxe_destroy_qp;
	dev->post_send = rxe_post_send;
	dev->post_recv = rxe_post_recv;
	dev->create_cq = rxe_create_cq;
	dev->destroy_cq = rxe_destroy_cq;
	dev->resize_cq = rxe_resize_cq;
	dev->poll_cq = rxe_poll_cq;
	dev->peek_cq = rxe_peek_cq;
	dev->req_notify_cq = rxe_req_notify_cq;
	dev->get_dma_mr = rxe_get_dma_mr;
	dev->reg_user_mr = rxe_reg_user_mr;
	dev->dereg_mr = rxe_dereg_mr;
	dev->alloc_mr = rxe_alloc_mr;
	dev->map_mr_sg = rxe_map_mr_sg;
	dev->attach_mcast = rxe_attach_mcast;
	dev->detach_mcast = rxe_detach_mcast;
	err = ib_register_device(dev, NULL);
	if (err) {
		pr_warn("rxe_register_device failed, err = %d\n", err);
		goto err1;
	}

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
		err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
		if (err) {
			pr_warn("device_create_file failed, i = %d, err = %d\n",
				i, err);
			goto err2;
		}
	}

	return 0;

err2:
	ib_unregister_device(dev);
err1:
	return err;
}

int rxe_unregister_device(struct rxe_dev *rxe)
{
	int i;
	struct ib_device *dev = &rxe->ib_dev;

	for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
		device_remove_file(&dev->dev, rxe_dev_attributes[i]);

	ib_unregister_device(dev);

	return 0;
}