2 * Copyright(c) 2016, 2017 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/hash.h>
49 #include <linux/bitops.h>
50 #include <linux/lockdep.h>
51 #include <linux/vmalloc.h>
52 #include <linux/slab.h>
53 #include <rdma/ib_verbs.h>
54 #include <rdma/ib_hdrs.h>
59 static void rvt_rc_timeout(unsigned long arg);
62 * Convert the AETH RNR timeout code into the number of microseconds.
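 * Each entry is indexed by the 5-bit RNR NAK timer code carried in the AETH;
 * the per-entry comments give the equivalent delay in milliseconds.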
64 static const u32 ib_rvt_rnr_table[32] = {
65 655360, /* 00: 655.36 */
85 10240, /* 14: 10.24 */
86 15360, /* 15: 15.36 */
87 20480, /* 16: 20.48 */
88 30720, /* 17: 30.72 */
89 40960, /* 18: 40.96 */
90 61440, /* 19: 61.44 */
91 81920, /* 1A: 81.92 */
92 122880, /* 1B: 122.88 */
93 163840, /* 1C: 163.84 */
94 245760, /* 1D: 245.76 */
95 327680, /* 1E: 327.68 */
96 491520 /* 1F: 491.52 */
100 * Note that it is OK to post send work requests in the SQE and ERR
101 * states; rvt_do_send() will process them and generate error
102 * completions as per IB 1.2 C10-96.
104 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
106 [IB_QPS_INIT] = RVT_POST_RECV_OK,
107 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
108 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
109 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
110 RVT_PROCESS_NEXT_SEND_OK,
111 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
112 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
113 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
114 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
115 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
116 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
118 EXPORT_SYMBOL(ib_rvt_state_ops);
120 static void get_map_page(struct rvt_qpn_table *qpt,
121 struct rvt_qpn_map *map)
123 unsigned long page = get_zeroed_page(GFP_KERNEL);
126 * Free the page if someone raced with us installing it.
129 spin_lock(&qpt->lock);
133 map->page = (void *)page;
134 spin_unlock(&qpt->lock);
138 * init_qpn_table - initialize the QP number table for a device
139 * @qpt: the QPN table
141 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
144 struct rvt_qpn_map *map;
147 if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
150 spin_lock_init(&qpt->lock);
152 qpt->last = rdi->dparms.qpn_start;
153 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
156 * Drivers may want some QPs beyond what verbs needs; let them use our
157 * qpn table rather than keeping a second one. Go ahead and mark the
158 * bitmaps for those. The reserved range must be *after* the range which verbs
162 /* Figure out number of bit maps needed before reserved range */
163 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
165 /* This should always be zero */
166 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
168 /* Starting with the first reserved bit map */
169 map = &qpt->map[qpt->nmaps];
171 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
172 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
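/* Mark every QPN in the reserved range as in use so normal allocation skips it */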
173 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
175 get_map_page(qpt, map);
181 set_bit(offset, map->page);
183 if (offset == RVT_BITS_PER_PAGE) {
194 * free_qpn_table - free the QP number table for a device
195 * @qpt: the QPN table
197 static void free_qpn_table(struct rvt_qpn_table *qpt)
201 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
202 free_page((unsigned long)qpt->map[i].page);
206 * rvt_driver_qp_init - Init driver qp resources
207 * @rdi: rvt dev structure
209 * Return: 0 on success
211 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
216 if (!rdi->dparms.qp_table_size)
220 * If driver is not doing any QP allocation then make sure it is
221 * providing the necessary QP functions.
223 if (!rdi->driver_f.free_all_qps ||
224 !rdi->driver_f.qp_priv_alloc ||
225 !rdi->driver_f.qp_priv_free ||
226 !rdi->driver_f.notify_qp_reset ||
227 !rdi->driver_f.notify_restart_rc)
230 /* allocate parent object */
231 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
236 /* allocate hash table */
237 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
238 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
239 rdi->qp_dev->qp_table =
240 kmalloc_node(rdi->qp_dev->qp_table_size *
241 sizeof(*rdi->qp_dev->qp_table),
242 GFP_KERNEL, rdi->dparms.node);
243 if (!rdi->qp_dev->qp_table)
246 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
247 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
249 spin_lock_init(&rdi->qp_dev->qpt_lock);
251 /* initialize qpn map */
252 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
255 spin_lock_init(&rdi->n_qps_lock);
260 kfree(rdi->qp_dev->qp_table);
261 free_qpn_table(&rdi->qp_dev->qpn_table);
270 * rvt_free_all_qps - check for QPs still in use
271 * @rdi: rvt device info structure
273 * There should not be any QPs still in use.
274 * Free memory for table.
276 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
280 unsigned n, qp_inuse = 0;
281 spinlock_t *ql; /* work around too long line below */
283 if (rdi->driver_f.free_all_qps)
284 qp_inuse = rdi->driver_f.free_all_qps(rdi);
286 qp_inuse += rvt_mcast_tree_empty(rdi);
291 ql = &rdi->qp_dev->qpt_lock;
292 spin_lock_irqsave(ql, flags);
293 for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
294 qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
295 lockdep_is_held(ql));
296 RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
298 for (; qp; qp = rcu_dereference_protected(qp->next,
299 lockdep_is_held(ql)))
302 spin_unlock_irqrestore(ql, flags);
308 * rvt_qp_exit - clean up qps on device exit
309 * @rdi: rvt dev structure
311 * Check for qp leaks and free resources.
313 void rvt_qp_exit(struct rvt_dev_info *rdi)
315 u32 qps_inuse = rvt_free_all_qps(rdi);
318 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
323 kfree(rdi->qp_dev->qp_table);
324 free_qpn_table(&rdi->qp_dev->qpn_table);
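/* mk_qpn - turn a bitmap page pointer and bit offset back into the QPN that bit represents */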
328 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
329 struct rvt_qpn_map *map, unsigned off)
331 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
335 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
336 * IB_QPT_SMI/IB_QPT_GSI
337 * @rdi: rvt device info structure
338 * @qpt: queue pair number table pointer
339 * @port_num: IB port number, 1 based, comes from core
341 * Return: The queue pair number
343 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
344 enum ib_qp_type type, u8 port_num)
346 u32 i, offset, max_scan, qpn;
347 struct rvt_qpn_map *map;
350 if (rdi->driver_f.alloc_qpn)
351 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
353 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
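/* SMI gets QPN 0 and GSI gets QPN 1; the per-port bookkeeping below guards against handing the same special QPN out twice */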
356 ret = type == IB_QPT_GSI;
357 n = 1 << (ret + 2 * (port_num - 1));
358 spin_lock(&qpt->lock);
363 spin_unlock(&qpt->lock);
367 qpn = qpt->last + qpt->incr;
368 if (qpn >= RVT_QPN_MAX)
369 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
370 /* offset carries bit 0 */
371 offset = qpn & RVT_BITS_PER_PAGE_MASK;
372 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
373 max_scan = qpt->nmaps - !offset;
375 if (unlikely(!map->page)) {
376 get_map_page(qpt, map);
377 if (unlikely(!map->page))
381 if (!test_and_set_bit(offset, map->page)) {
388 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
389 * That is OK. It gets reassigned below.
391 qpn = mk_qpn(qpt, map, offset);
392 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
394 * In order to keep the number of pages allocated to a
395 * minimum, we scan all the existing pages before increasing
396 * the size of the bitmap table.
398 if (++i > max_scan) {
399 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
401 map = &qpt->map[qpt->nmaps++];
402 /* start at incr with current bit 0 */
403 offset = qpt->incr | (offset & 1);
404 } else if (map < &qpt->map[qpt->nmaps]) {
406 /* start at incr with current bit 0 */
407 offset = qpt->incr | (offset & 1);
410 /* wrap to first map page, invert bit 0 */
411 offset = qpt->incr | ((offset & 1) ^ 1);
413 /* there can be no set bits in low-order QoS bits */
414 WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
415 qpn = mk_qpn(qpt, map, offset);
424 static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
426 struct rvt_qpn_map *map;
428 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
430 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
434 * rvt_clear_mr_refs - Drop held mr refs
435 * @qp: rvt qp data structure
436 * @clr_sends: Whether to clear the send side or not
438 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
441 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
443 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
444 rvt_put_ss(&qp->s_rdma_read_sge);
446 rvt_put_ss(&qp->r_sge);
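/* On the send side, drop the MR (and, for UD-type QPs, AH) references held by each WQE still queued between s_last and s_head */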
449 while (qp->s_last != qp->s_head) {
450 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
453 for (i = 0; i < wqe->wr.num_sge; i++) {
454 struct rvt_sge *sge = &wqe->sg_list[i];
458 if (qp->ibqp.qp_type == IB_QPT_UD ||
459 qp->ibqp.qp_type == IB_QPT_SMI ||
460 qp->ibqp.qp_type == IB_QPT_GSI)
461 atomic_dec(&ibah_to_rvtah(
462 wqe->ud_wr.ah)->refcount);
463 if (++qp->s_last >= qp->s_size)
465 smp_wmb(); /* see qp_set_savail */
468 rvt_put_mr(qp->s_rdma_mr);
469 qp->s_rdma_mr = NULL;
473 if (qp->ibqp.qp_type != IB_QPT_RC)
476 for (n = 0; n < rvt_max_atomic(rdi); n++) {
477 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
479 if (e->rdma_sge.mr) {
480 rvt_put_mr(e->rdma_sge.mr);
481 e->rdma_sge.mr = NULL;
487 * rvt_remove_qp - remove qp from table
488 * @rdi: rvt dev struct
491 * Remove the QP from the table so it can't be found asynchronously by
492 * the receive routine.
494 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
496 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
497 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
501 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
503 if (rcu_dereference_protected(rvp->qp[0],
504 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
505 RCU_INIT_POINTER(rvp->qp[0], NULL);
506 } else if (rcu_dereference_protected(rvp->qp[1],
507 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
508 RCU_INIT_POINTER(rvp->qp[1], NULL);
511 struct rvt_qp __rcu **qpp;
514 qpp = &rdi->qp_dev->qp_table[n];
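/* Scan this hash bucket for the QP and splice it out under the table lock */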
515 for (; (q = rcu_dereference_protected(*qpp,
516 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
519 RCU_INIT_POINTER(*qpp,
520 rcu_dereference_protected(qp->next,
521 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
523 trace_rvt_qpremove(qp, n);
529 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
537 * rvt_init_qp - initialize the QP state to the reset state
538 * @qp: the QP to init or reinit
541 * This function is called from both rvt_create_qp() and
542 * rvt_reset_qp(). The difference is that the reset path
543 * takes the necessary locks to protect against concurrent
546 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
547 enum ib_qp_type type)
551 qp->qp_access_flags = 0;
552 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
558 qp->s_sending_psn = 0;
559 qp->s_sending_hpsn = 0;
563 if (type == IB_QPT_RC) {
564 qp->s_state = IB_OPCODE_RC_SEND_LAST;
565 qp->r_state = IB_OPCODE_RC_SEND_LAST;
567 qp->s_state = IB_OPCODE_UC_SEND_LAST;
568 qp->r_state = IB_OPCODE_UC_SEND_LAST;
570 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
581 qp->s_mig_state = IB_MIG_MIGRATED;
582 qp->r_head_ack_queue = 0;
583 qp->s_tail_ack_queue = 0;
584 qp->s_num_rd_atomic = 0;
586 qp->r_rq.wq->head = 0;
587 qp->r_rq.wq->tail = 0;
589 qp->r_sge.num_sge = 0;
590 atomic_set(&qp->s_reserved_used, 0);
594 * rvt_reset_qp - initialize the QP state to the reset state
595 * @qp: the QP to reset
598 * r_lock, s_hlock, and s_lock are required to be held by the caller
600 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
601 enum ib_qp_type type)
602 __must_hold(&qp->s_lock)
603 __must_hold(&qp->s_hlock)
604 __must_hold(&qp->r_lock)
606 lockdep_assert_held(&qp->r_lock);
607 lockdep_assert_held(&qp->s_hlock);
608 lockdep_assert_held(&qp->s_lock);
609 if (qp->state != IB_QPS_RESET) {
610 qp->state = IB_QPS_RESET;
612 /* Let drivers flush their waitlist */
613 rdi->driver_f.flush_qp_waiters(qp);
614 rvt_stop_rc_timers(qp);
615 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
616 spin_unlock(&qp->s_lock);
617 spin_unlock(&qp->s_hlock);
618 spin_unlock_irq(&qp->r_lock);
620 /* Stop the send queue and the retry timer */
621 rdi->driver_f.stop_send_queue(qp);
622 rvt_del_timers_sync(qp);
623 /* Wait for things to stop */
624 rdi->driver_f.quiesce_qp(qp);
626 /* take the qp out of the hash table and wait for it to be unused */
627 rvt_remove_qp(rdi, qp);
628 wait_event(qp->wait, !atomic_read(&qp->refcount));
630 /* re-take the locks because they were held at call time */
631 spin_lock_irq(&qp->r_lock);
632 spin_lock(&qp->s_hlock);
633 spin_lock(&qp->s_lock);
635 rvt_clear_mr_refs(qp, 1);
637 * Let the driver do any tear down or re-init it needs to for
638 * a qp that has been reset
640 rdi->driver_f.notify_qp_reset(qp);
642 rvt_init_qp(rdi, qp, type);
643 lockdep_assert_held(&qp->r_lock);
644 lockdep_assert_held(&qp->s_hlock);
645 lockdep_assert_held(&qp->s_lock);
649 * rvt_create_qp - create a queue pair for a device
650 * @ibpd: the protection domain whose device we create the queue pair for
651 * @init_attr: the attributes of the queue pair
652 * @udata: user data for libibverbs.so
654 * Queue pair creation is mostly an rvt issue. However, drivers have their own
655 * unique idea of what queue pair numbers mean. For instance there is a reserved
658 * Return: the queue pair on success, otherwise returns an errno.
660 * Called by the ib_create_qp() core verbs function.
662 struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
663 struct ib_qp_init_attr *init_attr,
664 struct ib_udata *udata)
668 struct rvt_swqe *swq = NULL;
671 struct ib_qp *ret = ERR_PTR(-ENOMEM);
672 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
677 return ERR_PTR(-EINVAL);
679 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
680 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
681 init_attr->create_flags)
682 return ERR_PTR(-EINVAL);
684 /* Check receive queue parameters if no SRQ is specified. */
685 if (!init_attr->srq) {
686 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
687 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
688 return ERR_PTR(-EINVAL);
690 if (init_attr->cap.max_send_sge +
691 init_attr->cap.max_send_wr +
692 init_attr->cap.max_recv_sge +
693 init_attr->cap.max_recv_wr == 0)
694 return ERR_PTR(-EINVAL);
697 init_attr->cap.max_send_wr + 1 +
698 rdi->dparms.reserved_operations;
699 switch (init_attr->qp_type) {
702 if (init_attr->port_num == 0 ||
703 init_attr->port_num > ibpd->device->phys_port_cnt)
704 return ERR_PTR(-EINVAL);
708 sz = sizeof(struct rvt_sge) *
709 init_attr->cap.max_send_sge +
710 sizeof(struct rvt_swqe);
711 swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
713 return ERR_PTR(-ENOMEM);
717 if (init_attr->srq) {
718 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
720 if (srq->rq.max_sge > 1)
721 sg_list_sz = sizeof(*qp->r_sg_list) *
722 (srq->rq.max_sge - 1);
723 } else if (init_attr->cap.max_recv_sge > 1)
724 sg_list_sz = sizeof(*qp->r_sg_list) *
725 (init_attr->cap.max_recv_sge - 1);
726 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
731 RCU_INIT_POINTER(qp->next, NULL);
732 if (init_attr->qp_type == IB_QPT_RC) {
735 sizeof(*qp->s_ack_queue) *
739 if (!qp->s_ack_queue)
742 /* initialize timers needed for rc qp */
743 setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
744 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
746 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
749 * Driver needs to set up its private QP structure and do any
750 * initialization that is needed.
752 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
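/* Local ACK timeout is 4.096 usec * 2^qp->timeout; 4096 * 2^timeout is nanoseconds, so divide by 1000 to get usec for usecs_to_jiffies() */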
758 qp->timeout_jiffies =
759 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
761 if (init_attr->srq) {
764 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
765 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
766 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
767 sizeof(struct rvt_rwqe);
769 qp->r_rq.wq = vmalloc_user(
770 sizeof(struct rvt_rwq) +
773 qp->r_rq.wq = vzalloc_node(
774 sizeof(struct rvt_rwq) +
778 goto bail_driver_priv;
782 * ib_create_qp() will initialize qp->ibqp
783 * except for qp->ibqp.qp_num.
785 spin_lock_init(&qp->r_lock);
786 spin_lock_init(&qp->s_hlock);
787 spin_lock_init(&qp->s_lock);
788 spin_lock_init(&qp->r_rq.lock);
789 atomic_set(&qp->refcount, 0);
790 atomic_set(&qp->local_ops_pending, 0);
791 init_waitqueue_head(&qp->wait);
792 init_timer(&qp->s_timer);
793 qp->s_timer.data = (unsigned long)qp;
794 INIT_LIST_HEAD(&qp->rspwait);
795 qp->state = IB_QPS_RESET;
798 qp->s_avail = init_attr->cap.max_send_wr;
799 qp->s_max_sge = init_attr->cap.max_send_sge;
800 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
801 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
803 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
805 init_attr->port_num);
810 qp->ibqp.qp_num = err;
811 qp->port_num = init_attr->port_num;
812 rvt_init_qp(rdi, qp, init_attr->qp_type);
816 /* Don't support raw QPs */
817 return ERR_PTR(-EINVAL);
820 init_attr->cap.max_inline_data = 0;
823 * Return the address of the RWQ as the offset to mmap.
824 * See rvt_mmap() for details.
826 if (udata && udata->outlen >= sizeof(__u64)) {
830 err = ib_copy_to_udata(udata, &offset,
837 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
839 qp->ip = rvt_create_mmap_info(rdi, s,
840 ibpd->uobject->context,
843 ret = ERR_PTR(-ENOMEM);
847 err = ib_copy_to_udata(udata, &qp->ip->offset,
848 sizeof(qp->ip->offset));
854 qp->pid = current->pid;
857 spin_lock(&rdi->n_qps_lock);
858 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
859 spin_unlock(&rdi->n_qps_lock);
860 ret = ERR_PTR(-ENOMEM);
864 rdi->n_qps_allocated++;
866 * Maintain a busy_jiffies variable that will be added to the timeout
867 * period in mod_retry_timer and add_retry_timer. This busy_jiffies value
868 * is scaled by the number of rc qps created for the device to reduce
869 * the number of timeouts occurring when there is a large number of
870 * qps. busy_jiffies is incremented every rc qp scaling interval.
871 * The scaling interval is selected based on extensive performance
872 * evaluation of targeted workloads.
874 if (init_attr->qp_type == IB_QPT_RC) {
876 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
878 spin_unlock(&rdi->n_qps_lock);
881 spin_lock_irq(&rdi->pending_lock);
882 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
883 spin_unlock_irq(&rdi->pending_lock);
889 * We have our QP and it's good, now keep track of what types of opcodes
890 * can be processed on this QP. We do this by keeping track of what the
891 * 3 high order bits of the opcode are.
893 switch (init_attr->qp_type) {
897 qp->allowed_ops = IB_OPCODE_UD;
900 qp->allowed_ops = IB_OPCODE_RC;
903 qp->allowed_ops = IB_OPCODE_UC;
906 ret = ERR_PTR(-EINVAL);
914 kref_put(&qp->ip->ref, rvt_release_mmap_info);
917 free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
924 rdi->driver_f.qp_priv_free(rdi, qp);
927 kfree(qp->s_ack_queue);
937 * rvt_error_qp - put a QP into the error state
938 * @qp: the QP to put into the error state
939 * @err: the receive completion error to signal if a RWQE is active
941 * Flushes both send and receive work queues.
943 * Return: true if last WQE event should be generated.
944 * The QP r_lock and s_lock should be held and interrupts disabled.
945 * If we are already in error state, just return.
947 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
951 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
953 lockdep_assert_held(&qp->r_lock);
954 lockdep_assert_held(&qp->s_lock);
955 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
958 qp->state = IB_QPS_ERR;
960 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
961 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
962 del_timer(&qp->s_timer);
965 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
966 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
968 rdi->driver_f.notify_error_qp(qp);
970 /* Schedule the sending tasklet to drain the send work queue. */
971 if (ACCESS_ONCE(qp->s_last) != qp->s_head)
972 rdi->driver_f.schedule_send(qp);
974 rvt_clear_mr_refs(qp, 0);
976 memset(&wc, 0, sizeof(wc));
978 wc.opcode = IB_WC_RECV;
980 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
981 wc.wr_id = qp->r_wr_id;
983 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
985 wc.status = IB_WC_WR_FLUSH_ERR;
992 spin_lock(&qp->r_rq.lock);
994 /* sanity check pointers before trusting them */
997 if (head >= qp->r_rq.size)
1000 if (tail >= qp->r_rq.size)
1002 while (tail != head) {
1003 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1004 if (++tail >= qp->r_rq.size)
1006 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1010 spin_unlock(&qp->r_rq.lock);
1011 } else if (qp->ibqp.event_handler) {
1018 EXPORT_SYMBOL(rvt_error_qp);
1021 * Put the QP into the hash table.
1022 * The hash table holds a reference to the QP.
1024 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1026 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1027 unsigned long flags;
1030 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
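/* QPN 0 and 1 (SMI/GSI) live in the per-port qp[] array rather than the hash table */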
1032 if (qp->ibqp.qp_num <= 1) {
1033 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1035 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1037 qp->next = rdi->qp_dev->qp_table[n];
1038 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1039 trace_rvt_qpinsert(qp, n);
1042 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1046 * rvt_modify_qp - modify the attributes of a queue pair
1047 * @ibqp: the queue pair whose attributes we're modifying
1048 * @attr: the new attributes
1049 * @attr_mask: the mask of attributes to modify
1050 * @udata: user data for libibverbs.so
1052 * Return: 0 on success, otherwise returns an errno.
1054 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1055 int attr_mask, struct ib_udata *udata)
1057 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1058 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1059 enum ib_qp_state cur_state, new_state;
1063 int pmtu = 0; /* for gcc warning only */
1064 enum rdma_link_layer link;
1066 link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
1068 spin_lock_irq(&qp->r_lock);
1069 spin_lock(&qp->s_hlock);
1070 spin_lock(&qp->s_lock);
1072 cur_state = attr_mask & IB_QP_CUR_STATE ?
1073 attr->cur_qp_state : qp->state;
1074 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1076 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1080 if (rdi->driver_f.check_modify_qp &&
1081 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1084 if (attr_mask & IB_QP_AV) {
1085 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1086 be16_to_cpu(IB_MULTICAST_LID_BASE))
1088 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1092 if (attr_mask & IB_QP_ALT_PATH) {
1093 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1094 be16_to_cpu(IB_MULTICAST_LID_BASE))
1096 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1098 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1102 if (attr_mask & IB_QP_PKEY_INDEX)
1103 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1106 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1107 if (attr->min_rnr_timer > 31)
1110 if (attr_mask & IB_QP_PORT)
1111 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1112 qp->ibqp.qp_type == IB_QPT_GSI ||
1113 attr->port_num == 0 ||
1114 attr->port_num > ibqp->device->phys_port_cnt)
1117 if (attr_mask & IB_QP_DEST_QPN)
1118 if (attr->dest_qp_num > RVT_QPN_MASK)
1121 if (attr_mask & IB_QP_RETRY_CNT)
1122 if (attr->retry_cnt > 7)
1125 if (attr_mask & IB_QP_RNR_RETRY)
1126 if (attr->rnr_retry > 7)
1130 * Don't allow invalid path_mtu values. OK to set it greater
1131 * than the active mtu (or even the max_cap, if we have tuned
1132 * that to a small mtu). We'll set qp->path_mtu
1133 * to the lesser of the requested attribute mtu and the active mtu,
1134 * for packetizing messages.
1135 * Note that the QP port has to be set in INIT and the MTU in RTR.
1137 if (attr_mask & IB_QP_PATH_MTU) {
1138 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1143 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1144 if (attr->path_mig_state == IB_MIG_REARM) {
1145 if (qp->s_mig_state == IB_MIG_ARMED)
1147 if (new_state != IB_QPS_RTS)
1149 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1150 if (qp->s_mig_state == IB_MIG_REARM)
1152 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1154 if (qp->s_mig_state == IB_MIG_ARMED)
1161 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1162 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1165 switch (new_state) {
1167 if (qp->state != IB_QPS_RESET)
1168 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1172 /* Allow event to re-trigger if QP set to RTR more than once */
1173 qp->r_flags &= ~RVT_R_COMM_EST;
1174 qp->state = new_state;
1178 qp->s_draining = qp->s_last != qp->s_cur;
1179 qp->state = new_state;
1183 if (qp->ibqp.qp_type == IB_QPT_RC)
1185 qp->state = new_state;
1189 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1193 qp->state = new_state;
1197 if (attr_mask & IB_QP_PKEY_INDEX)
1198 qp->s_pkey_index = attr->pkey_index;
1200 if (attr_mask & IB_QP_PORT)
1201 qp->port_num = attr->port_num;
1203 if (attr_mask & IB_QP_DEST_QPN)
1204 qp->remote_qpn = attr->dest_qp_num;
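/* A new SQ PSN seeds all of the send-side PSN tracking state */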
1206 if (attr_mask & IB_QP_SQ_PSN) {
1207 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1208 qp->s_psn = qp->s_next_psn;
1209 qp->s_sending_psn = qp->s_next_psn;
1210 qp->s_last_psn = qp->s_next_psn - 1;
1211 qp->s_sending_hpsn = qp->s_last_psn;
1214 if (attr_mask & IB_QP_RQ_PSN)
1215 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1217 if (attr_mask & IB_QP_ACCESS_FLAGS)
1218 qp->qp_access_flags = attr->qp_access_flags;
1220 if (attr_mask & IB_QP_AV) {
1221 qp->remote_ah_attr = attr->ah_attr;
1222 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1223 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1226 if (attr_mask & IB_QP_ALT_PATH) {
1227 qp->alt_ah_attr = attr->alt_ah_attr;
1228 qp->s_alt_pkey_index = attr->alt_pkey_index;
1231 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1232 qp->s_mig_state = attr->path_mig_state;
1234 qp->remote_ah_attr = qp->alt_ah_attr;
1235 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1236 qp->s_pkey_index = qp->s_alt_pkey_index;
1240 if (attr_mask & IB_QP_PATH_MTU) {
1241 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1242 qp->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1243 qp->log_pmtu = ilog2(qp->pmtu);
1246 if (attr_mask & IB_QP_RETRY_CNT) {
1247 qp->s_retry_cnt = attr->retry_cnt;
1248 qp->s_retry = attr->retry_cnt;
1251 if (attr_mask & IB_QP_RNR_RETRY) {
1252 qp->s_rnr_retry_cnt = attr->rnr_retry;
1253 qp->s_rnr_retry = attr->rnr_retry;
1256 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1257 qp->r_min_rnr_timer = attr->min_rnr_timer;
1259 if (attr_mask & IB_QP_TIMEOUT) {
1260 qp->timeout = attr->timeout;
1261 qp->timeout_jiffies =
1262 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1266 if (attr_mask & IB_QP_QKEY)
1267 qp->qkey = attr->qkey;
1269 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1270 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1272 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1273 qp->s_max_rd_atomic = attr->max_rd_atomic;
1275 if (rdi->driver_f.modify_qp)
1276 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1278 spin_unlock(&qp->s_lock);
1279 spin_unlock(&qp->s_hlock);
1280 spin_unlock_irq(&qp->r_lock);
1282 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1283 rvt_insert_qp(rdi, qp);
1286 ev.device = qp->ibqp.device;
1287 ev.element.qp = &qp->ibqp;
1288 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1289 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1292 ev.device = qp->ibqp.device;
1293 ev.element.qp = &qp->ibqp;
1294 ev.event = IB_EVENT_PATH_MIG;
1295 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1300 spin_unlock(&qp->s_lock);
1301 spin_unlock(&qp->s_hlock);
1302 spin_unlock_irq(&qp->r_lock);
1306 /**
 * rvt_free_qpn - Free a qpn from the bit map
1308 * @qpn: queue pair number to free
1310 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
1312 struct rvt_qpn_map *map;
1314 map = qpt->map + qpn / RVT_BITS_PER_PAGE;
1316 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
1320 * rvt_destroy_qp - destroy a queue pair
1321 * @ibqp: the queue pair to destroy
1323 * Note that this can be called while the QP is actively sending or
1326 * Return: 0 on success.
1328 int rvt_destroy_qp(struct ib_qp *ibqp)
1330 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1331 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1333 spin_lock_irq(&qp->r_lock);
1334 spin_lock(&qp->s_hlock);
1335 spin_lock(&qp->s_lock);
1336 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1337 spin_unlock(&qp->s_lock);
1338 spin_unlock(&qp->s_hlock);
1339 spin_unlock_irq(&qp->r_lock);
1341 /* qpn is now available for use again */
1342 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1344 spin_lock(&rdi->n_qps_lock);
1345 rdi->n_qps_allocated--;
1346 if (qp->ibqp.qp_type == IB_QPT_RC) {
1348 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1350 spin_unlock(&rdi->n_qps_lock);
1353 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1357 rdi->driver_f.qp_priv_free(rdi, qp);
1358 kfree(qp->s_ack_queue);
1364 * rvt_query_qp - query an ibqp
1365 * @ibqp: IB qp to query
1366 * @attr: attr struct to fill in
1367 * @attr_mask: attr mask ignored
1368 * @init_attr: struct to fill in
1372 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1373 int attr_mask, struct ib_qp_init_attr *init_attr)
1375 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1376 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1378 attr->qp_state = qp->state;
1379 attr->cur_qp_state = attr->qp_state;
1380 attr->path_mtu = qp->path_mtu;
1381 attr->path_mig_state = qp->s_mig_state;
1382 attr->qkey = qp->qkey;
1383 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1384 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1385 attr->dest_qp_num = qp->remote_qpn;
1386 attr->qp_access_flags = qp->qp_access_flags;
1387 attr->cap.max_send_wr = qp->s_size - 1 -
1388 rdi->dparms.reserved_operations;
1389 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1390 attr->cap.max_send_sge = qp->s_max_sge;
1391 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1392 attr->cap.max_inline_data = 0;
1393 attr->ah_attr = qp->remote_ah_attr;
1394 attr->alt_ah_attr = qp->alt_ah_attr;
1395 attr->pkey_index = qp->s_pkey_index;
1396 attr->alt_pkey_index = qp->s_alt_pkey_index;
1397 attr->en_sqd_async_notify = 0;
1398 attr->sq_draining = qp->s_draining;
1399 attr->max_rd_atomic = qp->s_max_rd_atomic;
1400 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1401 attr->min_rnr_timer = qp->r_min_rnr_timer;
1402 attr->port_num = qp->port_num;
1403 attr->timeout = qp->timeout;
1404 attr->retry_cnt = qp->s_retry_cnt;
1405 attr->rnr_retry = qp->s_rnr_retry_cnt;
1406 attr->alt_port_num =
1407 rdma_ah_get_port_num(&qp->alt_ah_attr);
1408 attr->alt_timeout = qp->alt_timeout;
1410 init_attr->event_handler = qp->ibqp.event_handler;
1411 init_attr->qp_context = qp->ibqp.qp_context;
1412 init_attr->send_cq = qp->ibqp.send_cq;
1413 init_attr->recv_cq = qp->ibqp.recv_cq;
1414 init_attr->srq = qp->ibqp.srq;
1415 init_attr->cap = attr->cap;
1416 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1417 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1419 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1420 init_attr->qp_type = qp->ibqp.qp_type;
1421 init_attr->port_num = qp->port_num;
1426 * rvt_post_receive - post a receive on a QP
1427 * @ibqp: the QP to post the receive on
1428 * @wr: the WR to post
1429 * @bad_wr: the first bad WR is put here
1431 * This may be called from interrupt context.
1433 * Return: 0 on success otherwise errno
1435 int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1436 struct ib_recv_wr **bad_wr)
1438 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1439 struct rvt_rwq *wq = qp->r_rq.wq;
1440 unsigned long flags;
1441 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1444 /* Check that state is OK to post receive. */
1445 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1450 for (; wr; wr = wr->next) {
1451 struct rvt_rwqe *wqe;
1455 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1460 spin_lock_irqsave(&qp->r_rq.lock, flags);
1461 next = wq->head + 1;
1462 if (next >= qp->r_rq.size)
1464 if (next == wq->tail) {
1465 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1469 if (unlikely(qp_err_flush)) {
1472 memset(&wc, 0, sizeof(wc));
1474 wc.opcode = IB_WC_RECV;
1475 wc.wr_id = wr->wr_id;
1476 wc.status = IB_WC_WR_FLUSH_ERR;
1477 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1479 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1480 wqe->wr_id = wr->wr_id;
1481 wqe->num_sge = wr->num_sge;
1482 for (i = 0; i < wr->num_sge; i++)
1483 wqe->sg_list[i] = wr->sg_list[i];
1485 * Make sure queue entry is written
1486 * before the head index.
1491 spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1497 * rvt_qp_valid_operation - validate post send wr request
1499 * @post_parms - the post send table for the driver
1500 * @wr - the work request
1502 * The routine validates the operation based on the
1503 * validation table and returns the length of the operation
1504 * which can extend beyond the ib_send_wr. Operation-
1505 * dependent flags key atomic operation validation.
1507 * There is an exception for UD qps that validates the pd and
1508 * overrides the length to include the additional UD specific
1511 * Returns a negative error or the length of the work request
1512 * for building the swqe.
1514 static inline int rvt_qp_valid_operation(
1516 const struct rvt_operation_params *post_parms,
1517 struct ib_send_wr *wr)
1521 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1523 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1525 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1526 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1528 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1529 (wr->num_sge == 0 ||
1530 wr->sg_list[0].length < sizeof(u64) ||
1531 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1533 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1534 !qp->s_max_rd_atomic)
1536 len = post_parms[wr->opcode].length;
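/* For UD-type QPs (UD/SMI/GSI) the AH must come from the same PD as the QP, and the copy length is the full ib_ud_wr so the UD-specific fields are preserved */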
1538 if (qp->ibqp.qp_type != IB_QPT_UC &&
1539 qp->ibqp.qp_type != IB_QPT_RC) {
1540 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1542 len = sizeof(struct ib_ud_wr);
1548 * rvt_qp_is_avail - determine queue capacity
1550 * @rdi - the rdmavt device
1551 * @reserved_op - true if this is a reserved operation
1553 * This assumes the s_hlock is held but the s_last
1554 * qp variable is uncontrolled.
1556 * For non-reserved operations, the qp->s_avail
1559 * The return value is zero or -ENOMEM.
1561 static inline int rvt_qp_is_avail(
1563 struct rvt_dev_info *rdi,
1570 /* see rvt_qp_wqe_unreserve() */
1571 smp_mb__before_atomic();
1572 reserved_used = atomic_read(&qp->s_reserved_used);
1573 if (unlikely(reserved_op)) {
1574 /* see rvt_qp_wqe_unreserve() */
1575 smp_mb__before_atomic();
1576 if (reserved_used >= rdi->dparms.reserved_operations)
1580 /* non-reserved operations */
1581 if (likely(qp->s_avail))
1583 smp_read_barrier_depends(); /* see rc.c */
1584 slast = ACCESS_ONCE(qp->s_last);
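/* Recompute free space in the send ring: entries between s_last and s_head are still outstanding */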
1585 if (qp->s_head >= slast)
1586 avail = qp->s_size - (qp->s_head - slast);
1588 avail = slast - qp->s_head;
1590 /* see rvt_qp_wqe_unreserve() */
1591 smp_mb__before_atomic();
1592 reserved_used = atomic_read(&qp->s_reserved_used);
1594 (rdi->dparms.reserved_operations - reserved_used);
1595 /* ensure we don't assign a negative s_avail */
1596 if ((s32)avail <= 0)
1598 qp->s_avail = avail;
1599 if (WARN_ON(qp->s_avail >
1600 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1602 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1603 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1604 qp->s_head, qp->s_tail, qp->s_cur,
1605 qp->s_acked, qp->s_last);
1610 * rvt_post_one_wr - post one RC, UC, or UD send work request
1611 * @qp: the QP to post on
1612 * @wr: the work request to send
1614 static int rvt_post_one_wr(struct rvt_qp *qp,
1615 struct ib_send_wr *wr,
1618 struct rvt_swqe *wqe;
1623 struct rvt_lkey_table *rkt;
1625 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1630 int local_ops_delayed = 0;
1632 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
1634 /* IB spec says that num_sge == 0 is OK. */
1635 if (unlikely(wr->num_sge > qp->s_max_sge))
1638 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1644 * Local operations include fast register and local invalidate.
1645 * Fast register needs to be processed immediately because the
1646 * registered lkey may be used by following work requests and the
1647 * lkey needs to be valid at the time those requests are posted.
1648 * Local invalidate can be processed immediately if fencing is
1649 * not required and no previous local invalidate ops are pending.
1650 * Signaled local operations that have been processed immediately
1651 * need to have requests with "completion only" flags set posted
1652 * to the send queue in order to generate completions.
1654 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
1655 switch (wr->opcode) {
1657 ret = rvt_fast_reg_mr(qp,
1660 reg_wr(wr)->access);
1661 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1664 case IB_WR_LOCAL_INV:
1665 if ((wr->send_flags & IB_SEND_FENCE) ||
1666 atomic_read(&qp->local_ops_pending)) {
1667 local_ops_delayed = 1;
1669 ret = rvt_invalidate_rkey(
1670 qp, wr->ex.invalidate_rkey);
1671 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
1680 reserved_op = rdi->post_parms[wr->opcode].flags &
1681 RVT_OPERATION_USE_RESERVE;
1682 /* check for avail */
1683 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
1686 next = qp->s_head + 1;
1687 if (next >= qp->s_size)
1690 rkt = &rdi->lkey_table;
1691 pd = ibpd_to_rvtpd(qp->ibqp.pd);
1692 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
1694 /* cplen has length from above */
1695 memcpy(&wqe->wr, wr, cplen);
1700 acc = wr->opcode >= IB_WR_RDMA_READ ?
1701 IB_ACCESS_LOCAL_WRITE : 0;
1702 for (i = 0; i < wr->num_sge; i++) {
1703 u32 length = wr->sg_list[i].length;
1708 ok = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j],
1709 &wr->sg_list[i], acc);
1712 goto bail_inval_free;
1714 wqe->length += length;
1717 wqe->wr.num_sge = j;
1720 /* general part of wqe valid - allow for driver checks */
1721 if (rdi->driver_f.check_send_wqe) {
1722 ret = rdi->driver_f.check_send_wqe(qp, wqe);
1724 goto bail_inval_free;
1729 log_pmtu = qp->log_pmtu;
1730 if (qp->ibqp.qp_type != IB_QPT_UC &&
1731 qp->ibqp.qp_type != IB_QPT_RC) {
1732 struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);
1734 log_pmtu = ah->log_pmtu;
1735 atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
1738 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
1739 if (local_ops_delayed)
1740 atomic_inc(&qp->local_ops_pending);
1742 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
1747 wqe->ssn = qp->s_ssn++;
1748 wqe->psn = qp->s_next_psn;
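/* lpsn is the PSN of the last packet of this request; a non-zero length of L bytes spans ((L - 1) >> log_pmtu) packets beyond psn */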
1749 wqe->lpsn = wqe->psn +
1751 ((wqe->length - 1) >> log_pmtu) :
1753 qp->s_next_psn = wqe->lpsn + 1;
1755 if (unlikely(reserved_op)) {
1756 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
1757 rvt_qp_wqe_reserve(qp, wqe);
1759 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
1762 trace_rvt_post_one_wr(qp, wqe);
1763 smp_wmb(); /* see request builders */
1769 /* release mr holds */
1771 struct rvt_sge *sge = &wqe->sg_list[--j];
1773 rvt_put_mr(sge->mr);
1779 * rvt_post_send - post a send on a QP
1780 * @ibqp: the QP to post the send on
1781 * @wr: the list of work requests to post
1782 * @bad_wr: the first bad WR is put here
1784 * This may be called from interrupt context.
1786 * Return: 0 on success else errno
1788 int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1789 struct ib_send_wr **bad_wr)
1791 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1792 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1793 unsigned long flags = 0;
1798 spin_lock_irqsave(&qp->s_hlock, flags);
1801 * Ensure QP state is such that we can send. If not, bail out early;
1802 * there is no need to do this every time we post a send.
1804 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
1805 spin_unlock_irqrestore(&qp->s_hlock, flags);
1810 * If the send queue is empty, and we only have a single WR then just go
1811 * ahead and kick the send engine into gear. Otherwise we will always
1812 * just schedule the send to happen later.
1814 call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;
1816 for (; wr; wr = wr->next) {
1817 err = rvt_post_one_wr(qp, wr, &call_send);
1818 if (unlikely(err)) {
1825 spin_unlock_irqrestore(&qp->s_hlock, flags);
1828 rdi->driver_f.do_send(qp);
1830 rdi->driver_f.schedule_send_no_lock(qp);
1836 * rvt_post_srq_receive - post a receive on a shared receive queue
1837 * @ibsrq: the SRQ to post the receive on
1838 * @wr: the list of work requests to post
1839 * @bad_wr: A pointer to the first WR to cause a problem is put here
1841 * This may be called from interrupt context.
1843 * Return: 0 on success else errno
1845 int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
1846 struct ib_recv_wr **bad_wr)
1848 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
1850 unsigned long flags;
1852 for (; wr; wr = wr->next) {
1853 struct rvt_rwqe *wqe;
1857 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
1862 spin_lock_irqsave(&srq->rq.lock, flags);
1864 next = wq->head + 1;
1865 if (next >= srq->rq.size)
1867 if (next == wq->tail) {
1868 spin_unlock_irqrestore(&srq->rq.lock, flags);
1873 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
1874 wqe->wr_id = wr->wr_id;
1875 wqe->num_sge = wr->num_sge;
1876 for (i = 0; i < wr->num_sge; i++)
1877 wqe->sg_list[i] = wr->sg_list[i];
1878 /* Make sure queue entry is written before the head index. */
1881 spin_unlock_irqrestore(&srq->rq.lock, flags);
1887 * rvt_comm_est - handle trap with QP established
1890 void rvt_comm_est(struct rvt_qp *qp)
1892 qp->r_flags |= RVT_R_COMM_EST;
1893 if (qp->ibqp.event_handler) {
1896 ev.device = qp->ibqp.device;
1897 ev.element.qp = &qp->ibqp;
1898 ev.event = IB_EVENT_COMM_EST;
1899 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1902 EXPORT_SYMBOL(rvt_comm_est);
1904 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
1906 unsigned long flags;
1909 spin_lock_irqsave(&qp->s_lock, flags);
1910 lastwqe = rvt_error_qp(qp, err);
1911 spin_unlock_irqrestore(&qp->s_lock, flags);
1916 ev.device = qp->ibqp.device;
1917 ev.element.qp = &qp->ibqp;
1918 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1919 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1922 EXPORT_SYMBOL(rvt_rc_error);
1925 * rvt_rnr_tbl_to_usec - convert an RNR timeout index into microseconds
1926 * @index - the index into ib_rvt_rnr_table
1927 * Return: usec for the given index into ib_rvt_rnr_table
1929 unsigned long rvt_rnr_tbl_to_usec(u32 index)
1931 return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
1933 EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
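/* Pull the 5-bit RNR NAK timer code out of the AETH credit field and convert it to microseconds */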
1935 static inline unsigned long rvt_aeth_to_usec(u32 aeth)
1937 return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
1938 IB_AETH_CREDIT_MASK];
1942 * rvt_add_retry_timer - add/start a retry timer
1944 * add a retry timer on the QP
1946 void rvt_add_retry_timer(struct rvt_qp *qp)
1948 struct ib_qp *ibqp = &qp->ibqp;
1949 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1951 lockdep_assert_held(&qp->s_lock);
1952 qp->s_flags |= RVT_S_TIMER;
1953 /* 4.096 usec. * (1 << qp->timeout) */
1954 qp->s_timer.expires = jiffies + qp->timeout_jiffies +
1956 add_timer(&qp->s_timer);
1958 EXPORT_SYMBOL(rvt_add_retry_timer);
1961 * rvt_add_rnr_timer - add/start an rnr timer
1963 * @aeth - aeth of RNR timeout, simulated aeth for loopback
1964 * add an rnr timer on the QP
1966 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
1970 lockdep_assert_held(&qp->s_lock);
1971 qp->s_flags |= RVT_S_WAIT_RNR;
1972 to = rvt_aeth_to_usec(aeth);
1973 hrtimer_start(&qp->s_rnr_timer,
1974 ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
1976 EXPORT_SYMBOL(rvt_add_rnr_timer);
1979 * rvt_stop_rc_timers - stop all timers
1981 * stop any pending timers
1983 void rvt_stop_rc_timers(struct rvt_qp *qp)
1985 lockdep_assert_held(&qp->s_lock);
1986 /* Remove QP from all timers */
1987 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1988 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1989 del_timer(&qp->s_timer);
1990 hrtimer_try_to_cancel(&qp->s_rnr_timer);
1993 EXPORT_SYMBOL(rvt_stop_rc_timers);
1996 * rvt_stop_rnr_timer - stop an rnr timer
1999 * stop an rnr timer and return if the timer
2002 static int rvt_stop_rnr_timer(struct rvt_qp *qp)
2006 lockdep_assert_held(&qp->s_lock);
2007 /* Remove QP from rnr timer */
2008 if (qp->s_flags & RVT_S_WAIT_RNR) {
2009 qp->s_flags &= ~RVT_S_WAIT_RNR;
2010 rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
2016 * rvt_del_timers_sync - wait for any timeout routines to exit
2019 void rvt_del_timers_sync(struct rvt_qp *qp)
2021 del_timer_sync(&qp->s_timer);
2022 hrtimer_cancel(&qp->s_rnr_timer);
2024 EXPORT_SYMBOL(rvt_del_timers_sync);
2027 * This is called from s_timer for missing responses.
2029 static void rvt_rc_timeout(unsigned long arg)
2031 struct rvt_qp *qp = (struct rvt_qp *)arg;
2032 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2033 unsigned long flags;
2035 spin_lock_irqsave(&qp->r_lock, flags);
2036 spin_lock(&qp->s_lock);
2037 if (qp->s_flags & RVT_S_TIMER) {
2038 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
2040 qp->s_flags &= ~RVT_S_TIMER;
2041 rvp->n_rc_timeouts++;
2042 del_timer(&qp->s_timer);
2043 trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
2044 if (rdi->driver_f.notify_restart_rc)
2045 rdi->driver_f.notify_restart_rc(qp,
2048 rdi->driver_f.schedule_send(qp);
2050 spin_unlock(&qp->s_lock);
2051 spin_unlock_irqrestore(&qp->r_lock, flags);
2055 * This is called from the s_rnr_timer hrtimer for RNR timeouts.
2057 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
2059 struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
2060 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2061 unsigned long flags;
2063 spin_lock_irqsave(&qp->s_lock, flags);
2064 rvt_stop_rnr_timer(qp);
2065 rdi->driver_f.schedule_send(qp);
2066 spin_unlock_irqrestore(&qp->s_lock, flags);
2067 return HRTIMER_NORESTART;
2069 EXPORT_SYMBOL(rvt_rc_rnr_retry);