/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
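/*
 * QPNs are allocated from a bitmap carved into page-sized chunks:
 * each qpn_map covers BITS_PER_PAGE QPNs, and mk_qpn() below turns a
 * (map, bit offset) pair back into the QPN it represents.
 */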
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
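/*
 * Find the next candidate QPN bit offset at or after @off in @map.
 * When qpt->mask is set, offsets whose masked bits (shifted right by
 * one) reach @n are bumped past that range, mirroring the
 * dd->n_krcv_queues test in alloc_qpn() below.
 */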
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
	if (((off & qpt->mask) >> 1) >= n)
		off = (off | qpt->mask) + 2;
	off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/* Free the page if someone raced with us installing it. */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
	u32 i, offset, max_scan, qpn;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
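		/*
		 * QPN 0 is the SMI QP and QPN 1 the GSI QP.  ret below
		 * is the QPN handed back; n selects a per-port bit (two
		 * bits per port) identifying that special QP.
		 */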
		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		spin_unlock(&qpt->lock);
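	/*
	 * When qpt->mask is set, the masked low bits of the QPN must not
	 * index a kernel receive queue at or beyond dd->n_krcv_queues;
	 * such QPNs are bumped past the reserved range below.
	 */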
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
			if (!test_and_set_bit(offset, map->page)) {
			offset = find_next_offset(qpt, map, offset,
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
			map = &qpt->map[qpt->nmaps++];
		} else if (map < &qpt->map[qpt->nmaps]) {
		qpn = mk_qpn(qpt, map, offset);
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
	       (dev->qp_table_size - 1);
}
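/*
 * QPs other than 0 and 1 live in dev->qp_table, an RCU-protected hash
 * table; jhash_1word() keyed with the per-device qp_rnd seed picks the
 * bucket, and the mask assumes qp_table_size is a power of two.
 */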
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	spin_lock_irqsave(&dev->qpt_lock, flags);
	atomic_inc(&qp->refcount);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		atomic_dec(&qp->refcount);
		rcu_assign_pointer(ibp->qp1, NULL);
		struct qib_qp __rcu **qpp;

		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
			atomic_dec(&qp->refcount);
			rcu_assign_pointer(qp->next, NULL);

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
		if (rcu_dereference(ibp->qp0))
		if (rcu_dereference(ibp->qp1))

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		rcu_assign_pointer(dev->qp_table[n], NULL);
		for (; qp; qp = rcu_dereference_protected(qp->next,
				lockdep_is_held(&dev->qpt_lock)))
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
	struct qib_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		qp = rcu_dereference(ibp->qp0);
		qp = rcu_dereference(ibp->qp1);
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
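	/*
	 * The traversal above is protected by RCU, so the QP could be
	 * torn down in parallel; atomic_inc_not_zero() below only takes
	 * a reference if the refcount has not already dropped to zero.
	 */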
	if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	qp->r_rq.wq->head = 0;
	qp->r_rq.wq->tail = 0;
	qp->r_sge.num_sge = 0;
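/*
 * Drop the MR and AH references held by the receive side of the QP;
 * when clr_sends is set, also drop those held by send work requests
 * still queued on the send queue.
 */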
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;

	if (qp->ibqp.qp_type != IB_QPT_RC)

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qib_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
		qib_put_txreq(qp->s_tx);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	wc.status = IB_WC_WR_FLUSH_ERR;
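	/*
	 * Flush any receive work requests still queued on the receive
	 * queue: each one completes with status IB_WC_WR_FLUSH_ERR.
	 */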
		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
		if (tail >= qp->r_rq.size)
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
	/*
	 * Don't allow invalid path_mtu values.  It is OK to set it
	 * greater than the active mtu (or even the max_cap, if we have
	 * tuned that to a small mtu).  We'll set qp->path_mtu to the
	 * lesser of the requested attribute mtu and the active mtu,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
		pmtu = attr->path_mtu;
	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
			if (new_state != IB_QPS_RTS)
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
			if (qp->s_mig_state == IB_MIG_ARMED)

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
				qib_put_txreq(qp->s_tx);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);

		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;

		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;

		if (qp->ibqp.qp_type == IB_QPT_RC)
		qp->state = new_state;

		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);

		qp->state = new_state;
	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
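		/*
		 * Seed all send-side PSN trackers from sq_psn;
		 * s_last_psn starts one behind s_next_psn, i.e. no PSN
		 * has been acknowledged yet.
		 */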
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = ib_mtu_enum_to_int(pmtu);

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
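		/*
		 * The IB local ACK timeout is 4.096 usec * 2^timeout;
		 * cache the equivalent value in jiffies for the retry
		 * timer.
		 */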
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
		struct qib_rwq *wq = qp->r_rq.wq;

		/* sanity check pointers before trusting them */
		if (head >= qp->r_rq.size)
		if (tail >= qp->r_rq.size)
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
			if (credit_table[x] == credits)
			if (credit_table[x] > credits)
		aeth |= x << QIB_AETH_CREDIT_SHIFT;

	return cpu_to_be32(aeth);
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
	switch (init_attr->qp_type) {
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);

		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
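		/*
		 * Each send WQE is sz bytes: a struct qib_swqe followed
		 * by room for max_send_sge SGEs.  The queue below holds
		 * max_send_wr + 1 of them; qib_query_qp() reports
		 * s_size - 1 back to the caller.
		 */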
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
			ret = ERR_PTR(-ENOMEM);

		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
			ret = ERR_PTR(-ENOMEM);
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
		qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
		qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
		sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
			sizeof(struct qib_rwqe);
		qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
					   qp->r_rq.size * sz);
			ret = ERR_PTR(-ENOMEM);

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);

		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
			err = ib_copy_to_udata(udata, &offset,
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
				ret = ERR_PTR(-ENOMEM);
			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);

		kref_put(&qp->ip->ref, qib_release_mmap_info);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			qib_put_txreq(qp->s_tx);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

		kref_put(&qp->ip->ref, qib_release_mmap_info);
/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->mask = dd->qpn_mask;
/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
/**
 * qib_get_credit - update a QP's send credit state from an AETH
 * @qp: the QP to credit
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);