/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/vmalloc.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "verbs_txreq.h"
static unsigned int hfi1_lkey_table_size = 16;
module_param_named(lkey_table_size, hfi1_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");

static unsigned int hfi1_max_pds = 0xFFFF;
module_param_named(max_pds, hfi1_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int hfi1_max_ahs = 0xFFFF;
module_param_named(max_ahs, hfi1_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int hfi1_max_cqes = 0x2FFFF;
module_param_named(max_cqes, hfi1_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int hfi1_max_cqs = 0x1FFFF;
module_param_named(max_cqs, hfi1_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int hfi1_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, hfi1_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int hfi1_max_qps = 16384;
module_param_named(max_qps, hfi1_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int hfi1_max_sges = 0x60;
module_param_named(max_sges, hfi1_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int hfi1_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, hfi1_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int hfi1_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, hfi1_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int hfi1_max_srqs = 1024;
module_param_named(max_srqs, hfi1_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int hfi1_max_srq_sges = 128;
module_param_named(max_srq_sges, hfi1_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int hfi1_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, hfi1_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
unsigned short piothreshold;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
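/*
 * Rough usage note (see get_send_routine() below): a non-zero
 * piothreshold routes packets whose payload (qp->s_cur_size) is at or
 * below the threshold - and, for RC/UC, also within the path MTU and
 * subject to the opcode/pending-SDMA checks - through the PIO path
 * instead of SDMA.  With piothreshold of 0 the choice is made purely by
 * QP type and SDMA availability.
 */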
#define COPY_CACHELESS 1
#define COPY_ADAPTIVE  2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
		 "Verbs copy mode: 0 use memcpy, 1 use cacheless copy, 2 adapt based on WSS");
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status);

static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag);

/* Length of buffer to create verbs txreq cache name */
#define TXREQ_NAME_LEN 24
static uint wss_threshold;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
/* memory working set size */
struct hfi1_wss {
	unsigned long *entries;
	atomic_t total_count;
	atomic_t clean_counter;
	atomic_t clean_entry;

	int threshold;
	int num_entries;
	long pages_mask;
};

static struct hfi1_wss wss;
int hfi1_wss_init(void)
{
	long llc_size;
	long llc_bits;
	long table_size;
	long table_bits;

	/* check for a valid percent range - default to 80 if none or invalid */
	if (wss_threshold < 1 || wss_threshold > 100)
		wss_threshold = 80;
	/* reject a wildly large period */
	if (wss_clean_period > 1000000)
		wss_clean_period = 256;
	/* reject a zero period */
	if (wss_clean_period == 0)
		wss_clean_period = 1;

	/*
	 * Calculate the table size - the next power of 2 larger than the
	 * LLC size.  LLC size is in KiB.
	 */
	llc_size = wss_llc_size() * 1024;
	table_size = roundup_pow_of_two(llc_size);

	/* one bit per page in rounded up table */
	llc_bits = llc_size / PAGE_SIZE;
	table_bits = table_size / PAGE_SIZE;
	wss.pages_mask = table_bits - 1;
	wss.num_entries = table_bits / BITS_PER_LONG;
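	/*
	 * Worked example (assuming 4 KiB pages and a 32 MiB LLC, which is
	 * already a power of 2): llc_bits = table_bits = 8192, so
	 * pages_mask = 0x1fff and, with 64-bit longs, num_entries = 128.
	 * With the default wss_threshold of 80, the threshold computed
	 * below becomes 8192 * 80 / 100 = 6553 recently written pages.
	 */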
	wss.threshold = (llc_bits * wss_threshold) / 100;
	if (wss.threshold == 0)
		wss.threshold = 1;

	atomic_set(&wss.clean_counter, wss_clean_period);

	wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
			      GFP_KERNEL);
	if (!wss.entries) {
		hfi1_wss_exit();
		return -ENOMEM;
	}

	return 0;
}

void hfi1_wss_exit(void)
{
	/* coded to handle partially initialized and repeat callers */
	kfree(wss.entries);
	wss.entries = NULL;
}
/*
 * Advance the clean counter.  When the clean period has expired,
 * clean an entry.
 *
 * This is implemented in atomics to avoid locking.  Because multiple
 * variables are involved, it can be racy which can lead to slightly
 * inaccurate information.  Since this is only a heuristic, this is
 * OK.  Any inaccuracies will clean themselves out as the counter
 * advances.  That said, it is unlikely the entry clean operation will
 * race - the next possible racer will not start until the next clean
 * period.
 *
 * The clean counter is implemented as a decrement to zero.  When zero
 * is reached an entry is cleaned.
 */
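/*
 * In other words, with the default wss_clean_period of 256, one table
 * entry (one unsigned long, i.e. up to 64 page bits) is cleared for
 * every 256 verbs copies, so the working-set estimate decays gradually
 * instead of being reset wholesale.
 */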
static void wss_advance_clean_counter(void)
{
	int entry;
	int weight;
	unsigned long bits;

	/* become the cleaner if we decrement the counter to zero */
	if (atomic_dec_and_test(&wss.clean_counter)) {
		/*
		 * Set, not add, the clean period.  This avoids an issue
		 * where the counter could decrement below the clean period.
		 * Doing a set can result in lost decrements, slowing the
		 * clean advance.  Since this is a heuristic, this possible
		 * slow down is OK.
		 *
		 * An alternative is to loop, advancing the counter by a
		 * clean period until the result is > 0. However, this could
		 * lead to several threads keeping another in the clean loop.
		 * This could be mitigated by limiting the number of times
		 * we stay in the loop.
		 */
		atomic_set(&wss.clean_counter, wss_clean_period);

		/*
		 * Uniquely grab the entry to clean and move to next.
		 * The current entry is always the lower bits of
		 * wss.clean_entry.  The table size, wss.num_entries,
		 * is always a power-of-2.
		 */
		entry = (atomic_inc_return(&wss.clean_entry) - 1)
			& (wss.num_entries - 1);

		/* clear the entry and count the bits */
		bits = xchg(&wss.entries[entry], 0);
		weight = hweight64((u64)bits);
		/* only adjust the contended total count if needed */
		if (weight)
			atomic_sub(weight, &wss.total_count);
	}
}
/*
 * Insert the given address into the working set array.
 */
static void wss_insert(void *address)
{
	u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
	u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
	u32 nr = page & (BITS_PER_LONG - 1);

	if (!test_and_set_bit(nr, &wss.entries[entry]))
		atomic_inc(&wss.total_count);

	wss_advance_clean_counter();
}
/*
 * Is the working set larger than the threshold?
 */
static inline int wss_exceeds_threshold(void)
{
	return atomic_read(&wss.total_count) >= wss.threshold;
}
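/*
 * How the pieces above fit together (see hfi1_copy_sge() below):
 * wss_insert() marks the destination page of a large verbs copy in the
 * bitmap and bumps total_count the first time a page is seen;
 * wss_advance_clean_counter() slowly ages entries back out; and
 * wss_exceeds_threshold() reports whether the recently written pages
 * already cover more of the LLC than wss_threshold allows, in which
 * case the adaptive mode switches to a cacheless copy.
 */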
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_hfi1_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
/*
 * Length of header by opcode, 0 --> not supported
 */
const u8 hdr_len_by_opcode[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_RC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = 12 + 8 + 16,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = 12 + 8,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = 12 + 8 + 4,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = 12 + 8 + 4,
	[IB_OPCODE_RC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
	[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
	[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
	[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST] = 12 + 8,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_SEND_ONLY] = 12 + 8,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = 12 + 8,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = 12 + 8 + 4,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = 12 + 8 + 16,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = 12 + 8 + 20,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = 12 + 8 + 8,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = 12 + 8 + 12
};
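/*
 * Reading the table: every entry is BTH (12 bytes) + LRH (8 bytes) plus
 * whatever extension headers the opcode carries.  For example,
 * RC RDMA WRITE ONLY WITH IMMEDIATE is 12 + 8 + 20 = 40 bytes, the
 * extra 20 being a 16-byte RETH plus a 4-byte immediate; the atomic
 * opcodes carry a 28-byte AtomicETH, and UD adds an 8-byte DETH.
 */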
static const opcode_handler opcode_handler_tbl[256] = {
	/* RC */
	[IB_OPCODE_RC_SEND_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_REQUEST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_COMPARE_SWAP] = &hfi1_rc_rcv,
	[IB_OPCODE_RC_FETCH_ADD] = &hfi1_rc_rcv,
	/* UC */
	[IB_OPCODE_UC_SEND_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_FIRST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY] = &hfi1_uc_rcv,
	[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = &hfi1_uc_rcv,
	/* UD */
	[IB_OPCODE_UD_SEND_ONLY] = &hfi1_ud_rcv,
	[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = &hfi1_ud_rcv,
	/* CNP */
	[IB_OPCODE_CNP] = &hfi1_cnp_rcv
};

__be64 ib_hfi1_sys_image_guid;
/**
 * hfi1_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @copy_last: do a separate copy of the last 8 bytes
 */
void hfi1_copy_sge(
	struct rvt_sge_state *ss,
	void *data, u32 length,
	int release,
	int copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int in_last = 0;
	int i;
	int cacheless_copy = 0;

	if (sge_copy_mode == COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(sge->vaddr + PAGE_SIZE);

			cacheless_copy = wss_exceeds_threshold();
		} else {
			wss_advance_clean_counter();
		}
	}
	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = 0;
			in_last = 1;
		}
	}

again:
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = 0;
		in_last = 1;
		length = 8;
		goto again;
	}
}
/**
 * hfi1_skip_sge - skip over SGE memory
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void hfi1_skip_sge(struct rvt_sge_state *ss, u32 length, int release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				rvt_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/*
 * Make sure the QP is ready and able to accept the given opcode.
 */
static inline int qp_ok(int opcode, struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp;

	if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
		goto dropit;
	if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
	    (opcode == IB_OPCODE_CNP))
		return 1;
dropit:
	ibp = &packet->rcd->ppd->ibport_data;
	ibp->rvp.n_pkt_drops++;
	return 0;
}
/**
 * hfi1_ib_rcv - process an incoming packet
 * @packet: data packet information
 *
 * This is called to process an incoming packet at interrupt level.
 *
 * Tlen is the length of the header + data + CRC in bytes.
 */
void hfi1_ib_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 tlen = packet->tlen;
	struct hfi1_pportdata *ppd = rcd->ppd;
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	struct rvt_dev_info *rdi = &ppd->dd->verbs_dev.rdi;
	unsigned long flags;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_BTH) {
		packet->ohdr = &hdr->u.oth;
	} else if (lnh == HFI1_LRH_GRH) {
		u32 vtf;

		packet->ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
		packet->rcv_flags |= HFI1_HAS_GRH;
	} else {
		goto drop;
	}

	trace_input_ibhdr(rcd->dd, hdr);

	opcode = (be32_to_cpu(packet->ohdr->bth[0]) >> 24);
	inc_opstats(tlen, &rcd->opstats->stats[opcode]);

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(packet->ohdr->bth[1]) & RVT_QPN_MASK;
	lid = be16_to_cpu(hdr->lrh[1]);
	if (unlikely((lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
		     (lid != be16_to_cpu(IB_LID_PERMISSIVE)))) {
		struct rvt_mcast *mcast;
		struct rvt_mcast_qp *p;

		if (lnh != HFI1_LRH_GRH)
			goto drop;
		mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid);
		if (!mcast)
			goto drop;
		list_for_each_entry_rcu(p, &mcast->qp_list, list) {
			packet->qp = p->qp;
			spin_lock_irqsave(&packet->qp->r_lock, flags);
			if (likely((qp_ok(opcode, packet))))
				opcode_handler_tbl[opcode](packet);
			spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		}
		/*
		 * Notify rvt_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		rcu_read_lock();
		packet->qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num);
		if (!packet->qp) {
			rcu_read_unlock();
			goto drop;
		}
		spin_lock_irqsave(&packet->qp->r_lock, flags);
		if (likely((qp_ok(opcode, packet))))
			opcode_handler_tbl[opcode](packet);
		spin_unlock_irqrestore(&packet->qp->r_lock, flags);
		rcu_read_unlock();
	}
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
	struct list_head *list = &dev->memwait;
	struct rvt_qp *qp = NULL;
	struct iowait *wait;
	unsigned long flags;
	struct hfi1_qp_priv *priv;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(list)) {
		wait = list_first_entry(list, struct iowait, list);
		qp = iowait_to_qp(wait);
		priv = qp->priv;
		list_del_init(&priv->s_iowait.list);
		/* refcount held until actual wake up */
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);

	if (qp)
		hfi1_qp_wakeup(qp, RVT_S_WAIT_KMEM);
}
void update_sge(struct rvt_sge_state *ss, u32 length)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
/*
 * This is called with progress side lock held.
 */
static void verbs_sdma_complete(
	struct sdma_txreq *cookie,
	int status)
{
	struct verbs_txreq *tx =
		container_of(cookie, struct verbs_txreq, txreq);
	struct rvt_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe) {
		hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct hfi1_ib_header *hdr;

		hdr = &tx->phdr.hdr;
		hfi1_rc_send_complete(qp, hdr);
	}
	spin_unlock(&qp->s_lock);

	hfi1_put_txreq(tx);
}
static int wait_kmem(struct hfi1_ibdev *dev,
		     struct rvt_qp *qp,
		     struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= RVT_S_WAIT_KMEM;
			list_add_tail(&priv->s_iowait.list, &dev->memwait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_KMEM);
			atomic_inc(&qp->refcount);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
/*
 * This routine calls txadds for each sg entry.
 *
 * Add failures will revert the sge cursor.
 */
static noinline int build_verbs_ulp_payload(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx)
{
	struct rvt_sge *sg_list = ss->sg_list;
	struct rvt_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 len;
	int ret = 0;

	while (length) {
		len = ss->sge.length;
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		WARN_ON_ONCE(len == 0);
		ret = sdma_txadd_kvaddr(
			sde->dd,
			&tx->txreq,
			ss->sge.vaddr,
			len);
		if (ret)
			goto bail_txadd;
		update_sge(ss, len);
		length -= len;
	}
	return ret;

bail_txadd:
	/* unwind cursor */
	ss->sge = sge;
	ss->num_sge = num_sge;
	ss->sg_list = sg_list;
	return ret;
}
/*
 * Build the number of DMA descriptors needed to send length bytes of data.
 *
 * NOTE: DMA mapping is held in the tx until completed in the ring or
 *       the tx desc is freed without having been submitted to the ring.
 *
 * This routine ensures all the helper routine calls succeed.
 */
static int build_verbs_tx_desc(
	struct sdma_engine *sde,
	struct rvt_sge_state *ss,
	u32 length,
	struct verbs_txreq *tx,
	struct ahg_ib_header *ahdr,
	u64 pbc)
{
	int ret = 0;
	struct hfi1_pio_header *phdr = &tx->phdr;
	u16 hdrbytes = tx->hdr_dwords << 2;

	if (!ahdr->ahgcount) {
		ret = sdma_txinit_ahg(&tx->txreq, ahdr->tx_flags,
				      hdrbytes + length, ahdr->ahgidx,
				      0, NULL, 0,
				      verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
		/* add the header to the descriptor list */
		phdr->pbc = cpu_to_le64(pbc);
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, phdr, hdrbytes);
		if (ret)
			goto bail_txadd;
	} else {
		ret = sdma_txinit_ahg(&tx->txreq, ahdr->tx_flags, length,
				      ahdr->ahgidx, ahdr->ahgcount,
				      ahdr->ahgdesc, hdrbytes,
				      verbs_sdma_complete);
		if (ret)
			goto bail_txadd;
	}

	/* add the ulp payload - if any.  ss can be NULL for acks */
	if (ss)
		ret = build_verbs_ulp_payload(sde, ss, length, tx);
bail_txadd:
	return ret;
}
int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ahg_ib_header *ahdr = priv->s_hdr;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 plen = hdrwords + ((len + 3) >> 2) + 2; /* includes pbc */
	struct hfi1_ibdev *dev = ps->dev;
	struct hfi1_pportdata *ppd = ps->ppd;
	struct verbs_txreq *tx;
	u64 pbc_flags = 0;
	u8 sc5 = priv->s_sc;
	int ret;

	tx = ps->s_txreq;
	if (!sdma_txreq_built(&tx->txreq)) {
		if (likely(pbc == 0)) {
			u32 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);

			/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
			pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
			pbc = create_pbc(ppd,
					 pbc_flags,
					 qp->srate_mbps,
					 vl,
					 plen);
		}
		tx->wqe = qp->s_wqe;
		ret = build_verbs_tx_desc(tx->sde, ss, len, tx, ahdr, pbc);
		if (unlikely(ret))
			goto bail_build;
	}
	ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq);
	if (unlikely(ret < 0)) {
		if (ret == -ECOMM)
			goto bail_ecomm;
		return ret;
	}
	trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
				&ps->s_txreq->phdr.hdr);
	return ret;

bail_ecomm:
	/* The current one got "sent" */
	return 0;
bail_build:
	ret = wait_kmem(dev, qp, ps);
	if (!ret) {
		/* free txreq - bad state */
		hfi1_put_txreq(ps->s_txreq);
		ps->s_txreq = NULL;
	}
	return ret;
}
/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int pio_wait(struct rvt_qp *qp,
		    struct send_context *sc,
		    struct hfi1_pkt_state *ps,
		    u32 flag)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_devdata *dd = sc->dd;
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, sc_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		write_seqlock(&dev->iowait_lock);
		list_add_tail(&ps->s_txreq->txreq.list,
			      &priv->s_iowait.tx_head);
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibdev *dev = &dd->verbs_dev;
			int was_empty;

			dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
			dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
			qp->s_flags |= flag;
			was_empty = list_empty(&sc->piowait);
			list_add_tail(&priv->s_iowait.list, &sc->piowait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
			atomic_inc(&qp->refcount);
			/* counting: only call wantpiobuf_intr if first user */
			if (was_empty)
				hfi1_sc_wantpiobuf_intr(sc, 1);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
static void verbs_pio_complete(void *arg, int code)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct hfi1_qp_priv *priv = qp->priv;

	if (iowait_pio_dec(&priv->s_iowait))
		iowait_drain_wakeup(&priv->s_iowait);
}
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			u64 pbc)
{
	struct hfi1_qp_priv *priv = qp->priv;
	u32 hdrwords = qp->s_hdrwords;
	struct rvt_sge_state *ss = qp->s_cur_sge;
	u32 len = qp->s_cur_size;
	u32 dwords = (len + 3) >> 2;
	u32 plen = hdrwords + dwords + 2; /* includes pbc */
	struct hfi1_pportdata *ppd = ps->ppd;
	u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
	u64 pbc_flags = 0;
	u8 sc5;
	unsigned long flags = 0;
	struct send_context *sc;
	struct pio_buf *pbuf;
	int wc_status = IB_WC_SUCCESS;
	int ret = 0;
	pio_release_cb cb = NULL;

	/* only RC/UC use complete */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		cb = verbs_pio_complete;
		break;
	default:
		break;
	}

	/* vl15 special case taken care of in ud.c */
	sc5 = priv->s_sc;
	sc = ps->s_txreq->psc;

	if (likely(pbc == 0)) {
		u8 vl = sc_to_vlt(dd_from_ibdev(qp->ibqp.device), sc5);
		/* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
		pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
		pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	}
	if (cb)
		iowait_pio_inc(&priv->s_iowait);
	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
	if (unlikely(!pbuf)) {
		if (cb)
			verbs_pio_complete(qp, 0);
		if (ppd->host_link_state != HLS_UP_ACTIVE) {
			/*
			 * If we have filled the PIO buffers to capacity and are
			 * not in an active state, this request is not going to
			 * go out, so just complete it with an error or else a
			 * ULP or the core may be stuck waiting.
			 */
			hfi1_cdbg(PIO,
				  "alloc failed. state not active, completing");
			wc_status = IB_WC_GENERAL_ERR;
			goto pio_bail;
		} else {
			/*
			 * This is a normal occurrence. The PIO buffs are full
			 * up, but we are still happily sending, so let's
			 * continue to queue the request.
			 */
			hfi1_cdbg(PIO, "alloc failed. state active, queuing");
			ret = pio_wait(qp, sc, ps, RVT_S_WAIT_PIO);
			if (!ret)
				/* txreq not queued - free */
				goto bail;
			/* tx consumed in wait */
			return ret;
		}
	}

	if (len == 0) {
		pio_copy(ppd->dd, pbuf, pbc, hdr, hdrwords);
	} else {
		if (ss) {
			seg_pio_copy_start(pbuf, pbc, hdr, hdrwords * 4);
			while (len) {
				void *addr = ss->sge.vaddr;
				u32 slen = ss->sge.length;

				if (slen > len)
					slen = len;
				update_sge(ss, slen);
				seg_pio_copy_mid(pbuf, addr, slen);
				len -= slen;
			}
			seg_pio_copy_end(pbuf);
		}
	}

	trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
			       &ps->s_txreq->phdr.hdr);

pio_bail:
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_send_complete(qp, qp->s_wqe, wc_status);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}

	ret = 0;

bail:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return ret;
}
/*
 * egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
 * being an entry from the ingress partition key table), return 0
 * otherwise. Use the matching criteria for egress partition keys
 * specified in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
	u16 mkey = pkey & PKEY_LOW_15_MASK;
	u16 ment = ent & PKEY_LOW_15_MASK;

	if (mkey == ment) {
		/*
		 * If pkey[15] is set (full partition member),
		 * is bit 15 in the corresponding table element
		 * clear (limited member)?
		 */
		if (pkey & PKEY_MEMBER_MASK)
			return !!(ent & PKEY_MEMBER_MASK);
		return 1;
	}
	return 0;
}
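/*
 * Example of the matching rule as implemented above: the low 15 bits
 * must be equal for any match, and a full-member egress pkey (bit 15
 * set) only matches a table entry that is itself a full member.  So
 * pkey 0x8001 matches entry 0x8001 but not the limited-member entry
 * 0x0001, while the limited pkey 0x0001 matches either form.
 */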
/**
 * egress_pkey_check - return 0 if hdr's pkey matches according to the
 * criteria in the OPAv1 spec., section 9.11.7.
 */
static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
				    struct hfi1_ib_header *hdr,
				    struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_other_headers *ohdr;
	struct hfi1_devdata *dd;
	int i = 0;
	u16 pkey;
	u8 lnh, sc5 = priv->s_sc;

	if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
		return 0;

	/* locate the pkey within the headers */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == HFI1_LRH_GRH)
		ohdr = &hdr->u.l.oth;
	else
		ohdr = &hdr->u.oth;

	pkey = (u16)be32_to_cpu(ohdr->bth[0]);

	/* If SC15, pkey[0:14] must be 0x7fff */
	if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
		goto bad;

	/* Is the pkey = 0x0, or 0x8000? */
	if ((pkey & PKEY_LOW_15_MASK) == 0)
		goto bad;

	/* The most likely matching pkey has index qp->s_pkey_index */
	if (unlikely(!egress_pkey_matches_entry(pkey,
						ppd->pkeys
						[qp->s_pkey_index]))) {
		/* no match - try the entire table */
		for (; i < MAX_PKEY_VALUES; i++) {
			if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
				break;
		}
	}

	if (i < MAX_PKEY_VALUES)
		return 0;
bad:
	incr_cntr64(&ppd->port_xmit_constraint_errors);
	dd = ppd->dd;
	if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
		u16 slid = be16_to_cpu(hdr->lrh[3]);

		dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
		dd->err_info_xmit_constraint.slid = slid;
		dd->err_info_xmit_constraint.pkey = pkey;
	}
	return 1;
}
/**
 * get_send_routine - choose an egress routine
 *
 * Choose an egress routine based on QP type
 * and size.
 */
static inline send_routine get_send_routine(struct rvt_qp *qp,
					    struct hfi1_ib_header *h)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(!(dd->flags & HFI1_HAS_SEND_DMA)))
		return dd->process_pio_send;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return dd->process_pio_send;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		if (piothreshold && qp->s_cur_size <= piothreshold)
			return dd->process_pio_send;
		break;
	case IB_QPT_RC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & rc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0)
			return dd->process_pio_send;
		break;
	case IB_QPT_UC:
		if (piothreshold &&
		    qp->s_cur_size <= min(piothreshold, qp->pmtu) &&
		    (BIT(get_opcode(h) & 0x1f) & uc_only_opcode) &&
		    iowait_sdma_pending(&priv->s_iowait) == 0)
			return dd->process_pio_send;
		break;
	default:
		break;
	}
	return dd->process_dma_send;
}
/**
 * hfi1_verbs_send - send a packet
 * @qp: the QP to send on
 * @ps: the state of the packet to send
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise.
 */
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	send_routine sr;
	int ret;

	sr = get_send_routine(qp, &ps->s_txreq->phdr.hdr);
	ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp);
	if (unlikely(ret)) {
		/*
		 * The value we are returning here does not get propagated to
		 * the verbs caller. Thus we need to complete the request with
		 * error otherwise the caller could be sitting waiting on the
		 * completion event. Only do this for PIO. SDMA has its own
		 * mechanism for handling the errors, so for SDMA we can just
		 * return.
		 */
		if (sr == dd->process_pio_send) {
			unsigned long flags;

			hfi1_cdbg(PIO, "%s() Failed. Completing with err",
				  __func__);
			spin_lock_irqsave(&qp->s_lock, flags);
			hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		return -EINVAL;
	}
	return sr(qp, ps, 0);
}
/**
 * hfi1_fill_device_attr - Fill in rvt dev info device attributes.
 * @dd: the device data structure
 */
static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;

	memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props));

	rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	rdi->dparms.props.page_size_cap = PAGE_SIZE;
	rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3;
	rdi->dparms.props.vendor_part_id = dd->pcidev->device;
	rdi->dparms.props.hw_ver = dd->minrev;
	rdi->dparms.props.sys_image_guid = ib_hfi1_sys_image_guid;
	rdi->dparms.props.max_mr_size = ~0ULL;
	rdi->dparms.props.max_qp = hfi1_max_qps;
	rdi->dparms.props.max_qp_wr = hfi1_max_qp_wrs;
	rdi->dparms.props.max_sge = hfi1_max_sges;
	rdi->dparms.props.max_sge_rd = hfi1_max_sges;
	rdi->dparms.props.max_cq = hfi1_max_cqs;
	rdi->dparms.props.max_ah = hfi1_max_ahs;
	rdi->dparms.props.max_cqe = hfi1_max_cqes;
	rdi->dparms.props.max_mr = rdi->lkey_table.max;
	rdi->dparms.props.max_fmr = rdi->lkey_table.max;
	rdi->dparms.props.max_map_per_fmr = 32767;
	rdi->dparms.props.max_pd = hfi1_max_pds;
	rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
	rdi->dparms.props.max_qp_init_rd_atom = 255;
	rdi->dparms.props.max_srq = hfi1_max_srqs;
	rdi->dparms.props.max_srq_wr = hfi1_max_srq_wrs;
	rdi->dparms.props.max_srq_sge = hfi1_max_srq_sges;
	rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB;
	rdi->dparms.props.max_pkeys = hfi1_get_npkeys(dd);
	rdi->dparms.props.max_mcast_grp = hfi1_max_mcast_grps;
	rdi->dparms.props.max_mcast_qp_attach = hfi1_max_mcast_qp_attached;
	rdi->dparms.props.max_total_mcast_qp_attach =
		rdi->dparms.props.max_mcast_qp_attach *
		rdi->dparms.props.max_mcast_grp;
}
static inline u16 opa_speed_to_ib(u16 in)
{
	u16 out = 0;

	if (in & OPA_LINK_SPEED_25G)
		out |= IB_SPEED_EDR;
	if (in & OPA_LINK_SPEED_12_5G)
		out |= IB_SPEED_FDR;

	return out;
}
/*
 * Convert a single OPA link width (no multiple flags) to an IB value.
 * A zero OPA link width means link down, which means the IB width value
 * is a don't care.
 */
static inline u16 opa_width_to_ib(u16 in)
{
	switch (in) {
	case OPA_LINK_WIDTH_1X:
	/* map 2x and 3x to 1x as they don't exist in IB */
	case OPA_LINK_WIDTH_2X:
	case OPA_LINK_WIDTH_3X:
		return IB_WIDTH_1X;
	default: /* link down or unknown, return our largest width */
	case OPA_LINK_WIDTH_4X:
		return IB_WIDTH_4X;
	}
}
static int query_port(struct rvt_dev_info *rdi, u8 port_num,
		      struct ib_port_attr *props)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	u16 lid = ppd->lid;

	props->lid = lid ? lid : 0;
	props->lmc = ppd->lmc;
	/* OPA logical states match IB logical states */
	props->state = driver_lstate(ppd);
	props->phys_state = hfi1_ibphys_portstate(ppd);
	props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
	props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
	/* see rate_show() in ib core/sysfs.c */
	props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
	props->max_vl_num = ppd->vls_supported;

	/* Once we are a "first class" citizen and have added the OPA MTUs to
	 * the core we can advertise the larger MTU enum to the ULPs, for now
	 * advertise only 4K.
	 *
	 * Those applications which are either OPA aware or pass the MTU enum
	 * from the Path Records to us will get the new 8k MTU.  Those that
	 * attempt to process the MTU enum may fail in various ways.
	 */
	props->max_mtu = mtu_to_enum((!valid_ib_mtu(hfi1_max_mtu) ?
				      4096 : hfi1_max_mtu), IB_MTU_4096);
	props->active_mtu = !valid_ib_mtu(ppd->ibmtu) ? props->max_mtu :
		mtu_to_enum(ppd->ibmtu, IB_MTU_2048);

	return 0;
}
static int modify_device(struct ib_device *device,
			 int device_modify_mask,
			 struct ib_device_modify *device_modify)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_hfi1_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct hfi1_ibport *ibp = &dd->pport[i].ibport_data;

			hfi1_sys_guid_chg(ibp);
		}
	}

	ret = 0;
bail:
	return ret;
}
static int shut_down_port(struct rvt_dev_info *rdi, u8 port_num)
{
	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
	struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
	int ret;

	set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
			     OPA_LINKDOWN_REASON_UNKNOWN);
	ret = set_link_state(ppd, HLS_DN_DOWNDEF);
	return ret;
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    int guid_index, __be64 *guid)
{
	struct hfi1_ibport *ibp = container_of(rvp, struct hfi1_ibport, rvp);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (guid_index == 0)
		*guid = cpu_to_be64(ppd->guid);
	else if (guid_index < HFI1_GUIDS_PER_PORT)
		*guid = ibp->guids[guid_index - 1];
	else
		return -EINVAL;

	return 0;
}
/*
 * convert ah port,sl to sc
 */
u8 ah_to_sc(struct ib_device *ibdev, struct ib_ah_attr *ah)
{
	struct hfi1_ibport *ibp = to_iport(ibdev, ah->port_num);

	return ibp->sl_to_sc[ah->sl];
}
static int hfi1_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/* test the mapping for validity */
	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	dd = dd_from_ppd(ppd);
	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
		return -EINVAL;
	return 0;
}
static void hfi1_notify_new_ah(struct ib_device *ibdev,
			       struct ib_ah_attr *ah_attr,
			       struct rvt_ah *ah)
{
	struct hfi1_ibport *ibp;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;
	u8 sc5;

	/*
	 * Do not trust reading anything from rvt_ah at this point as it is not
	 * done being setup. We can however modify things which we need to set.
	 */
	ibp = to_iport(ibdev, ah_attr->port_num);
	ppd = ppd_from_ibp(ibp);
	sc5 = ibp->sl_to_sc[ah->attr.sl];
	dd = dd_from_ppd(ppd);
	ah->vl = sc_to_vlt(dd, sc5);
	if (ah->vl < num_vls || ah->vl == 15)
		ah->log_pmtu = ilog2(dd->vld[ah->vl].mtu);
}
struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct rvt_qp *qp0;

	memset(&attr, 0, sizeof(attr));
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->rvp.qp[0]);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}
/**
 * hfi1_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the hfi1_ib device
 */
unsigned hfi1_get_npkeys(struct hfi1_devdata *dd)
{
	return ARRAY_SIZE(dd->pport[0].pkeys);
}
static void init_ibport(struct hfi1_pportdata *ppd)
{
	struct hfi1_ibport *ibp = &ppd->ibport_data;
	size_t sz = ARRAY_SIZE(ibp->sl_to_sc);
	int i;

	for (i = 0; i < sz; i++) {
		ibp->sl_to_sc[i] = i;
		ibp->sc_to_sl[i] = i;
	}

	spin_lock_init(&ibp->rvp.lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->rvp.sm_lid = 0;
	/* Below should only set bits defined in OPA PortInfo.CapabilityMask */
	ibp->rvp.port_cap_flags = IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_CAP_MASK_NOTICE_SUP;
	ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
	RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
}
/**
 * hfi1_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 * Return 0 if successful, errno if unsuccessful.
 */
int hfi1_register_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->rdi.ibdev;
	struct hfi1_pportdata *ppd = dd->pport;
	unsigned i;
	int ret;
	size_t lcpysz = IB_DEVICE_NAME_MAX;

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */

	setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);

	seqlock_init(&dev->iowait_lock);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);

	ret = verbs_txreq_init(dev);
	if (ret)
		goto err_verbs_txreq;

	/*
	 * The system image GUID is supposed to be the same for all
	 * HFIs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_hfi1_sys_image_guid)
		ib_hfi1_sys_image_guid = cpu_to_be64(ppd->guid);
	lcpysz = strlcpy(ibdev->name, class_name(), lcpysz);
	strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = cpu_to_be64(ppd->guid);
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->modify_device = modify_device;

	/* keep process mad in the driver */
	ibdev->process_mad = hfi1_process_mad;

	strncpy(ibdev->node_desc, init_utsname()->nodename,
		sizeof(ibdev->node_desc));

	/*
	 * Fill in rvt info object.
	 */
	dd->verbs_dev.rdi.driver_f.port_callback = hfi1_create_port_files;
	dd->verbs_dev.rdi.driver_f.get_card_name = get_card_name;
	dd->verbs_dev.rdi.driver_f.get_pci_dev = get_pci_dev;
	dd->verbs_dev.rdi.driver_f.check_ah = hfi1_check_ah;
	dd->verbs_dev.rdi.driver_f.notify_new_ah = hfi1_notify_new_ah;
	dd->verbs_dev.rdi.driver_f.get_guid_be = hfi1_get_guid_be;
	dd->verbs_dev.rdi.driver_f.query_port_state = query_port;
	dd->verbs_dev.rdi.driver_f.shut_down_port = shut_down_port;
	dd->verbs_dev.rdi.driver_f.cap_mask_chg = hfi1_cap_mask_chg;
	/*
	 * Fill in rvt info device attributes.
	 */
	hfi1_fill_device_attr(dd);

	dd->verbs_dev.rdi.dparms.qp_table_size = hfi1_qp_table_size;
	dd->verbs_dev.rdi.dparms.qpn_start = 0;
	dd->verbs_dev.rdi.dparms.qpn_inc = 1;
	dd->verbs_dev.rdi.dparms.qos_shift = dd->qos_shift;
	dd->verbs_dev.rdi.dparms.qpn_res_start = kdeth_qp << 16;
	dd->verbs_dev.rdi.dparms.qpn_res_end =
		dd->verbs_dev.rdi.dparms.qpn_res_start + 65535;
	dd->verbs_dev.rdi.dparms.max_rdma_atomic = HFI1_MAX_RDMA_ATOMIC;
	dd->verbs_dev.rdi.dparms.psn_mask = PSN_MASK;
	dd->verbs_dev.rdi.dparms.psn_shift = PSN_SHIFT;
	dd->verbs_dev.rdi.dparms.psn_modify_mask = PSN_MODIFY_MASK;
	dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_INTEL_OPA;
	dd->verbs_dev.rdi.dparms.max_mad_size = OPA_MGMT_MAD_SIZE;

	dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qp_priv_alloc;
	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.flush_qp_waiters = flush_qp_waiters;
	dd->verbs_dev.rdi.driver_f.stop_send_queue = stop_send_queue;
	dd->verbs_dev.rdi.driver_f.quiesce_qp = quiesce_qp;
	dd->verbs_dev.rdi.driver_f.notify_error_qp = notify_error_qp;
	dd->verbs_dev.rdi.driver_f.mtu_from_qp = mtu_from_qp;
	dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = mtu_to_path_mtu;
	dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
	dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
	dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;

	/* completion queue */
	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
		 "hfi1_cq%d", dd->unit);
	dd->verbs_dev.rdi.dparms.node = dd->node;

	dd->verbs_dev.rdi.flags = 0; /* Let rdmavt handle it all */
	dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
	dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);

	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(&dd->verbs_dev.rdi,
			      &ppd->ibport_data.rvp,
			      i,
			      ppd->pkeys);

	ret = rvt_register_device(&dd->verbs_dev.rdi);
	if (ret)
		goto err_verbs_txreq;

	ret = hfi1_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	return ret;

err_class:
	rvt_unregister_device(&dd->verbs_dev.rdi);
err_verbs_txreq:
	verbs_txreq_exit(dev);
	dd_dev_err(dd, "cannot register verbs: %d!\n", -ret);
	return ret;
}
void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
{
	struct hfi1_ibdev *dev = &dd->verbs_dev;

	hfi1_verbs_unregister_sysfs(dd);

	rvt_unregister_device(&dd->verbs_dev.rdi);

	if (!list_empty(&dev->txwait))
		dd_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		dd_dev_err(dd, "memwait list not empty!\n");

	del_timer_sync(&dev->mem_timer);
	verbs_txreq_exit(dev);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ib_header *hdr = packet->hdr;
	struct rvt_qp *qp = packet->qp;
	u32 lqpn, rqpn = 0;
	u16 rlid = 0;
	u8 sl, sc5, sc4_bit, svc_type;
	bool sc4_set = has_sc4_bit(packet);

	switch (packet->qp->ibqp.qp_type) {
	case IB_QPT_UC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_UC;
		break;
	case IB_QPT_RC:
		rlid = qp->remote_ah_attr.dlid;
		rqpn = qp->remote_qpn;
		svc_type = IB_CC_SVCTYPE_RC;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	default:
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sc4_bit = sc4_set << 4;
	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	sc5 |= sc4_bit;
	sl = ibp->sc_to_sl[sc5];
	lqpn = qp->ibqp.qp_num;

	process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
}