/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/prefetch.h>

#include "hfi.h"
#include "trace.h"
#include "qp.h"
#include "sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

/*
 * The size has to be longer than this string, so we can append
 * board/chip information to it in the initialization code.
 */
const char ib_hfi1_version[] = HFI1_DRIVER_VERSION "\n";

DEFINE_SPINLOCK(hfi1_devs_lock);
LIST_HEAD(hfi1_dev_list);
DEFINE_MUTEX(hfi1_mutex);	/* general driver use */

unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");

unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
MODULE_PARM_DESC(cu, "Credit return units");

unsigned long hfi1_cap_mask = HFI1_CAP_MASK_DEFAULT;
static int hfi1_caps_set(const char *, const struct kernel_param *);
static int hfi1_caps_get(char *, const struct kernel_param *);
static const struct kernel_param_ops cap_ops = {
	.set = hfi1_caps_set,
	.get = hfi1_caps_get
};
module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
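
/*
 * Illustrative usage (not code from this file): the parameters above can
 * be supplied at load time, e.g. "modprobe hfi1 max_mtu=10240 cap_mask=0x1",
 * assuming the driver module is loaded under its DRIVER_NAME, "hfi1".
 */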

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
MODULE_VERSION(HFI1_DRIVER_VERSION);

/*
 * MAX_PKT_RECV is the max # of packets processed per receive interrupt.
 */
#define MAX_PKT_RECV 64
#define EGR_HEAD_UPDATE_THRESHOLD 16

struct hfi1_ib_stats hfi1_stats;

static int hfi1_caps_set(const char *val, const struct kernel_param *kp)
{
	int ret = 0;
	unsigned long *cap_mask_ptr = (unsigned long *)kp->arg,
		cap_mask = *cap_mask_ptr, value, diff,
		write_mask = ((HFI1_CAP_WRITABLE_MASK << HFI1_CAP_USER_SHIFT) |
			      HFI1_CAP_WRITABLE_MASK);

	ret = kstrtoul(val, 0, &value);
	if (ret) {
		pr_warn("Invalid module parameter value for 'cap_mask'\n");
		goto done;
	}

	/* Get the changed bits (except the locked bit) */
	diff = value ^ (cap_mask & ~HFI1_CAP_LOCKED_SMASK);

	/* Remove any bits that are not allowed to change after driver load */
	if (HFI1_CAP_LOCKED() && (diff & ~write_mask)) {
		pr_warn("Ignoring non-writable capability bits %#lx\n",
			diff & ~write_mask);
		diff &= write_mask;
	}

	/* Mask off any reserved bits */
	diff &= ~HFI1_CAP_RESERVED_MASK;
	/* Clear any previously set and changing bits */
	cap_mask &= ~diff;
	/* Update the bits with the new capability */
	cap_mask |= (value & diff);
	/* Check for any kernel/user restrictions */
	diff = (cap_mask & (HFI1_CAP_MUST_HAVE_KERN << HFI1_CAP_USER_SHIFT)) ^
	       ((cap_mask & HFI1_CAP_MUST_HAVE_KERN) << HFI1_CAP_USER_SHIFT);
	cap_mask &= ~diff;
	/* Set the bitmask to the final set */
	*cap_mask_ptr = cap_mask;
done:
	return ret;
}

static int hfi1_caps_get(char *buffer, const struct kernel_param *kp)
{
	unsigned long cap_mask = *(unsigned long *)kp->arg;

	cap_mask &= ~HFI1_CAP_LOCKED_SMASK;
	cap_mask |= ((cap_mask & HFI1_CAP_K2U) << HFI1_CAP_USER_SHIFT);

	return scnprintf(buffer, PAGE_SIZE, "0x%lx", cap_mask);
}
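
/*
 * Usage note (standard module_param_cb() plumbing, not code from this
 * file): with S_IWUSR | S_IRUGO above, root can read and write
 * /sys/module/hfi1/parameters/cap_mask at runtime; writes funnel through
 * hfi1_caps_set(), so locked and reserved bits are filtered as shown.
 */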

const char *get_unit_name(int unit)
{
	static char iname[16];

	snprintf(iname, sizeof(iname), DRIVER_NAME"_%u", unit);
	return iname;
}

/*
 * Return count of units with at least one port ACTIVE.
 */
int hfi1_count_active_units(void)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx, nunits_active = 0;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		if (!(dd->flags & HFI1_PRESENT) || !dd->kregbase)
			continue;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup) {
				nunits_active++;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
	return nunits_active;
}

/*
 * Return count of all units, optionally return in arguments
 * the number of usable (present) units, and the number of
 * ports that are up.
 */
int hfi1_count_units(int *npresentp, int *nupp)
{
	int nunits = 0, npresent = 0, nup = 0;
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	spin_lock_irqsave(&hfi1_devs_lock, flags);
	list_for_each_entry(dd, &hfi1_dev_list, list) {
		nunits++;
		if ((dd->flags & HFI1_PRESENT) && dd->kregbase)
			npresent++;
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			if (ppd->lid && ppd->linkup)
				nup++;
		}
	}
	spin_unlock_irqrestore(&hfi1_devs_lock, flags);

	if (npresentp)
		*npresentp = npresent;
	if (nupp)
		*nupp = nup;
	return nunits;
}

/*
 * Get address of eager buffer from its index (allocated in chunks, not
 * contiguous).
 */
static inline void *get_egrbuf(const struct hfi1_ctxtdata *rcd, u64 rhf,
			       u8 *update)
{
	u32 idx = rhf_egr_index(rhf), offset = rhf_egr_buf_offset(rhf);

	*update |= !(idx & (rcd->egrbufs.threshold - 1)) && !offset;
	return (void *)(((u64)(rcd->egrbufs.rcvtids[idx].addr)) +
			(offset * RCV_BUF_BLOCK_SIZE));
}
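
/*
 * Note on the update test above: "!(idx & (threshold - 1))" is the usual
 * power-of-two modulus trick, so an eager head update is requested once
 * every egrbufs.threshold buffers (and only at buffer offset 0). This
 * assumes the threshold is a power of two.
 */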

/*
 * Validate and encode a given RcvArray Buffer size.
 * The function will check whether the given size falls within
 * allowed size ranges for the respective type and, optionally,
 * return the proper encoding.
 */
inline int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encoded)
{
	if (unlikely(!IS_ALIGNED(size, PAGE_SIZE)))
		return 0;
	if (unlikely(size < MIN_EAGER_BUFFER))
		return 0;
	if (size >
	    (type == PT_EAGER ? MAX_EAGER_BUFFER : MAX_EXPECTED_BUFFER))
		return 0;
	if (encoded)
		*encoded = ilog2(size / PAGE_SIZE) + 1;
	return 1;
}
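
/*
 * Worked example (illustrative): with 4 KiB pages, a 64 KiB eager buffer
 * gives size / PAGE_SIZE = 16, so the encoding is ilog2(16) + 1 = 5.
 * A return of 0 means the size was rejected; 1 means it validated.
 */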

static void rcv_hdrerr(struct hfi1_ctxtdata *rcd, struct hfi1_pportdata *ppd,
		       struct hfi1_packet *packet)
{
	struct hfi1_message_header *rhdr = packet->hdr;
	u32 rte = rhf_rcv_type_err(packet->rhf);
	int lnh = be16_to_cpu(rhdr->lrh[0]) & 3;
	struct hfi1_ibport *ibp = &ppd->ibport_data;

	if (packet->rhf & (RHF_VCRC_ERR | RHF_ICRC_ERR))
		return;

	if (packet->rhf & RHF_TID_ERR) {
		/* For TIDERR and RC QPs preemptively schedule a NAK */
		struct hfi1_ib_header *hdr = (struct hfi1_ib_header *)rhdr;
		struct hfi1_other_headers *ohdr = NULL;
		u32 tlen = rhf_pkt_len(packet->rhf); /* in bytes */
		u16 lid = be16_to_cpu(hdr->lrh[1]);
		u32 qp_num;
		u32 rcv_flags = 0;

		/* Sanity check packet */
		if (tlen < 24)
			goto drop;

		/* Check for GRH */
		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			u32 vtf;

			ohdr = &hdr->u.l.oth;
			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
				goto drop;
			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
				goto drop;
			rcv_flags |= HFI1_HAS_GRH;
		} else
			goto drop;

		/* Get the destination QP number. */
		qp_num = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		if (lid < HFI1_MULTICAST_LID_BASE) {
			struct hfi1_qp *qp;

			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, qp_num);
			if (!qp) {
				rcu_read_unlock();
				goto drop;
			}

			/*
			 * Handle only RC QPs - for other QP types drop error
			 * packet.
			 */
			spin_lock(&qp->r_lock);

			/* Check for valid receive state. */
			if (!(ib_hfi1_state_ops[qp->state] &
			      HFI1_PROCESS_RECV_OK)) {
				ibp->n_pkt_drops++;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_RC:
				hfi1_rc_hdrerr(rcd, hdr, rcv_flags, qp);
				break;
			default:
				/* For now don't handle any other QP types */
				break;
			}

			spin_unlock(&qp->r_lock);
			rcu_read_unlock();
		} /* Unicast QP */
	} /* Valid packet with TIDErr */

	/* handle "RcvTypeErr" flags */
	switch (rte) {
	case RHF_RTE_ERROR_OP_CODE_ERR:
	{
		u32 opcode;
		void *ebuf = NULL;
		__be32 *bth = NULL;

		if (rhf_use_egr_bfr(packet->rhf))
			ebuf = packet->ebuf;
		if (ebuf == NULL)
			goto drop; /* this should never happen */

		if (lnh == HFI1_LRH_BTH)
			bth = (__be32 *)ebuf;
		else if (lnh == HFI1_LRH_GRH)
			bth = (__be32 *)((char *)ebuf + sizeof(struct ib_grh));
		else
			goto drop;

		opcode = be32_to_cpu(bth[0]) >> 24;
		opcode &= 0xff;

		if (opcode == IB_OPCODE_CNP) {
			/*
			 * Only in pre-B0 h/w is the CNP_OPCODE handled
			 * via this code path (errata 291394).
			 */
			struct hfi1_qp *qp = NULL;
			u32 lqpn, rqpn;
			u16 rlid;
			u8 svc_type, sl, sc5;

			sc5 = (be16_to_cpu(rhdr->lrh[0]) >> 12) & 0xf;
			if (rhf_dc_info(packet->rhf))
				sc5 |= 0x10;
			sl = ibp->sc_to_sl[sc5];

			lqpn = be32_to_cpu(bth[1]) & HFI1_QPN_MASK;
			rcu_read_lock();
			qp = hfi1_lookup_qpn(ibp, lqpn);
			if (qp == NULL) {
				rcu_read_unlock();
				goto drop;
			}

			switch (qp->ibqp.qp_type) {
			case IB_QPT_UD:
				rlid = 0;
				rqpn = 0;
				svc_type = IB_CC_SVCTYPE_UD;
				break;
			case IB_QPT_UC:
				rlid = be16_to_cpu(rhdr->lrh[3]);
				rqpn = qp->remote_qpn;
				svc_type = IB_CC_SVCTYPE_UC;
				break;
			default:
				goto drop;
			}

			process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
			rcu_read_unlock();
		}

		packet->rhf &= ~RHF_RCV_TYPE_ERR_SMASK;
		break;
	}
	default:
		break;
	}

drop:
	return;
}

static inline void init_packet(struct hfi1_ctxtdata *rcd,
			       struct hfi1_packet *packet)
{
	packet->rsize = rcd->rcvhdrqentsize; /* words */
	packet->maxcnt = rcd->rcvhdrq_cnt * packet->rsize; /* words */
	packet->rcd = rcd;
	packet->updegr = 0;
	packet->etail = -1;
	packet->rhf_addr = (__le32 *) rcd->rcvhdrq + rcd->head +
			   rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);
	packet->rhqoff = rcd->head;
	packet->numpkt = 0;
	packet->rcv_flags = 0;
}

#ifndef CONFIG_PRESCAN_RXQ
static void prescan_rxq(struct hfi1_packet *packet) {}
#else /* CONFIG_PRESCAN_RXQ */
static int prescan_receive_queue;

static void process_ecn(struct hfi1_qp *qp, struct hfi1_ib_header *hdr,
			struct hfi1_other_headers *ohdr,
			u64 rhf, struct ib_grh *grh)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u32 bth1;
	u8 sc5, svc_type;
	int is_fecn, is_becn;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_UD:
		svc_type = IB_CC_SVCTYPE_UD;
		break;
	case IB_QPT_UC:	/* LATER */
	case IB_QPT_RC:	/* LATER */
	default:
		return;
	}

	is_fecn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_FECN_SHIFT) &
		HFI1_FECN_MASK;
	is_becn = (be32_to_cpu(ohdr->bth[1]) >> HFI1_BECN_SHIFT) &
		HFI1_BECN_MASK;

	sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	if (rhf_dc_info(rhf))
		sc5 |= 0x10;

	if (is_fecn) {
		u32 src_qpn = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
		u16 pkey = (u16)be32_to_cpu(ohdr->bth[0]);
		u16 dlid = be16_to_cpu(hdr->lrh[1]);
		u16 slid = be16_to_cpu(hdr->lrh[3]);

		return_cnp(ibp, qp, src_qpn, pkey, dlid, slid, sc5, grh);
	}

	if (is_becn) {
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		u8 sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, 0, lqpn, 0, svc_type);
	}

	/* turn off BECN, or FECN */
	bth1 = be32_to_cpu(ohdr->bth[1]);
	bth1 &= ~(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
	bth1 &= ~(HFI1_BECN_MASK << HFI1_BECN_SHIFT);
	ohdr->bth[1] = cpu_to_be32(bth1);
}

struct ps_mdata {
	struct hfi1_ctxtdata *rcd;
	u32 rsize;
	u32 maxcnt;
	u32 ps_head;
	u32 ps_tail;
	u32 ps_seq;
};

static inline void init_ps_mdata(struct ps_mdata *mdata,
				 struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;

	mdata->rcd = rcd;
	mdata->rsize = packet->rsize;
	mdata->maxcnt = packet->maxcnt;

	if (rcd->ps_state.initialized == 0) {
		mdata->ps_head = packet->rhqoff;
		rcd->ps_state.initialized++;
	} else
		mdata->ps_head = rcd->ps_state.ps_head;

	if (HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		mdata->ps_tail = packet->hdrqtail;
		mdata->ps_seq = 0; /* not used with DMA_RTAIL */
	} else {
		mdata->ps_tail = 0; /* used only with DMA_RTAIL */
		mdata->ps_seq = rcd->seq_cnt;
	}
}

static inline int ps_done(struct ps_mdata *mdata, u64 rhf)
{
	if (HFI1_CAP_IS_KSET(DMA_RTAIL))
		return mdata->ps_head == mdata->ps_tail;
	return mdata->ps_seq != rhf_rcv_seq(rhf);
}

static inline void update_ps_mdata(struct ps_mdata *mdata)
{
	struct hfi1_ctxtdata *rcd = mdata->rcd;

	mdata->ps_head += mdata->rsize;
	if (mdata->ps_head > mdata->maxcnt)
		mdata->ps_head = 0;
	rcd->ps_state.ps_head = mdata->ps_head;
	if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		if (++mdata->ps_seq > 13)
			mdata->ps_seq = 1;
	}
}
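
/*
 * Note: without DMA_RTAIL each receive header queue entry carries an RHF
 * sequence number that cycles 1..13; software advances its own copy in
 * lockstep and treats a mismatch as "no new packet". The same 1..13 wrap
 * appears in the interrupt handlers below.
 */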

/*
 * prescan_rxq - search through the receive queue looking for packets
 * containing Explicit Congestion Notifications (FECNs, or BECNs).
 * When an ECN is found, process the Congestion Notification, and toggle
 * it off.
 */
static void prescan_rxq(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	struct ps_mdata mdata;

	if (!prescan_receive_queue)
		return;

	init_ps_mdata(&mdata, packet);

	while (1) {
		struct hfi1_devdata *dd = rcd->dd;
		struct hfi1_ibport *ibp = &rcd->ppd->ibport_data;
		__le32 *rhf_addr = (__le32 *) rcd->rcvhdrq + mdata.ps_head +
				   dd->rhf_offset;
		struct hfi1_qp *qp;
		struct hfi1_ib_header *hdr;
		struct hfi1_other_headers *ohdr;
		struct ib_grh *grh = NULL;
		u64 rhf = rhf_to_cpu(rhf_addr);
		u32 etype = rhf_rcv_type(rhf), qpn;
		int is_ecn = 0;
		u8 lnh;

		if (ps_done(&mdata, rhf))
			break;

		if (etype != RHF_RCV_TYPE_IB)
			goto next;

		hdr = (struct hfi1_ib_header *)
			hfi1_get_msgheader(dd, rhf_addr);
		lnh = be16_to_cpu(hdr->lrh[0]) & 3;

		if (lnh == HFI1_LRH_BTH)
			ohdr = &hdr->u.oth;
		else if (lnh == HFI1_LRH_GRH) {
			ohdr = &hdr->u.l.oth;
			grh = &hdr->u.l.grh;
		} else
			goto next; /* just in case */

		is_ecn |= be32_to_cpu(ohdr->bth[1]) &
			(HFI1_FECN_MASK << HFI1_FECN_SHIFT);
		is_ecn |= be32_to_cpu(ohdr->bth[1]) &
			(HFI1_BECN_MASK << HFI1_BECN_SHIFT);

		if (!is_ecn)
			goto next;

		qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		rcu_read_lock();
		qp = hfi1_lookup_qpn(ibp, qpn);

		if (qp == NULL) {
			rcu_read_unlock();
			goto next;
		}

		process_ecn(qp, hdr, ohdr, rhf, grh);
		rcu_read_unlock();
next:
		update_ps_mdata(&mdata);
	}
}
#endif /* CONFIG_PRESCAN_RXQ */

#define RCV_PKT_OK 0x0
#define RCV_PKT_MAX 0x1

static inline int process_rcv_packet(struct hfi1_packet *packet)
{
	int ret = RCV_PKT_OK;

	packet->hdr = hfi1_get_msgheader(packet->rcd->dd,
					 packet->rhf_addr);
	packet->hlen = (u8 *)packet->rhf_addr - (u8 *)packet->hdr;
	packet->etype = rhf_rcv_type(packet->rhf);
	/* total length */
	packet->tlen = rhf_pkt_len(packet->rhf); /* in bytes */
	/* retrieve eager buffer details */
	packet->ebuf = NULL;
	if (rhf_use_egr_bfr(packet->rhf)) {
		packet->etail = rhf_egr_index(packet->rhf);
		packet->ebuf = get_egrbuf(packet->rcd, packet->rhf,
					  &packet->updegr);
		/*
		 * Prefetch the contents of the eager buffer.  It is
		 * OK to send a negative length to prefetch_range().
		 * The +2 is the size of the RHF.
		 */
		prefetch_range(packet->ebuf,
			       packet->tlen - ((packet->rcd->rcvhdrqentsize -
					(rhf_hdrq_offset(packet->rhf)+2)) * 4));
	}

	/*
	 * Call a type specific handler for the packet. We
	 * should be able to trust that etype won't be beyond
	 * the range of valid indexes. If so something is really
	 * wrong and we can probably just let things come
	 * crashing down. There is no need to eat another
	 * comparison in this performance critical code.
	 */
	packet->rcd->dd->rhf_rcv_function_map[packet->etype](packet);
	packet->numpkt++;

	/* Set up for the next packet */
	packet->rhqoff += packet->rsize;
	if (packet->rhqoff >= packet->maxcnt)
		packet->rhqoff = 0;

	if (packet->numpkt == MAX_PKT_RECV) {
		ret = RCV_PKT_MAX;
		this_cpu_inc(*packet->rcd->dd->rcv_limit);
	}

	packet->rhf_addr = (__le32 *) packet->rcd->rcvhdrq + packet->rhqoff +
			   packet->rcd->dd->rhf_offset;
	packet->rhf = rhf_to_cpu(packet->rhf_addr);

	return ret;
}
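
/*
 * Budgeting note: RCV_PKT_MAX is returned after MAX_PKT_RECV (64) packets,
 * so the caller's receive loop exits and the per-CPU rcv_limit counter
 * records that the interrupt budget was exhausted; the next interrupt
 * picks up where this one left off.
 */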

static inline void process_rcv_update(int last, struct hfi1_packet *packet)
{
	/*
	 * Update head regs etc., every 16 packets, if not last pkt,
	 * to help prevent rcvhdrq overflows, when many packets
	 * are processed and queue is nearly full.
	 * Don't request an interrupt for intermediate updates.
	 */
	if (!last && !(packet->numpkt & 0xf)) {
		update_usrhead(packet->rcd, packet->rhqoff, packet->updegr,
			       packet->etail, 0, 0);
		packet->updegr = 0;
	}
	packet->rcv_flags = 0;
}

static inline void finish_packet(struct hfi1_packet *packet)
{
	/*
	 * Nothing we need to free for the packet.
	 *
	 * The only thing we need to do is a final update and call for an
	 * interrupt.
	 */
	update_usrhead(packet->rcd, packet->rcd->head, packet->updegr,
		       packet->etail, rcv_intr_dynamic, packet->numpkt);
}

static inline void process_rcv_qp_work(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd;
	struct hfi1_qp *qp, *nqp;

	rcd = packet->rcd;
	rcd->head = packet->rhqoff;

	/*
	 * Iterate over all QPs waiting to respond.
	 * The list won't change since the IRQ is only run on one CPU.
	 */
	list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) {
		list_del_init(&qp->rspwait);
		if (qp->r_flags & HFI1_R_RSP_NAK) {
			qp->r_flags &= ~HFI1_R_RSP_NAK;
			hfi1_send_rc_ack(rcd, qp, 0);
		}
		if (qp->r_flags & HFI1_R_RSP_SEND) {
			unsigned long flags;

			qp->r_flags &= ~HFI1_R_RSP_SEND;
			spin_lock_irqsave(&qp->s_lock, flags);
			if (ib_hfi1_state_ops[qp->state] &
			    HFI1_PROCESS_OR_FLUSH_SEND)
				hfi1_schedule_send(qp);
			spin_unlock_irqrestore(&qp->s_lock, flags);
		}
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * Handle receive interrupts when using the no dma rtail option.
 */
void handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd)
{
	u32 seq;
	int last = 0;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	seq = rhf_rcv_seq(packet.rhf);
	if (seq != rcd->seq_cnt)
		goto bail;

	prescan_rxq(&packet);

	while (!last) {
		last = process_rcv_packet(&packet);
		seq = rhf_rcv_seq(packet.rhf);
		if (++rcd->seq_cnt > 13)
			rcd->seq_cnt = 1;
		if (seq != rcd->seq_cnt)
			last = 1;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
}

void handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd)
{
	u32 hdrqtail;
	int last = 0;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);
	hdrqtail = get_rcvhdrtail(rcd);
	if (packet.rhqoff == hdrqtail)
		goto bail;
	smp_rmb();  /* prevent speculative reads of dma'ed hdrq */

	prescan_rxq(&packet);

	while (!last) {
		last = process_rcv_packet(&packet);
		if (packet.rhqoff == hdrqtail)
			last = 1;
		process_rcv_update(last, &packet);
	}
	process_rcv_qp_work(&packet);
bail:
	finish_packet(&packet);
}

static inline void set_all_nodma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_nodma_rtail;
}

static inline void set_all_dma_rtail(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->first_user_ctxt; i++)
		dd->rcd[i]->do_interrupt =
			&handle_receive_interrupt_dma_rtail;
}

/*
 * handle_receive_interrupt - receive a packet
 * @rcd: the context
 *
 * Called from interrupt handler for errors or receive interrupt.
 * This is the slow path interrupt handler.
 */
void handle_receive_interrupt(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 hdrqtail;
	int last = 0, needset = 1;
	struct hfi1_packet packet;

	init_packet(rcd, &packet);

	if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
		u32 seq = rhf_rcv_seq(packet.rhf);

		if (seq != rcd->seq_cnt)
			goto bail;
		hdrqtail = 0;
	} else {
		hdrqtail = get_rcvhdrtail(rcd);
		if (packet.rhqoff == hdrqtail)
			goto bail;
		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
	}

	prescan_rxq(&packet);

	while (!last) {
		if (unlikely(dd->do_drop && atomic_xchg(&dd->drop_packet,
			DROP_PACKET_OFF) == DROP_PACKET_ON)) {
			dd->do_drop = 0;

			/* On to the next packet */
			packet.rhqoff += packet.rsize;
			packet.rhf_addr = (__le32 *) rcd->rcvhdrq +
					  packet.rhqoff +
					  dd->rhf_offset;
			packet.rhf = rhf_to_cpu(packet.rhf_addr);
		} else {
			last = process_rcv_packet(&packet);
		}

		if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) {
			u32 seq = rhf_rcv_seq(packet.rhf);

			if (++rcd->seq_cnt > 13)
				rcd->seq_cnt = 1;
			if (seq != rcd->seq_cnt)
				last = 1;
			if (needset) {
				dd_dev_info(dd,
					    "Switching to NO_DMA_RTAIL\n");
				set_all_nodma_rtail(dd);
				needset = 0;
			}
		} else {
			if (packet.rhqoff == hdrqtail)
				last = 1;
			if (needset) {
				dd_dev_info(dd,
					    "Switching to DMA_RTAIL\n");
				set_all_dma_rtail(dd);
				needset = 0;
			}
		}

		process_rcv_update(last, &packet);
	}

	process_rcv_qp_work(&packet);

bail:
	/*
	 * Always write head at end, and setup rcv interrupt, even
	 * if no packets were processed.
	 */
	finish_packet(&packet);
}

/*
 * Convert a given MTU size to the on-wire MAD packet enumeration.
 * Return the supplied default if the size is invalid.
 */
int mtu_to_enum(u32 mtu, int default_if_bad)
{
	switch (mtu) {
	case     0: return OPA_MTU_0;
	case   256: return OPA_MTU_256;
	case   512: return OPA_MTU_512;
	case  1024: return OPA_MTU_1024;
	case  2048: return OPA_MTU_2048;
	case  4096: return OPA_MTU_4096;
	case  8192: return OPA_MTU_8192;
	case 10240: return OPA_MTU_10240;
	}
	return default_if_bad;
}
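
/*
 * Example: mtu_to_enum(8192, OPA_MTU_2048) yields OPA_MTU_8192, while an
 * unsupported size such as 9000 falls through to the supplied default
 * (OPA_MTU_2048 here). enum_to_mtu() below is the inverse mapping, with
 * 0xffff flagging an unknown enum value.
 */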

u16 enum_to_mtu(int mtu)
{
	switch (mtu) {
	case OPA_MTU_0:     return 0;
	case OPA_MTU_256:   return 256;
	case OPA_MTU_512:   return 512;
	case OPA_MTU_1024:  return 1024;
	case OPA_MTU_2048:  return 2048;
	case OPA_MTU_4096:  return 4096;
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default: return 0xffff;
	}
}

/*
 * set_mtu - set the MTU
 * @ppd: the per port data
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  We do not deal with what happens
 * to programs that are already running when the size changes.
 */
int set_mtu(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
	int i, drain, ret = 0, is_up = 0;

	ppd->ibmtu = 0;
	for (i = 0; i < ppd->vls_supported; i++)
		if (ppd->ibmtu < dd->vld[i].mtu)
			ppd->ibmtu = dd->vld[i].mtu;
	ppd->ibmaxlen = ppd->ibmtu + lrh_max_header_bytes(ppd->dd);

	mutex_lock(&ppd->hls_lock);
	if (ppd->host_link_state == HLS_UP_INIT
			|| ppd->host_link_state == HLS_UP_ARMED
			|| ppd->host_link_state == HLS_UP_ACTIVE)
		is_up = 1;

	drain = !is_ax(dd) && is_up;

	if (drain)
		/*
		 * MTU is specified per-VL. To ensure that no packet gets
		 * stuck (due, e.g., to the MTU for the packet's VL being
		 * reduced), empty the per-VL FIFOs before adjusting MTU.
		 */
		ret = stop_drain_data_vls(dd);

	if (ret) {
		dd_dev_err(dd, "%s: cannot stop/drain VLs - refusing to change per-VL MTUs\n",
			   __func__);
		goto err;
	}

	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_MTU, 0);

	if (drain)
		open_fill_data_vls(dd); /* reopen all VLs */

err:
	mutex_unlock(&ppd->hls_lock);

	return ret;
}

int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
{
	struct hfi1_devdata *dd = ppd->dd;

	ppd->lid = lid;
	ppd->lmc = lmc;
	hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);

	dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);

	return 0;
}

/*
 * The following deal with the "obviously simple" task of overriding the
 * state of the LEDs, which normally indicate link physical and logical
 * status. The complications arise in dealing with different hardware
 * mappings and the board-dependent routine being called from interrupts.
 * And then there's the requirement to _flash_ them.
 */
#define LED_OVER_FREQ_SHIFT 8
#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
/* Below is "non-zero" to force override, but both actual LEDs are off */
#define LED_OVER_BOTH_OFF (8)

static void run_led_override(unsigned long opaque)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff;
	int ph_idx;

	if (!(dd->flags & HFI1_INITTED))
		return;

	ph_idx = ppd->led_override_phase++ & 1;
	ppd->led_override = ppd->led_override_vals[ph_idx];
	timeoff = ppd->led_override_timeoff;

	/*
	 * don't re-fire the timer if user asked for it to be off; we let
	 * it fire one more time after they turn it off to simplify
	 */
	if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
		mod_timer(&ppd->led_override_timer, jiffies + timeoff);
}

void hfi1_set_led_override(struct hfi1_pportdata *ppd, unsigned int val)
{
	struct hfi1_devdata *dd = ppd->dd;
	int timeoff, freq;

	if (!(dd->flags & HFI1_INITTED))
		return;

	/* First check if we are blinking. If not, use 1HZ polling */
	timeoff = HZ;
	freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;

	if (freq) {
		/* For blink, set each phase from one nybble of val */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = (val >> 4) & 0xF;
		timeoff = (HZ << 4)/freq;
	} else {
		/* Non-blink set both phases the same. */
		ppd->led_override_vals[0] = val & 0xF;
		ppd->led_override_vals[1] = val & 0xF;
	}
	ppd->led_override_timeoff = timeoff;

	/*
	 * If the timer has not already been started, do so. Use a "quick"
	 * timeout so the function will be called soon, to look at our request.
	 */
	if (atomic_inc_return(&ppd->led_override_timer_active) == 1) {
		/* Need to start timer */
		init_timer(&ppd->led_override_timer);
		ppd->led_override_timer.function = run_led_override;
		ppd->led_override_timer.data = (unsigned long) ppd;
		ppd->led_override_timer.expires = jiffies + 1;
		add_timer(&ppd->led_override_timer);
	} else {
		if (ppd->led_override_vals[0] || ppd->led_override_vals[1])
			mod_timer(&ppd->led_override_timer, jiffies + 1);
		atomic_dec(&ppd->led_override_timer_active);
	}
}
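
/*
 * Layout of val, as implied by the code above: bits 3:0 select the LED
 * state for phase 0, bits 7:4 the state for phase 1, and bits 15:8 an
 * optional blink frequency (timeoff = (HZ << 4) / freq when non-zero).
 * For example, val = 0x208f alternates states 0xf and 0x8 every HZ/2.
 */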

/**
 * hfi1_reset_device - reset the chip if possible
 * @unit: the device to reset
 *
 * Whether or not reset is successful, we attempt to re-initialize the chip
 * (that is, much like a driver unload/reload).  We clear the INITTED flag
 * so that the various entry points will fail until we reinitialize.  For
 * now, we only allow this if no user contexts are open that use chip
 * resources.
 */
int hfi1_reset_device(int unit)
{
	int ret, i;
	struct hfi1_devdata *dd = hfi1_lookup(unit);
	struct hfi1_pportdata *ppd;
	unsigned long flags;
	int pidx;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	dd_dev_info(dd, "Reset on unit %u requested\n", unit);

	if (!dd->kregbase || !(dd->flags & HFI1_PRESENT)) {
		dd_dev_info(dd,
			"Invalid unit number %u or not initialized or not present\n",
			unit);
		ret = -ENXIO;
		goto bail;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	if (dd->rcd)
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			if (!dd->rcd[i] || !dd->rcd[i]->cnt)
				continue;
			spin_unlock_irqrestore(&dd->uctxt_lock, flags);
			ret = -EBUSY;
			goto bail;
		}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (atomic_read(&ppd->led_override_timer_active)) {
			/* Need to stop LED timer, _then_ shut off LEDs */
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}

		/* Shut off LEDs after we are sure timer is not running */
		ppd->led_override = LED_OVER_BOTH_OFF;
	}
	if (dd->flags & HFI1_HAS_SEND_DMA)
		sdma_exit(dd);

	hfi1_reset_cpu_counters(dd);

	ret = hfi1_init(dd, 1);

	if (ret)
		dd_dev_err(dd,
			"Reinitialize unit %u after reset failed with %d\n",
			unit, ret);
	else
		dd_dev_info(dd, "Reinitialized unit %u after resetting\n",
			unit);

bail:
	return ret;
}

void handle_eflags(struct hfi1_packet *packet)
{
	struct hfi1_ctxtdata *rcd = packet->rcd;
	u32 rte = rhf_rcv_type_err(packet->rhf);

	dd_dev_err(rcd->dd,
		"receive context %d: rhf 0x%016llx, errs [ %s%s%s%s%s%s%s%s] rte 0x%x\n",
		rcd->ctxt, packet->rhf,
		packet->rhf & RHF_K_HDR_LEN_ERR ? "k_hdr_len " : "",
		packet->rhf & RHF_DC_UNC_ERR ? "dc_unc " : "",
		packet->rhf & RHF_DC_ERR ? "dc " : "",
		packet->rhf & RHF_TID_ERR ? "tid " : "",
		packet->rhf & RHF_LEN_ERR ? "len " : "",
		packet->rhf & RHF_ECC_ERR ? "ecc " : "",
		packet->rhf & RHF_VCRC_ERR ? "vcrc " : "",
		packet->rhf & RHF_ICRC_ERR ? "icrc " : "",
		rte);

	rcv_hdrerr(rcd, rcd->ppd, packet);
}

/*
 * The following functions are called by the interrupt handler. They are type
 * specific handlers for each packet type.
 */
int process_receive_ib(struct hfi1_packet *packet)
{
	trace_hfi1_rcvhdr(packet->rcd->ppd->dd,
			  packet->rcd->ctxt,
			  rhf_err_flags(packet->rhf),
			  RHF_RCV_TYPE_IB,
			  packet->hlen,
			  packet->tlen,
			  packet->updegr,
			  rhf_egr_index(packet->rhf));

	if (unlikely(rhf_err_flags(packet->rhf))) {
		handle_eflags(packet);
		return RHF_RCV_CONTINUE;
	}

	hfi1_ib_rcv(packet);
	return RHF_RCV_CONTINUE;
}

int process_receive_bypass(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Bypass packets are not supported in normal operation. Dropping\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_error(struct hfi1_packet *packet)
{
	handle_eflags(packet);

	if (unlikely(rhf_err_flags(packet->rhf)))
		dd_dev_err(packet->rcd->dd,
			   "Unhandled error packet received. Dropping.\n");

	return RHF_RCV_CONTINUE;
}

int kdeth_process_expected(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled expected packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int kdeth_process_eager(struct hfi1_packet *packet)
{
	if (unlikely(rhf_err_flags(packet->rhf)))
		handle_eflags(packet);

	dd_dev_err(packet->rcd->dd,
		   "Unhandled eager packet received. Dropping.\n");
	return RHF_RCV_CONTINUE;
}

int process_receive_invalid(struct hfi1_packet *packet)
{
	dd_dev_err(packet->rcd->dd, "Invalid packet type %d. Dropping\n",
		   rhf_rcv_type(packet->rhf));
	return RHF_RCV_CONTINUE;
}