2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
65 #include <asm/uaccess.h>
71 #include "cxgb4_dcb.h"
72 #include "cxgb4_debugfs.h"
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
86 #define MAX_SGE_TIMERVAL 200U
90 * Physical Function provisioning constants.
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
104 #ifdef CONFIG_PCI_IOV
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them. For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
138 unsigned int portn, portvec;
141 * Give PFs access to all of the ports.
144 return FW_PFVF_CMD_PMASK_M;
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
153 if (adapter->params.nports == 0)
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask.
162 * Otherwise mask that bit out of the port vector and
163 * decrement our port number ...
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
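/*
 * Worked example of the lowest-set-bit trick above (illustrative values):
 * with portvec = 0b1010, portvec & (portvec - 1) clears the lowest set
 * bit (giving 0b1000), so the XOR isolates it: pmask = 0b0010. Masking
 * that bit out and decrementing portn, as the comment above describes,
 * walks the active ports until the one assigned to this PF is reached.
 */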
175 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024,
177 MAX_RSPQ_ENTRIES = 16384,
178 MAX_RX_BUFFERS = 16384,
179 MIN_TXQ_ENTRIES = 32,
180 MIN_CTRL_TXQ_ENTRIES = 32,
181 MIN_RSPQ_ENTRIES = 128,
185 /* Host shadow copy of ingress filter entry. This is in host native format
186 * and doesn't match the ordering or bit order, etc. of the hardware or the
187 * firmware command. The use of bit-field structure elements is purely to
188 * remind ourselves of the field size limitations and save memory in the case
189 * where the filter table is large.
191 struct filter_entry {
192 /* Administrative fields for filter.
194 u32 valid:1; /* filter allocated and valid */
195 u32 locked:1; /* filter is administratively locked */
197 u32 pending:1; /* filter action is pending firmware reply */
198 u32 smtidx:8; /* Source MAC Table index for smac */
199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
201 /* The filter itself. Most of this is a straight copy of information
202 * provided by the extended ioctl(). Some fields are translated to
203 * internal forms -- for instance the Ingress Queue ID passed in from
204 * the ioctl() is translated into the Absolute Ingress Queue ID.
206 struct ch_filter_specification fs;
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
213 /* Macros needed to support the PCI Device ID Table ...
215 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
216 static struct pci_device_id cxgb4_pci_tbl[] = {
217 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
219 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
222 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
224 #define CH_PCI_ID_TABLE_ENTRY(devid) \
225 {PCI_VDEVICE(CHELSIO, (devid)), 4}
227 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
231 #include "t4_pci_id_tbl.h"
233 #define FW4_FNAME "cxgb4/t4fw.bin"
234 #define FW5_FNAME "cxgb4/t5fw.bin"
235 #define FW4_CFNAME "cxgb4/t4-config.txt"
236 #define FW5_CFNAME "cxgb4/t5-config.txt"
238 MODULE_DESCRIPTION(DRV_DESC);
239 MODULE_AUTHOR("Chelsio Communications");
240 MODULE_LICENSE("Dual BSD/GPL");
241 MODULE_VERSION(DRV_VERSION);
242 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243 MODULE_FIRMWARE(FW4_FNAME);
244 MODULE_FIRMWARE(FW5_FNAME);
247 * Normally we're willing to become the firmware's Master PF but will be happy
248 * if another PF has already become the Master and initialized the adapter.
249 * Setting "force_init" will cause this driver to forcibly establish itself as
250 * the Master PF and initialize the adapter.
252 static uint force_init;
254 module_param(force_init, uint, 0644);
255 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
258 * Normally if the firmware we connect to has Configuration File support, we
259 * use that and only fall back to the old Driver-based initialization if the
260 * Configuration File fails for some reason. If force_old_init is set, then
261 * we'll always use the old Driver-based initialization sequence.
263 static uint force_old_init;
265 module_param(force_old_init, uint, 0644);
266 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
268 static int dflt_msg_enable = DFLT_MSG_ENABLE;
270 module_param(dflt_msg_enable, int, 0644);
271 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
274 * The driver uses the best interrupt scheme available on a platform in the
275 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
276 * of these schemes the driver may consider as follows:
278 * msi = 2: choose from among all three options
279 * msi = 1: only consider MSI and INTx interrupts
280 * msi = 0: force INTx interrupts
284 module_param(msi, int, 0644);
285 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
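/*
 * Usage sketch (hypothetical values): "modprobe cxgb4 msi=1" restricts the
 * driver to MSI/INTx; the 0644 permission above also exposes the setting
 * under /sys/module/cxgb4/parameters/msi.
 */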
288 * Queue interrupt hold-off timer values. Queues default to the first of these
291 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
293 module_param_array(intr_holdoff, uint, NULL, 0644);
294 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
295 "0..4 in microseconds");
297 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
299 module_param_array(intr_cnt, uint, NULL, 0644);
300 MODULE_PARM_DESC(intr_cnt,
301 "thresholds 1..3 for queue interrupt packet counters");
304 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
305 * offset by 2 bytes in order to have the IP headers line up on 4-byte
306 * boundaries. This is a requirement for many architectures which will throw
307 * a machine check fault if an attempt is made to access one of the 4-byte IP
308 * header fields on a non-4-byte boundary. And it's a major performance issue
309 * even on some architectures which allow it like some implementations of the
310 * x86 ISA. However, some architectures don't mind this and for some very
311 * edge-case performance sensitive applications (like forwarding large volumes
312 * of small packets), setting this DMA offset to 0 will decrease the number of
313 * PCI-E Bus transfers enough to measurably affect performance.
315 static int rx_dma_offset = 2;
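/*
 * Why 2 bytes: an Ethernet header is 14 bytes, so starting the frame at
 * offset 2 places the IP header at byte 16, a 4-byte boundary. With
 * rx_dma_offset = 0 the IP header would begin at byte 14, misaligned.
 */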
319 #ifdef CONFIG_PCI_IOV
320 module_param(vf_acls, bool, 0644);
321 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
323 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
324 * on SR-IOV Capable Physical Functions.
326 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
328 module_param_array(num_vf, uint, NULL, 0644);
329 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
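/*
 * Usage sketch (hypothetical values): "modprobe cxgb4 num_vf=4,0,0,2"
 * requests 4 VFs on PF0 and 2 on PF3, with none on PF1/PF2.
 */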
332 /* TX Queue select used to determine what algorithm to use for selecting TX
333 * queue. Select between the kernel-provided function (select_queue=0) and
334 * the driver's cxgb_select_queue function (select_queue=1)
336 * Default: select_queue=0
338 static int select_queue;
339 module_param(select_queue, int, 0644);
340 MODULE_PARM_DESC(select_queue,
341 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
344 * The filter TCAM has a fixed portion and a variable portion. The fixed
345 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
346 * ports. The variable portion is 36 bits which can include things like Exact
347 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
348 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
349 * far exceed the 36-bit budget for this "compressed" header portion of the
350 * filter. Thus, we have a scarce resource which must be carefully managed.
352 * By default we set this up to mostly match the set of filter matching
353 * capabilities of T3 but with accommodations for some of T4's more
354 * interesting features:
356 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
357 * [Inner] VLAN (17), Port (3), FCoE (1) }
360 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
361 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
362 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
365 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
367 module_param(tp_vlan_pri_map, uint, 0644);
368 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
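/*
 * Bit-budget check for the default, using the field widths quoted above:
 * IP Fragment (1) + MPS Match Type (3) + IP Protocol (8) + [Inner] VLAN (17)
 * + Port (3) + FCoE (1) = 33 bits, which fits the 36-bit compressed tuple
 * with 3 bits to spare.
 */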
370 static struct dentry *cxgb4_debugfs_root;
372 static LIST_HEAD(adapter_list);
373 static DEFINE_MUTEX(uld_mutex);
374 /* Adapter list to be accessed from atomic context */
375 static LIST_HEAD(adap_rcu_list);
376 static DEFINE_SPINLOCK(adap_rcu_lock);
377 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
378 static const char *uld_str[] = { "RDMA", "iSCSI" };
380 static void link_report(struct net_device *dev)
382 if (!netif_carrier_ok(dev))
383 netdev_info(dev, "link down\n");
385 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
387 const char *s = "10Mbps";
388 const struct port_info *p = netdev_priv(dev);
390 switch (p->link_cfg.speed) {
405 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
410 #ifdef CONFIG_CHELSIO_T4_DCB
411 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
412 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
414 struct port_info *pi = netdev_priv(dev);
415 struct adapter *adap = pi->adapter;
416 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
419 /* We use a simple mapping of Port TX Queue Index to DCB
420 * Priority when we're enabling DCB.
422 for (i = 0; i < pi->nqsets; i++, txq++) {
426 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
428 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
429 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
430 value = enable ? i : 0xffffffff;
432 /* Since we can be called while atomic (from "interrupt
433 * level") we need to issue the Set Parameters Commannd
434 * without sleeping (timeout < 0).
436 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
440 dev_err(adap->pdev_dev,
441 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
442 enable ? "set" : "unset", pi->port_id, i, -err);
444 txq->dcb_prio = value;
447 #endif /* CONFIG_CHELSIO_T4_DCB */
449 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
451 struct net_device *dev = adapter->port[port_id];
453 /* Skip changes from disabled ports. */
454 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
456 netif_carrier_on(dev);
458 #ifdef CONFIG_CHELSIO_T4_DCB
459 cxgb4_dcb_state_init(dev);
460 dcb_tx_queue_prio_enable(dev, false);
461 #endif /* CONFIG_CHELSIO_T4_DCB */
462 netif_carrier_off(dev);
469 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
471 static const char *mod_str[] = {
472 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
475 const struct net_device *dev = adap->port[port_id];
476 const struct port_info *pi = netdev_priv(dev);
478 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
479 netdev_info(dev, "port module unplugged\n");
480 else if (pi->mod_type < ARRAY_SIZE(mod_str))
481 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
485 * Configure the exact and hash address filters to handle a port's multicast
486 * and secondary unicast MAC addresses.
488 static int set_addr_filters(const struct net_device *dev, bool sleep)
496 const struct netdev_hw_addr *ha;
497 int uc_cnt = netdev_uc_count(dev);
498 int mc_cnt = netdev_mc_count(dev);
499 const struct port_info *pi = netdev_priv(dev);
500 unsigned int mb = pi->adapter->fn;
502 /* first do the secondary unicast addresses */
503 netdev_for_each_uc_addr(ha, dev) {
504 addr[naddr++] = ha->addr;
505 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
506 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
507 naddr, addr, filt_idx, &uhash, sleep);
516 /* next set up the multicast addresses */
517 netdev_for_each_mc_addr(ha, dev) {
518 addr[naddr++] = ha->addr;
519 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
520 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
521 naddr, addr, filt_idx, &mhash, sleep);
530 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
531 uhash | mhash, sleep);
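/*
 * Note on the batching above: addresses are staged into the fixed-size
 * addr[] array and flushed to t4_alloc_mac_filt() a chunk at a time;
 * whatever doesn't fit the exact-match filters accumulates in the
 * uhash/mhash masks that t4_set_addr_hash() installs at the end.
 */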
534 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
535 module_param(dbfifo_int_thresh, int, 0644);
536 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
539 * usecs to sleep while draining the dbfifo
541 static int dbfifo_drain_delay = 1000;
542 module_param(dbfifo_drain_delay, int, 0644);
543 MODULE_PARM_DESC(dbfifo_drain_delay,
544 "usecs to sleep while draining the dbfifo");
547 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
548 * If @mtu is -1 it is left unchanged.
550 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
553 struct port_info *pi = netdev_priv(dev);
555 ret = set_addr_filters(dev, sleep_ok);
557 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
558 (dev->flags & IFF_PROMISC) ? 1 : 0,
559 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
565 * link_start - enable a port
566 * @dev: the port to enable
568 * Performs the MAC and PHY actions needed to enable a port.
570 static int link_start(struct net_device *dev)
573 struct port_info *pi = netdev_priv(dev);
574 unsigned int mb = pi->adapter->fn;
577 * We do not set address filters and promiscuity here; the stack does
578 * that step explicitly.
580 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
581 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
583 ret = t4_change_mac(pi->adapter, mb, pi->viid,
584 pi->xact_addr_filt, dev->dev_addr, true,
587 pi->xact_addr_filt = ret;
592 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
596 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
597 true, CXGB4_DCB_ENABLED);
604 int cxgb4_dcb_enabled(const struct net_device *dev)
606 #ifdef CONFIG_CHELSIO_T4_DCB
607 struct port_info *pi = netdev_priv(dev);
609 if (!pi->dcb.enabled)
612 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
613 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
618 EXPORT_SYMBOL(cxgb4_dcb_enabled);
620 #ifdef CONFIG_CHELSIO_T4_DCB
621 /* Handle a Data Center Bridging update message from the firmware. */
622 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
624 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
625 struct net_device *dev = adap->port[port];
626 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
629 cxgb4_dcb_handle_fw_update(adap, pcmd);
630 new_dcb_enabled = cxgb4_dcb_enabled(dev);
632 /* If the DCB has become enabled or disabled on the port then we're
633 * going to need to set up/tear down DCB Priority parameters for the
634 * TX Queues associated with the port.
636 if (new_dcb_enabled != old_dcb_enabled)
637 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
639 #endif /* CONFIG_CHELSIO_T4_DCB */
641 /* Clear a filter and release any of its resources that we own. This also
642 * clears the filter's "pending" status.
644 static void clear_filter(struct adapter *adap, struct filter_entry *f)
646 /* If the new or old filter has loopback rewriting rules then we'll
647 * need to free any existing Layer Two Table (L2T) entries of the old
648 * filter rule. The firmware will handle freeing up any Source MAC
649 * Table (SMT) entries used for rewriting Source MAC Addresses in
653 cxgb4_l2t_release(f->l2t);
655 /* The zeroing of the filter rule below clears the filter valid,
656 * pending, locked flags, l2t pointer, etc. so it's all we need for
659 memset(f, 0, sizeof(*f));
662 /* Handle a filter write/deletion reply.
664 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
666 unsigned int idx = GET_TID(rpl);
667 unsigned int nidx = idx - adap->tids.ftid_base;
669 struct filter_entry *f;
671 if (idx >= adap->tids.ftid_base && nidx <
672 (adap->tids.nftids + adap->tids.nsftids)) {
674 ret = GET_TCB_COOKIE(rpl->cookie);
675 f = &adap->tids.ftid_tab[idx];
677 if (ret == FW_FILTER_WR_FLT_DELETED) {
678 /* Clear the filter when we get confirmation from the
679 * hardware that the filter has been deleted.
681 clear_filter(adap, f);
682 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
683 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
685 clear_filter(adap, f);
686 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
687 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
688 f->pending = 0; /* asynchronous setup completed */
691 /* Something went wrong. Issue a warning about the
692 * problem and clear everything out.
694 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
696 clear_filter(adap, f);
701 /* Response queue handler for the FW event queue.
703 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
704 const struct pkt_gl *gl)
706 u8 opcode = ((const struct rss_header *)rsp)->opcode;
708 rsp++; /* skip RSS header */
710 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
712 if (unlikely(opcode == CPL_FW4_MSG &&
713 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
715 opcode = ((const struct rss_header *)rsp)->opcode;
717 if (opcode != CPL_SGE_EGR_UPDATE) {
718 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
724 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
725 const struct cpl_sge_egr_update *p = (void *)rsp;
726 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
729 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
731 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
732 struct sge_eth_txq *eq;
734 eq = container_of(txq, struct sge_eth_txq, q);
735 netif_tx_wake_queue(eq->txq);
737 struct sge_ofld_txq *oq;
739 oq = container_of(txq, struct sge_ofld_txq, q);
740 tasklet_schedule(&oq->qresume_tsk);
742 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
743 const struct cpl_fw6_msg *p = (void *)rsp;
745 #ifdef CONFIG_CHELSIO_T4_DCB
746 const struct fw_port_cmd *pcmd = (const void *)p->data;
747 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
748 unsigned int action =
749 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
751 if (cmd == FW_PORT_CMD &&
752 action == FW_PORT_ACTION_GET_PORT_INFO) {
753 int port = FW_PORT_CMD_PORTID_G(
754 be32_to_cpu(pcmd->op_to_portid));
755 struct net_device *dev = q->adap->port[port];
756 int state_input = ((pcmd->u.info.dcbxdis_pkd &
757 FW_PORT_CMD_DCBXDIS_F)
758 ? CXGB4_DCB_INPUT_FW_DISABLED
759 : CXGB4_DCB_INPUT_FW_ENABLED);
761 cxgb4_dcb_state_fsm(dev, state_input);
764 if (cmd == FW_PORT_CMD &&
765 action == FW_PORT_ACTION_L2_DCB_CFG)
766 dcb_rpl(q->adap, pcmd);
770 t4_handle_fw_rpl(q->adap, p->data);
771 } else if (opcode == CPL_L2T_WRITE_RPL) {
772 const struct cpl_l2t_write_rpl *p = (void *)rsp;
774 do_l2t_write_rpl(q->adap, p);
775 } else if (opcode == CPL_SET_TCB_RPL) {
776 const struct cpl_set_tcb_rpl *p = (void *)rsp;
778 filter_rpl(q->adap, p);
780 dev_err(q->adap->pdev_dev,
781 "unexpected CPL %#x on FW event queue\n", opcode);
787 * uldrx_handler - response queue handler for ULD queues
788 * @q: the response queue that received the packet
789 * @rsp: the response queue descriptor holding the offload message
790 * @gl: the gather list of packet fragments
792 * Deliver an ingress offload packet to a ULD. All processing is done by
793 * the ULD; we just maintain statistics.
795 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
796 const struct pkt_gl *gl)
798 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
800 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
802 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
803 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
806 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
812 else if (gl == CXGB4_MSG_AN)
819 static void disable_msi(struct adapter *adapter)
821 if (adapter->flags & USING_MSIX) {
822 pci_disable_msix(adapter->pdev);
823 adapter->flags &= ~USING_MSIX;
824 } else if (adapter->flags & USING_MSI) {
825 pci_disable_msi(adapter->pdev);
826 adapter->flags &= ~USING_MSI;
831 * Interrupt handler for non-data events used with MSI-X.
833 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
835 struct adapter *adap = cookie;
837 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
840 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
842 t4_slow_intr_handler(adap);
847 * Name the MSI-X interrupts.
849 static void name_msix_vecs(struct adapter *adap)
851 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
853 /* non-data interrupts */
854 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
857 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
858 adap->port[0]->name);
860 /* Ethernet queues */
861 for_each_port(adap, j) {
862 struct net_device *d = adap->port[j];
863 const struct port_info *pi = netdev_priv(d);
865 for (i = 0; i < pi->nqsets; i++, msi_idx++)
866 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
871 for_each_ofldrxq(&adap->sge, i)
872 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
873 adap->port[0]->name, i);
875 for_each_rdmarxq(&adap->sge, i)
876 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
877 adap->port[0]->name, i);
879 for_each_rdmaciq(&adap->sge, i)
880 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
881 adap->port[0]->name, i);
884 static int request_msix_queue_irqs(struct adapter *adap)
886 struct sge *s = &adap->sge;
887 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
890 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
891 adap->msix_info[1].desc, &s->fw_evtq);
895 for_each_ethrxq(s, ethqidx) {
896 err = request_irq(adap->msix_info[msi_index].vec,
898 adap->msix_info[msi_index].desc,
899 &s->ethrxq[ethqidx].rspq);
904 for_each_ofldrxq(s, ofldqidx) {
905 err = request_irq(adap->msix_info[msi_index].vec,
907 adap->msix_info[msi_index].desc,
908 &s->ofldrxq[ofldqidx].rspq);
913 for_each_rdmarxq(s, rdmaqidx) {
914 err = request_irq(adap->msix_info[msi_index].vec,
916 adap->msix_info[msi_index].desc,
917 &s->rdmarxq[rdmaqidx].rspq);
922 for_each_rdmaciq(s, rdmaciqqidx) {
923 err = request_irq(adap->msix_info[msi_index].vec,
925 adap->msix_info[msi_index].desc,
926 &s->rdmaciq[rdmaciqqidx].rspq);
934 while (--rdmaciqqidx >= 0)
935 free_irq(adap->msix_info[--msi_index].vec,
936 &s->rdmaciq[rdmaciqqidx].rspq);
937 while (--rdmaqidx >= 0)
938 free_irq(adap->msix_info[--msi_index].vec,
939 &s->rdmarxq[rdmaqidx].rspq);
940 while (--ofldqidx >= 0)
941 free_irq(adap->msix_info[--msi_index].vec,
942 &s->ofldrxq[ofldqidx].rspq);
943 while (--ethqidx >= 0)
944 free_irq(adap->msix_info[--msi_index].vec,
945 &s->ethrxq[ethqidx].rspq);
946 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
950 static void free_msix_queue_irqs(struct adapter *adap)
952 int i, msi_index = 2;
953 struct sge *s = &adap->sge;
955 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
956 for_each_ethrxq(s, i)
957 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
958 for_each_ofldrxq(s, i)
959 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
960 for_each_rdmarxq(s, i)
961 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
962 for_each_rdmaciq(s, i)
963 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
967 * write_rss - write the RSS table for a given port
969 * @queues: array of queue indices for RSS
971 * Sets up the portion of the HW RSS table for the port's VI to distribute
972 * packets to the Rx queues in @queues.
974 static int write_rss(const struct port_info *pi, const u16 *queues)
978 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
980 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
984 /* map the queue indices to queue ids */
985 for (i = 0; i < pi->rss_size; i++, queues++)
986 rss[i] = q[*queues].rspq.abs_id;
988 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
989 pi->rss_size, rss, pi->rss_size);
995 * setup_rss - configure RSS
998 * Sets up RSS for each port.
1000 static int setup_rss(struct adapter *adap)
1004 for_each_port(adap, i) {
1005 const struct port_info *pi = adap2pinfo(adap, i);
1007 err = write_rss(pi, pi->rss);
1015 * Return the channel of the ingress queue with the given qid.
1017 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1019 qid -= p->ingr_start;
1020 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1024 * Wait until all NAPI handlers are descheduled.
1026 static void quiesce_rx(struct adapter *adap)
1030 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1031 struct sge_rspq *q = adap->sge.ingr_map[i];
1033 if (q && q->handler)
1034 napi_disable(&q->napi);
1039 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1041 static void enable_rx(struct adapter *adap)
1045 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1046 struct sge_rspq *q = adap->sge.ingr_map[i];
1051 napi_enable(&q->napi);
1052 /* 0-increment GTS to start the timer and enable interrupts */
1053 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1054 SEINTARM(q->intr_params) |
1055 INGRESSQID(q->cntxt_id));
1060 * setup_sge_queues - configure SGE Tx/Rx/response queues
1061 * @adap: the adapter
1063 * Determines how many sets of SGE queues to use and initializes them.
1064 * We support multiple queue sets per port if we have MSI-X; otherwise
1065 * just one queue set per port.
1067 static int setup_sge_queues(struct adapter *adap)
1069 int err, msi_idx, i, j;
1070 struct sge *s = &adap->sge;
1072 bitmap_zero(s->starving_fl, MAX_EGRQ);
1073 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1075 if (adap->flags & USING_MSIX)
1076 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1078 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1082 msi_idx = -((int)s->intrq.abs_id + 1);
1085 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1086 msi_idx, NULL, fwevtq_handler);
1088 freeout: t4_free_sge_resources(adap);
1092 for_each_port(adap, i) {
1093 struct net_device *dev = adap->port[i];
1094 struct port_info *pi = netdev_priv(dev);
1095 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1096 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1098 for (j = 0; j < pi->nqsets; j++, q++) {
1101 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1107 memset(&q->stats, 0, sizeof(q->stats));
1109 for (j = 0; j < pi->nqsets; j++, t++) {
1110 err = t4_sge_alloc_eth_txq(adap, t, dev,
1111 netdev_get_tx_queue(dev, j),
1112 s->fw_evtq.cntxt_id);
1118 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1119 for_each_ofldrxq(s, i) {
1120 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1121 struct net_device *dev = adap->port[i / j];
1125 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1126 q->fl.size ? &q->fl : NULL,
1130 memset(&q->stats, 0, sizeof(q->stats));
1131 s->ofld_rxq[i] = q->rspq.abs_id;
1132 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1133 s->fw_evtq.cntxt_id);
1138 for_each_rdmarxq(s, i) {
1139 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1143 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1144 msi_idx, q->fl.size ? &q->fl : NULL,
1148 memset(&q->stats, 0, sizeof(q->stats));
1149 s->rdma_rxq[i] = q->rspq.abs_id;
1152 for_each_rdmaciq(s, i) {
1153 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1157 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1158 msi_idx, q->fl.size ? &q->fl : NULL,
1162 memset(&q->stats, 0, sizeof(q->stats));
1163 s->rdma_ciq[i] = q->rspq.abs_id;
1166 for_each_port(adap, i) {
1168 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1169 * have RDMA queues, and that's the right value.
1171 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1172 s->fw_evtq.cntxt_id,
1173 s->rdmarxq[i].rspq.cntxt_id);
1178 t4_write_reg(adap, is_t4(adap->params.chip) ?
1179 MPS_TRC_RSS_CONTROL :
1180 MPS_T5_TRC_RSS_CONTROL,
1181 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1182 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1187 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1188 * The allocated memory is cleared.
1190 void *t4_alloc_mem(size_t size)
1192 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1200 * Free memory allocated through t4_alloc_mem().
1202 void t4_free_mem(void *addr)
1204 if (is_vmalloc_addr(addr))
1210 /* Send a Work Request to write the filter at a specified index. We construct
1211 * a Firmware Filter Work Request to have the work done and put the indicated
1212 * filter into "pending" mode which will prevent any further actions against
1213 * it till we get a reply from the firmware on the completion status of the
1216 static int set_filter_wr(struct adapter *adapter, int fidx)
1218 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1219 struct sk_buff *skb;
1220 struct fw_filter_wr *fwr;
1223 /* If the new filter requires loopback Destination MAC and/or VLAN
1224 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1227 if (f->fs.newdmac || f->fs.newvlan) {
1228 /* allocate L2T entry for new filter */
1229 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1232 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1233 f->fs.eport, f->fs.dmac)) {
1234 cxgb4_l2t_release(f->l2t);
1240 ftid = adapter->tids.ftid_base + fidx;
1242 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1243 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1244 memset(fwr, 0, sizeof(*fwr));
1246 /* It would be nice to put most of the following in t4_hw.c but most
1247 * of the work is translating the cxgbtool ch_filter_specification
1248 * into the Work Request and the definition of that structure is
1249 * currently in cxgbtool.h which isn't appropriate to pull into the
1250 * common code. We may eventually try to come up with a more neutral
1251 * filter specification structure but for now it's easiest to simply
1252 * put this fairly direct code in line ...
1254 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1255 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
1257 htonl(FW_FILTER_WR_TID_V(ftid) |
1258 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1259 FW_FILTER_WR_NOREPLY_V(0) |
1260 FW_FILTER_WR_IQ_V(f->fs.iq));
1261 fwr->del_filter_to_l2tix =
1262 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1263 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1264 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1265 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1266 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1267 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1268 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1269 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1270 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
1271 f->fs.newvlan == VLAN_REWRITE) |
1272 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
1273 f->fs.newvlan == VLAN_REWRITE) |
1274 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1275 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1276 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1277 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
1278 fwr->ethtype = htons(f->fs.val.ethtype);
1279 fwr->ethtypem = htons(f->fs.mask.ethtype);
1280 fwr->frag_to_ovlan_vldm =
1281 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1282 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1283 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1284 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1285 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1286 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
1288 fwr->rx_chan_rx_rpl_iq =
1289 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1290 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
1291 fwr->maci_to_matchtypem =
1292 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1293 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1294 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1295 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1296 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1297 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1298 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1299 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
1300 fwr->ptcl = f->fs.val.proto;
1301 fwr->ptclm = f->fs.mask.proto;
1302 fwr->ttyp = f->fs.val.tos;
1303 fwr->ttypm = f->fs.mask.tos;
1304 fwr->ivlan = htons(f->fs.val.ivlan);
1305 fwr->ivlanm = htons(f->fs.mask.ivlan);
1306 fwr->ovlan = htons(f->fs.val.ovlan);
1307 fwr->ovlanm = htons(f->fs.mask.ovlan);
1308 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1309 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1310 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1311 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1312 fwr->lp = htons(f->fs.val.lport);
1313 fwr->lpm = htons(f->fs.mask.lport);
1314 fwr->fp = htons(f->fs.val.fport);
1315 fwr->fpm = htons(f->fs.mask.fport);
1317 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1319 /* Mark the filter as "pending" and ship off the Filter Work Request.
1320 * When we get the Work Request Reply we'll clear the pending status.
1323 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1324 t4_ofld_send(adapter, skb);
1328 /* Delete the filter at a specified index.
1330 static int del_filter_wr(struct adapter *adapter, int fidx)
1332 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1333 struct sk_buff *skb;
1334 struct fw_filter_wr *fwr;
1335 unsigned int len, ftid;
1338 ftid = adapter->tids.ftid_base + fidx;
1340 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1341 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1342 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1344 /* Mark the filter as "pending" and ship off the Filter Work Request.
1345 * When we get the Work Request Reply we'll clear the pending status.
1348 t4_mgmt_tx(adapter, skb);
1352 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1353 void *accel_priv, select_queue_fallback_t fallback)
1357 #ifdef CONFIG_CHELSIO_T4_DCB
1358 /* If Data Center Bridging has been successfully negotiated on this
1359 * link then we'll use the skb's priority to map it to a TX Queue.
1360 * The skb's priority is determined via the VLAN Tag Priority Code
1363 if (cxgb4_dcb_enabled(dev)) {
1367 err = vlan_get_tag(skb, &vlan_tci);
1368 if (unlikely(err)) {
1369 if (net_ratelimit())
1371 "TX Packet without VLAN Tag on DCB Link\n");
1374 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1378 #endif /* CONFIG_CHELSIO_T4_DCB */
1381 txq = (skb_rx_queue_recorded(skb)
1382 ? skb_get_rx_queue(skb)
1383 : smp_processor_id());
1385 while (unlikely(txq >= dev->real_num_tx_queues))
1386 txq -= dev->real_num_tx_queues;
1391 return fallback(dev, skb) % dev->real_num_tx_queues;
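/*
 * Worked example for the DCB path above (illustrative): a VLAN TCI of
 * 0xa005 gives (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT =
 * 0xa000 >> 13 = 5, steering the skb to TX queue 5 for priority 5.
 */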
1394 static inline int is_offload(const struct adapter *adap)
1396 return adap->params.offload;
1400 * Implementation of ethtool operations.
1403 static u32 get_msglevel(struct net_device *dev)
1405 return netdev2adap(dev)->msg_enable;
1408 static void set_msglevel(struct net_device *dev, u32 val)
1410 netdev2adap(dev)->msg_enable = val;
1413 static char stats_strings[][ETH_GSTRING_LEN] = {
1416 "TxBroadcastFrames ",
1417 "TxMulticastFrames ",
1423 "TxFrames128To255 ",
1424 "TxFrames256To511 ",
1425 "TxFrames512To1023 ",
1426 "TxFrames1024To1518 ",
1427 "TxFrames1519ToMax ",
1442 "RxBroadcastFrames ",
1443 "RxMulticastFrames ",
1455 "RxFrames128To255 ",
1456 "RxFrames256To511 ",
1457 "RxFrames512To1023 ",
1458 "RxFrames1024To1518 ",
1459 "RxFrames1519ToMax ",
1471 "RxBG0FramesDropped ",
1472 "RxBG1FramesDropped ",
1473 "RxBG2FramesDropped ",
1474 "RxBG3FramesDropped ",
1475 "RxBG0FramesTrunc ",
1476 "RxBG1FramesTrunc ",
1477 "RxBG2FramesTrunc ",
1478 "RxBG3FramesTrunc ",
1487 "WriteCoalSuccess ",
1491 static int get_sset_count(struct net_device *dev, int sset)
1495 return ARRAY_SIZE(stats_strings);
1501 #define T4_REGMAP_SIZE (160 * 1024)
1502 #define T5_REGMAP_SIZE (332 * 1024)
1504 static int get_regs_len(struct net_device *dev)
1506 struct adapter *adap = netdev2adap(dev);
1507 if (is_t4(adap->params.chip))
1508 return T4_REGMAP_SIZE;
1510 return T5_REGMAP_SIZE;
1513 static int get_eeprom_len(struct net_device *dev)
1518 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1520 struct adapter *adapter = netdev2adap(dev);
1522 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1523 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1524 strlcpy(info->bus_info, pci_name(adapter->pdev),
1525 sizeof(info->bus_info));
1527 if (adapter->params.fw_vers)
1528 snprintf(info->fw_version, sizeof(info->fw_version),
1529 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1530 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
1531 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
1532 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
1533 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
1534 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
1535 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1536 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1537 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1540 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1542 if (stringset == ETH_SS_STATS)
1543 memcpy(data, stats_strings, sizeof(stats_strings));
1547 * port stats maintained per queue of the port. They should be in the same
1548 * order as in stats_strings above.
1550 struct queue_port_stats {
1560 static void collect_sge_port_stats(const struct adapter *adap,
1561 const struct port_info *p, struct queue_port_stats *s)
1564 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1565 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1567 memset(s, 0, sizeof(*s));
1568 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1570 s->tx_csum += tx->tx_cso;
1571 s->rx_csum += rx->stats.rx_cso;
1572 s->vlan_ex += rx->stats.vlan_ex;
1573 s->vlan_ins += tx->vlan_ins;
1574 s->gro_pkts += rx->stats.lro_pkts;
1575 s->gro_merged += rx->stats.lro_merged;
1579 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1582 struct port_info *pi = netdev_priv(dev);
1583 struct adapter *adapter = pi->adapter;
1586 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1588 data += sizeof(struct port_stats) / sizeof(u64);
1589 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1590 data += sizeof(struct queue_port_stats) / sizeof(u64);
1591 if (!is_t4(adapter->params.chip)) {
1592 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1593 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1594 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1595 *data = val1 - val2;
1600 memset(data, 0, 2 * sizeof(u64));
1606 * Return a version number to identify the type of adapter. The scheme is:
1607 * - bits 0..9: chip version
1608 * - bits 10..15: chip revision
1609 * - bits 16..23: register dump version
1611 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1613 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1614 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
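/*
 * Example encoding (illustrative): a T5 revision 1 part would yield
 * 5 | (1 << 10) | (1 << 16) = 0x10405, i.e. chip version in bits 0..9,
 * revision in bits 10..15, and register dump version 1 in bits 16..23.
 */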
1617 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1620 u32 *p = buf + start;
1622 for ( ; start <= end; start += sizeof(u32))
1623 *p++ = t4_read_reg(ap, start);
1626 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1629 static const unsigned int t4_reg_ranges[] = {
1850 static const unsigned int t5_reg_ranges[] = {
2279 struct adapter *ap = netdev2adap(dev);
2280 static const unsigned int *reg_ranges;
2281 int arr_size = 0, buf_size = 0;
2283 if (is_t4(ap->params.chip)) {
2284 reg_ranges = &t4_reg_ranges[0];
2285 arr_size = ARRAY_SIZE(t4_reg_ranges);
2286 buf_size = T4_REGMAP_SIZE;
2288 reg_ranges = &t5_reg_ranges[0];
2289 arr_size = ARRAY_SIZE(t5_reg_ranges);
2290 buf_size = T5_REGMAP_SIZE;
2293 regs->version = mk_adap_vers(ap);
2295 memset(buf, 0, buf_size);
2296 for (i = 0; i < arr_size; i += 2)
2297 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2300 static int restart_autoneg(struct net_device *dev)
2302 struct port_info *p = netdev_priv(dev);
2304 if (!netif_running(dev))
2306 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2308 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2312 static int identify_port(struct net_device *dev,
2313 enum ethtool_phys_id_state state)
2316 struct adapter *adap = netdev2adap(dev);
2318 if (state == ETHTOOL_ID_ACTIVE)
2320 else if (state == ETHTOOL_ID_INACTIVE)
2325 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2328 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2332 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2333 type == FW_PORT_TYPE_BT_XAUI) {
2335 if (caps & FW_PORT_CAP_SPEED_100M)
2336 v |= SUPPORTED_100baseT_Full;
2337 if (caps & FW_PORT_CAP_SPEED_1G)
2338 v |= SUPPORTED_1000baseT_Full;
2339 if (caps & FW_PORT_CAP_SPEED_10G)
2340 v |= SUPPORTED_10000baseT_Full;
2341 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2342 v |= SUPPORTED_Backplane;
2343 if (caps & FW_PORT_CAP_SPEED_1G)
2344 v |= SUPPORTED_1000baseKX_Full;
2345 if (caps & FW_PORT_CAP_SPEED_10G)
2346 v |= SUPPORTED_10000baseKX4_Full;
2347 } else if (type == FW_PORT_TYPE_KR)
2348 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2349 else if (type == FW_PORT_TYPE_BP_AP)
2350 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2351 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2352 else if (type == FW_PORT_TYPE_BP4_AP)
2353 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2354 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2355 SUPPORTED_10000baseKX4_Full;
2356 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2357 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2358 v |= SUPPORTED_FIBRE;
2359 if (caps & FW_PORT_CAP_SPEED_1G)
2360 v |= SUPPORTED_1000baseT_Full;
2361 if (caps & FW_PORT_CAP_SPEED_10G)
2362 v |= SUPPORTED_10000baseT_Full;
2363 } else if (type == FW_PORT_TYPE_BP40_BA)
2364 v |= SUPPORTED_40000baseSR4_Full;
2366 if (caps & FW_PORT_CAP_ANEG)
2367 v |= SUPPORTED_Autoneg;
2371 static unsigned int to_fw_linkcaps(unsigned int caps)
2375 if (caps & ADVERTISED_100baseT_Full)
2376 v |= FW_PORT_CAP_SPEED_100M;
2377 if (caps & ADVERTISED_1000baseT_Full)
2378 v |= FW_PORT_CAP_SPEED_1G;
2379 if (caps & ADVERTISED_10000baseT_Full)
2380 v |= FW_PORT_CAP_SPEED_10G;
2381 if (caps & ADVERTISED_40000baseSR4_Full)
2382 v |= FW_PORT_CAP_SPEED_40G;
2386 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2388 const struct port_info *p = netdev_priv(dev);
2390 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2391 p->port_type == FW_PORT_TYPE_BT_XFI ||
2392 p->port_type == FW_PORT_TYPE_BT_XAUI)
2393 cmd->port = PORT_TP;
2394 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2395 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2396 cmd->port = PORT_FIBRE;
2397 else if (p->port_type == FW_PORT_TYPE_SFP ||
2398 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2399 p->port_type == FW_PORT_TYPE_QSFP) {
2400 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2401 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2402 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2403 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2404 cmd->port = PORT_FIBRE;
2405 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2406 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2407 cmd->port = PORT_DA;
2409 cmd->port = PORT_OTHER;
2411 cmd->port = PORT_OTHER;
2413 if (p->mdio_addr >= 0) {
2414 cmd->phy_address = p->mdio_addr;
2415 cmd->transceiver = XCVR_EXTERNAL;
2416 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2417 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2419 cmd->phy_address = 0; /* not really, but no better option */
2420 cmd->transceiver = XCVR_INTERNAL;
2421 cmd->mdio_support = 0;
2424 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2425 cmd->advertising = from_fw_linkcaps(p->port_type,
2426 p->link_cfg.advertising);
2427 ethtool_cmd_speed_set(cmd,
2428 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2429 cmd->duplex = DUPLEX_FULL;
2430 cmd->autoneg = p->link_cfg.autoneg;
2436 static unsigned int speed_to_caps(int speed)
2439 return FW_PORT_CAP_SPEED_100M;
2441 return FW_PORT_CAP_SPEED_1G;
2443 return FW_PORT_CAP_SPEED_10G;
2445 return FW_PORT_CAP_SPEED_40G;
2449 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2452 struct port_info *p = netdev_priv(dev);
2453 struct link_config *lc = &p->link_cfg;
2454 u32 speed = ethtool_cmd_speed(cmd);
2456 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2459 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2461 * PHY offers a single speed. See if that's what's
2464 if (cmd->autoneg == AUTONEG_DISABLE &&
2465 (lc->supported & speed_to_caps(speed)))
2470 if (cmd->autoneg == AUTONEG_DISABLE) {
2471 cap = speed_to_caps(speed);
2473 if (!(lc->supported & cap) ||
2478 lc->requested_speed = cap;
2479 lc->advertising = 0;
2481 cap = to_fw_linkcaps(cmd->advertising);
2482 if (!(lc->supported & cap))
2484 lc->requested_speed = 0;
2485 lc->advertising = cap | FW_PORT_CAP_ANEG;
2487 lc->autoneg = cmd->autoneg;
2489 if (netif_running(dev))
2490 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2495 static void get_pauseparam(struct net_device *dev,
2496 struct ethtool_pauseparam *epause)
2498 struct port_info *p = netdev_priv(dev);
2500 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2501 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2502 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2505 static int set_pauseparam(struct net_device *dev,
2506 struct ethtool_pauseparam *epause)
2508 struct port_info *p = netdev_priv(dev);
2509 struct link_config *lc = &p->link_cfg;
2511 if (epause->autoneg == AUTONEG_DISABLE)
2512 lc->requested_fc = 0;
2513 else if (lc->supported & FW_PORT_CAP_ANEG)
2514 lc->requested_fc = PAUSE_AUTONEG;
2518 if (epause->rx_pause)
2519 lc->requested_fc |= PAUSE_RX;
2520 if (epause->tx_pause)
2521 lc->requested_fc |= PAUSE_TX;
2522 if (netif_running(dev))
2523 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2528 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2530 const struct port_info *pi = netdev_priv(dev);
2531 const struct sge *s = &pi->adapter->sge;
2533 e->rx_max_pending = MAX_RX_BUFFERS;
2534 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2535 e->rx_jumbo_max_pending = 0;
2536 e->tx_max_pending = MAX_TXQ_ENTRIES;
2538 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2539 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2540 e->rx_jumbo_pending = 0;
2541 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2544 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2547 const struct port_info *pi = netdev_priv(dev);
2548 struct adapter *adapter = pi->adapter;
2549 struct sge *s = &adapter->sge;
2551 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2552 e->tx_pending > MAX_TXQ_ENTRIES ||
2553 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2554 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2555 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2558 if (adapter->flags & FULL_INIT_DONE)
2561 for (i = 0; i < pi->nqsets; ++i) {
2562 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2563 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2564 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2569 static int closest_timer(const struct sge *s, int time)
2571 int i, delta, match = 0, min_delta = INT_MAX;
2573 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2574 delta = time - s->timer_val[i];
2577 if (delta < min_delta) {
2585 static int closest_thres(const struct sge *s, int thres)
2587 int i, delta, match = 0, min_delta = INT_MAX;
2589 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2590 delta = thres - s->counter_val[i];
2593 if (delta < min_delta) {
2602 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2604 static unsigned int qtimer_val(const struct adapter *adap,
2605 const struct sge_rspq *q)
2607 unsigned int idx = q->intr_params >> 1;
2609 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2613 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2615 * @us: the hold-off time in us, or 0 to disable timer
2616 * @cnt: the hold-off packet count, or 0 to disable counter
2618 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2619 * one of the two needs to be enabled for the queue to generate interrupts.
2621 static int set_rspq_intr_params(struct sge_rspq *q,
2622 unsigned int us, unsigned int cnt)
2624 struct adapter *adap = q->adap;
2626 if ((us | cnt) == 0)
2633 new_idx = closest_thres(&adap->sge, cnt);
2634 if (q->desc && q->pktcnt_idx != new_idx) {
2635 /* the queue has already been created, update it */
2636 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2637 FW_PARAMS_PARAM_X_V(
2638 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2639 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
2640 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2645 q->pktcnt_idx = new_idx;
2648 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2649 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
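/*
 * Encoding note, inferred from the assignment above and from qtimer_val():
 * bit 0 of intr_params is the packet-count enable and the bits above it
 * hold the timer index. E.g. timer index 3 with the counter enabled
 * encodes as (3 << 1) | 1 = 7, and qtimer_val() recovers the index
 * via ">> 1".
 */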
2654 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2655 * @dev: the network device
2656 * @us: the hold-off time in us, or 0 to disable timer
2657 * @cnt: the hold-off packet count, or 0 to disable counter
2659 * Set the RX interrupt hold-off parameters for a network device.
2661 static int set_rx_intr_params(struct net_device *dev,
2662 unsigned int us, unsigned int cnt)
2665 struct port_info *pi = netdev_priv(dev);
2666 struct adapter *adap = pi->adapter;
2667 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2669 for (i = 0; i < pi->nqsets; i++, q++) {
2670 err = set_rspq_intr_params(&q->rspq, us, cnt);
2677 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2680 struct port_info *pi = netdev_priv(dev);
2681 struct adapter *adap = pi->adapter;
2682 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2684 for (i = 0; i < pi->nqsets; i++, q++)
2685 q->rspq.adaptive_rx = adaptive_rx;
2690 static int get_adaptive_rx_setting(struct net_device *dev)
2692 struct port_info *pi = netdev_priv(dev);
2693 struct adapter *adap = pi->adapter;
2694 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2696 return q->rspq.adaptive_rx;
2699 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2701 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2702 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2703 c->rx_max_coalesced_frames);
2706 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2708 const struct port_info *pi = netdev_priv(dev);
2709 const struct adapter *adap = pi->adapter;
2710 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2712 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2713 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2714 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2715 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2720 * eeprom_ptov - translate a physical EEPROM address to virtual
2721 * @phys_addr: the physical EEPROM address
2722 * @fn: the PCI function number
2723 * @sz: size of function-specific area
2725 * Translate a physical EEPROM address to virtual. The first 1K is
2726 * accessed through virtual addresses starting at 31K; the rest is
2727 * accessed through virtual addresses starting at 0.
2729 * The mapping is as follows:
2730 * [0..1K) -> [31K..32K)
2731 * [1K..1K+A) -> [31K-A..31K)
2732 * [1K+A..ES) -> [0..ES-A-1K)
2734 * where A = @fn * @sz, and ES = EEPROM size.
2736 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2739 if (phys_addr < 1024)
2740 return phys_addr + (31 << 10);
2741 if (phys_addr < 1024 + fn)
2742 return 31744 - fn + phys_addr - 1024;
2743 if (phys_addr < EEPROMSIZE)
2744 return phys_addr - 1024 - fn;
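/*
 * Worked mapping (hypothetical A = fn * sz = 1K): physical 0 -> virtual 31K
 * (the first-1K window), physical 1K -> virtual 31K - 1K = 30K (start of the
 * function-specific area), and physical 2K -> virtual 0 (the [1K+A..ES)
 * region shifted down by A + 1K).
 */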
2749 * The next two routines implement eeprom read/write from physical addresses.
2751 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2753 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2756 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2757 return vaddr < 0 ? vaddr : 0;
2760 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2762 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2765 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2766 return vaddr < 0 ? vaddr : 0;
2769 #define EEPROM_MAGIC 0x38E2F10C
2771 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2775 struct adapter *adapter = netdev2adap(dev);
2777 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2781 e->magic = EEPROM_MAGIC;
2782 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2783 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2786 memcpy(data, buf + e->offset, e->len);
2791 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2796 u32 aligned_offset, aligned_len, *p;
2797 struct adapter *adapter = netdev2adap(dev);
2799 if (eeprom->magic != EEPROM_MAGIC)
2802 aligned_offset = eeprom->offset & ~3;
2803 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
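/* For example, offset = 6 and len = 5 give aligned_offset = 4 and
* aligned_len = (5 + 2 + 3) & ~3 = 8, so the aligned window covers
* bytes [4, 12) and the RMW below preserves the untouched bytes.
*/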
2805 if (adapter->fn > 0) {
2806 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2808 if (aligned_offset < start ||
2809 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2813 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2815 * RMW possibly needed for first or last words.
2817 buf = kmalloc(aligned_len, GFP_KERNEL);
2820 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2821 if (!err && aligned_len > 4)
2822 err = eeprom_rd_phys(adapter,
2823 aligned_offset + aligned_len - 4,
2824 (u32 *)&buf[aligned_len - 4]);
2827 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2831 err = t4_seeprom_wp(adapter, false);
2835 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2836 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2837 aligned_offset += 4;
2841 err = t4_seeprom_wp(adapter, true);
2848 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2851 const struct firmware *fw;
2852 struct adapter *adap = netdev2adap(netdev);
2853 unsigned int mbox = PCIE_FW_MASTER_M + 1;
2855 ef->data[sizeof(ef->data) - 1] = '\0';
2856 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2860 /* If the adapter has been fully initialized then we'll go ahead and
2861 * try to get the firmware's cooperation in upgrading to the new
2862 * firmware image; otherwise we'll try to do the entire job from the
2863 * host ... and we always "force" the operation in this path.
2865 if (adap->flags & FULL_INIT_DONE)
2868 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2869 release_firmware(fw);
2871 dev_info(adap->pdev_dev,
2872 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2876 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2877 #define BCAST_CRC 0xa0ccc1a6
2879 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2881 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2882 wol->wolopts = netdev2adap(dev)->wol;
2883 memset(&wol->sopass, 0, sizeof(wol->sopass));
2886 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2889 struct port_info *pi = netdev_priv(dev);
2891 if (wol->wolopts & ~WOL_SUPPORTED)
2893 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2894 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2895 if (wol->wolopts & WAKE_BCAST) {
2896 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2899 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2900 ~6ULL, ~0ULL, BCAST_CRC, true);
2902 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2906 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2908 const struct port_info *pi = netdev_priv(dev);
2909 netdev_features_t changed = dev->features ^ features;
2912 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2915 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2917 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2919 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2923 static u32 get_rss_table_size(struct net_device *dev)
2925 const struct port_info *pi = netdev_priv(dev);
2927 return pi->rss_size;
2930 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
2932 const struct port_info *pi = netdev_priv(dev);
2933 unsigned int n = pi->rss_size;
2936 *hfunc = ETH_RSS_HASH_TOP;
2944 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2948 struct port_info *pi = netdev_priv(dev);
2950 /* We require at least one supported parameter to be changed and no
2951 * change in any of the unsupported parameters
2953 if (key ||
2954 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2955 return -EOPNOTSUPP;
2956 if (!p)
2957 return 0;
2959 for (i = 0; i < pi->rss_size; i++)
2960 pi->rss[i] = p[i];
2961 if (pi->adapter->flags & FULL_INIT_DONE)
2962 return write_rss(pi, pi->rss);
2966 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2969 const struct port_info *pi = netdev_priv(dev);
2971 switch (info->cmd) {
2972 case ETHTOOL_GRXFH: {
2973 unsigned int v = pi->rss_mode;
2976 switch (info->flow_type) {
2977 case TCP_V4_FLOW:
2978 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
2979 info->data = RXH_IP_SRC | RXH_IP_DST |
2980 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2981 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2982 info->data = RXH_IP_SRC | RXH_IP_DST;
2983 break;
2984 case UDP_V4_FLOW:
2985 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
2986 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2987 info->data = RXH_IP_SRC | RXH_IP_DST |
2988 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2989 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2990 info->data = RXH_IP_SRC | RXH_IP_DST;
2992 case SCTP_V4_FLOW:
2993 case AH_ESP_V4_FLOW:
2994 case IPV4_FLOW:
2995 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2996 info->data = RXH_IP_SRC | RXH_IP_DST;
2997 break;
2998 case TCP_V6_FLOW:
2999 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
3000 info->data = RXH_IP_SRC | RXH_IP_DST |
3001 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3002 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3003 info->data = RXH_IP_SRC | RXH_IP_DST;
3004 break;
3005 case UDP_V6_FLOW:
3006 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
3007 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
3008 info->data = RXH_IP_SRC | RXH_IP_DST |
3009 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3010 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3011 info->data = RXH_IP_SRC | RXH_IP_DST;
3013 case SCTP_V6_FLOW:
3014 case AH_ESP_V6_FLOW:
3015 case IPV6_FLOW:
3016 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
3017 info->data = RXH_IP_SRC | RXH_IP_DST;
3022 case ETHTOOL_GRXRINGS:
3023 info->data = pi->nqsets;
3029 static const struct ethtool_ops cxgb_ethtool_ops = {
3030 .get_settings = get_settings,
3031 .set_settings = set_settings,
3032 .get_drvinfo = get_drvinfo,
3033 .get_msglevel = get_msglevel,
3034 .set_msglevel = set_msglevel,
3035 .get_ringparam = get_sge_param,
3036 .set_ringparam = set_sge_param,
3037 .get_coalesce = get_coalesce,
3038 .set_coalesce = set_coalesce,
3039 .get_eeprom_len = get_eeprom_len,
3040 .get_eeprom = get_eeprom,
3041 .set_eeprom = set_eeprom,
3042 .get_pauseparam = get_pauseparam,
3043 .set_pauseparam = set_pauseparam,
3044 .get_link = ethtool_op_get_link,
3045 .get_strings = get_strings,
3046 .set_phys_id = identify_port,
3047 .nway_reset = restart_autoneg,
3048 .get_sset_count = get_sset_count,
3049 .get_ethtool_stats = get_stats,
3050 .get_regs_len = get_regs_len,
3051 .get_regs = get_regs,
3054 .get_rxnfc = get_rxnfc,
3055 .get_rxfh_indir_size = get_rss_table_size,
3056 .get_rxfh = get_rss_table,
3057 .set_rxfh = set_rss_table,
3058 .flash_device = set_flash,
3061 static int setup_debugfs(struct adapter *adap)
3063 if (IS_ERR_OR_NULL(adap->debugfs_root))
3066 #ifdef CONFIG_DEBUG_FS
3067 t4_setup_debugfs(adap);
3073 * upper-layer driver support
3077 * Allocate an active-open TID and set it to the supplied value.
3079 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3083 spin_lock_bh(&t->atid_lock);
3085 union aopen_entry *p = t->afree;
3087 atid = (p - t->atid_tab) + t->atid_base;
3092 spin_unlock_bh(&t->atid_lock);
3095 EXPORT_SYMBOL(cxgb4_alloc_atid);
3098 * Release an active-open TID.
3100 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3102 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3104 spin_lock_bh(&t->atid_lock);
3108 spin_unlock_bh(&t->atid_lock);
3110 EXPORT_SYMBOL(cxgb4_free_atid);
3113 * Allocate a server TID and set it to the supplied value.
3115 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3119 spin_lock_bh(&t->stid_lock);
3120 if (family == PF_INET) {
3121 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3122 if (stid < t->nstids)
3123 __set_bit(stid, t->stid_bmap);
3124 else
3125 stid = -1;
3126 } else {
3127 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3132 t->stid_tab[stid].data = data;
3133 stid += t->stid_base;
3134 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3135 * This is equivalent to 4 TIDs. With CLIP enabled it
3136 * goes down to 2 TIDs.
3137 */
3138 if (family == PF_INET)
3139 t->stids_in_use++;
3140 else
3141 t->stids_in_use += 4;
3143 spin_unlock_bh(&t->stid_lock);
3146 EXPORT_SYMBOL(cxgb4_alloc_stid);
3148 /* Allocate a server filter TID and set it to the supplied value.
3150 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3154 spin_lock_bh(&t->stid_lock);
3155 if (family == PF_INET) {
3156 stid = find_next_zero_bit(t->stid_bmap,
3157 t->nstids + t->nsftids, t->nstids);
3158 if (stid < (t->nstids + t->nsftids))
3159 __set_bit(stid, t->stid_bmap);
3166 t->stid_tab[stid].data = data;
3167 stid -= t->nstids;
3168 stid += t->sftid_base;
3171 spin_unlock_bh(&t->stid_lock);
3174 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3176 /* Release a server TID.
3178 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3180 /* Is it a server filter TID? */
3181 if (t->nsftids && (stid >= t->sftid_base)) {
3182 stid -= t->sftid_base;
3183 stid += t->nstids;
3184 } else {
3185 stid -= t->stid_base;
3188 spin_lock_bh(&t->stid_lock);
3189 if (family == PF_INET)
3190 __clear_bit(stid, t->stid_bmap);
3192 bitmap_release_region(t->stid_bmap, stid, 2);
3193 t->stid_tab[stid].data = NULL;
3194 if (family == PF_INET)
3195 t->stids_in_use--;
3196 else
3197 t->stids_in_use -= 4;
3198 spin_unlock_bh(&t->stid_lock);
3200 EXPORT_SYMBOL(cxgb4_free_stid);
3203 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3205 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3208 struct cpl_tid_release *req;
3210 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3211 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3212 INIT_TP_WR(req, tid);
3213 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3217 * Queue a TID release request and if necessary schedule a work queue to
3218 * process it.
3220 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3223 void **p = &t->tid_tab[tid];
3224 struct adapter *adap = container_of(t, struct adapter, tids);
3226 spin_lock_bh(&adap->tid_release_lock);
3227 *p = adap->tid_release_head;
3228 /* Low 2 bits encode the Tx channel number */
3229 adap->tid_release_head = (void **)((uintptr_t)p | chan);
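/* Illustrative sketch of the pointer tagging used here: tid_tab entries
* are pointers, so each slot is at least 4-byte aligned and the low 2
* bits of its address are free to carry chan (0-3):
*
* void **p = &t->tid_tab[tid]; // e.g. address ending in ...a0
* void **tagged = (void **)((uintptr_t)p | chan); // ...a2 for chan = 2
* unsigned int ch = (uintptr_t)tagged & 3; // recovers 2
* void **orig = (void **)((void *)tagged - ch); // recovers p
*
* process_tid_release_list() below performs exactly this unpacking.
*/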
3230 if (!adap->tid_release_task_busy) {
3231 adap->tid_release_task_busy = true;
3232 queue_work(adap->workq, &adap->tid_release_task);
3234 spin_unlock_bh(&adap->tid_release_lock);
3238 * Process the list of pending TID release requests.
3240 static void process_tid_release_list(struct work_struct *work)
3242 struct sk_buff *skb;
3243 struct adapter *adap;
3245 adap = container_of(work, struct adapter, tid_release_task);
3247 spin_lock_bh(&adap->tid_release_lock);
3248 while (adap->tid_release_head) {
3249 void **p = adap->tid_release_head;
3250 unsigned int chan = (uintptr_t)p & 3;
3251 p = (void *)p - chan;
3253 adap->tid_release_head = *p;
3255 spin_unlock_bh(&adap->tid_release_lock);
3257 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3259 schedule_timeout_uninterruptible(1);
3261 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3262 t4_ofld_send(adap, skb);
3263 spin_lock_bh(&adap->tid_release_lock);
3265 adap->tid_release_task_busy = false;
3266 spin_unlock_bh(&adap->tid_release_lock);
3270 * Release a TID and inform HW. If we are unable to allocate the release
3271 * message we defer to a work queue.
3273 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3276 struct sk_buff *skb;
3277 struct adapter *adap = container_of(t, struct adapter, tids);
3279 old = t->tid_tab[tid];
3280 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3282 t->tid_tab[tid] = NULL;
3283 mk_tid_release(skb, chan, tid);
3284 t4_ofld_send(adap, skb);
3286 cxgb4_queue_tid_release(t, chan, tid);
3288 atomic_dec(&t->tids_in_use);
3290 EXPORT_SYMBOL(cxgb4_remove_tid);
3293 * Allocate and initialize the TID tables. Returns 0 on success.
3295 static int tid_init(struct tid_info *t)
3298 unsigned int stid_bmap_size;
3299 unsigned int natids = t->natids;
3300 struct adapter *adap = container_of(t, struct adapter, tids);
3302 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3303 size = t->ntids * sizeof(*t->tid_tab) +
3304 natids * sizeof(*t->atid_tab) +
3305 t->nstids * sizeof(*t->stid_tab) +
3306 t->nsftids * sizeof(*t->stid_tab) +
3307 stid_bmap_size * sizeof(long) +
3308 t->nftids * sizeof(*t->ftid_tab) +
3309 t->nsftids * sizeof(*t->ftid_tab);
3311 t->tid_tab = t4_alloc_mem(size);
3315 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3316 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3317 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3318 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3319 spin_lock_init(&t->stid_lock);
3320 spin_lock_init(&t->atid_lock);
3322 t->stids_in_use = 0;
3324 t->atids_in_use = 0;
3325 atomic_set(&t->tids_in_use, 0);
3327 /* Setup the free list for atid_tab and clear the stid bitmap. */
3328 if (natids) {
3329 while (--natids)
3330 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3331 t->afree = t->atid_tab;
3332 }
3333 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3334 /* Reserve stid 0 for T4/T5 adapters */
3335 if (!t->stid_base &&
3336 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3337 __set_bit(0, t->stid_bmap);
3342 int cxgb4_clip_get(const struct net_device *dev,
3343 const struct in6_addr *lip)
3345 struct adapter *adap;
3346 struct fw_clip_cmd c;
3348 adap = netdev2adap(dev);
3349 memset(&c, 0, sizeof(c));
3350 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3351 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3352 c.alloc_to_len16 = htonl(FW_CLIP_CMD_ALLOC_F | FW_LEN16(c));
3353 c.ip_hi = *(__be64 *)(lip->s6_addr);
3354 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3355 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3357 EXPORT_SYMBOL(cxgb4_clip_get);
3359 int cxgb4_clip_release(const struct net_device *dev,
3360 const struct in6_addr *lip)
3362 struct adapter *adap;
3363 struct fw_clip_cmd c;
3365 adap = netdev2adap(dev);
3366 memset(&c, 0, sizeof(c));
3367 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3368 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3369 c.alloc_to_len16 = htonl(FW_CLIP_CMD_FREE_F | FW_LEN16(c));
3370 c.ip_hi = *(__be64 *)(lip->s6_addr);
3371 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3372 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3374 EXPORT_SYMBOL(cxgb4_clip_release);
3377 * cxgb4_create_server - create an IP server
3378 * @dev: the device
3379 * @stid: the server TID
3380 * @sip: local IP address to bind server to
3381 * @sport: the server's TCP port
3382 * @queue: queue to direct messages from this server to
3384 * Create an IP server for the given port and address.
3385 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3387 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3388 __be32 sip, __be16 sport, __be16 vlan,
3392 struct sk_buff *skb;
3393 struct adapter *adap;
3394 struct cpl_pass_open_req *req;
3397 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3401 adap = netdev2adap(dev);
3402 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3404 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3405 req->local_port = sport;
3406 req->peer_port = htons(0);
3407 req->local_ip = sip;
3408 req->peer_ip = htonl(0);
3409 chan = rxq_to_chan(&adap->sge, queue);
3410 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3411 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3412 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3413 ret = t4_mgmt_tx(adap, skb);
3414 return net_xmit_eval(ret);
3416 EXPORT_SYMBOL(cxgb4_create_server);
3418 /* cxgb4_create_server6 - create an IPv6 server
3419 * @dev: the device
3420 * @stid: the server TID
3421 * @sip: local IPv6 address to bind server to
3422 * @sport: the server's TCP port
3423 * @queue: queue to direct messages from this server to
3425 * Create an IPv6 server for the given port and address.
3426 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3428 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3429 const struct in6_addr *sip, __be16 sport,
3433 struct sk_buff *skb;
3434 struct adapter *adap;
3435 struct cpl_pass_open_req6 *req;
3438 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3442 adap = netdev2adap(dev);
3443 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3445 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3446 req->local_port = sport;
3447 req->peer_port = htons(0);
3448 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3449 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3450 req->peer_ip_hi = cpu_to_be64(0);
3451 req->peer_ip_lo = cpu_to_be64(0);
3452 chan = rxq_to_chan(&adap->sge, queue);
3453 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3454 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3455 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3456 ret = t4_mgmt_tx(adap, skb);
3457 return net_xmit_eval(ret);
3459 EXPORT_SYMBOL(cxgb4_create_server6);
3461 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3462 unsigned int queue, bool ipv6)
3464 struct sk_buff *skb;
3465 struct adapter *adap;
3466 struct cpl_close_listsvr_req *req;
3469 adap = netdev2adap(dev);
3471 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3475 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3477 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3478 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3479 LISTSVR_IPV6(0)) | QUEUENO(queue));
3480 ret = t4_mgmt_tx(adap, skb);
3481 return net_xmit_eval(ret);
3483 EXPORT_SYMBOL(cxgb4_remove_server);
3486 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3487 * @mtus: the HW MTU table
3488 * @mtu: the target MTU
3489 * @idx: index of selected entry in the MTU table
3491 * Returns the index and the value in the HW MTU table that is closest to
3492 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3493 * table, in which case that smallest available value is selected.
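*
* For example, given a hypothetical table beginning {88, 576, 1500, 9000},
* an @mtu of 4000 selects 1500 (index 2), while an @mtu of 64 selects 88,
* the smallest entry (index 0).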
3495 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3500 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3506 EXPORT_SYMBOL(cxgb4_best_mtu);
3509 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3510 * @mtus: the HW MTU table
3511 * @header_size: Header Size
3512 * @data_size_max: maximum Data Segment Size
3513 * @data_size_align: desired Data Segment Size Alignment (2^N)
3514 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3516 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3517 * MTU Table based solely on a Maximum MTU parameter, we break that
3518 * parameter up into a Header Size and Maximum Data Segment Size, and
3519 * provide a desired Data Segment Size Alignment. If we find an MTU in
3520 * the Hardware MTU Table which will result in a Data Segment Size with
3521 * the requested alignment _and_ that MTU isn't "too far" from the
3522 * closest MTU, then we'll return that rather than the closest MTU.
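*
* For example (hypothetical values): with @header_size = 40 and
* @data_size_align = 512, an MTU of 2088 yields a 2048-byte (512-aligned)
* Data Segment and is preferred over a slightly larger but unaligned
* entry, as long as it is within one table index of the closest MTU.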
3524 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3525 unsigned short header_size,
3526 unsigned short data_size_max,
3527 unsigned short data_size_align,
3528 unsigned int *mtu_idxp)
3530 unsigned short max_mtu = header_size + data_size_max;
3531 unsigned short data_size_align_mask = data_size_align - 1;
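/* The mask computation above assumes data_size_align is a power of 2,
* per the (2^N) contract documented for @data_size_align.
*/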
3532 int mtu_idx, aligned_mtu_idx;
3534 /* Scan the MTU Table till we find an MTU which is larger than our
3535 * Maximum MTU or we reach the end of the table. Along the way,
3536 * record the last MTU found, if any, which will result in a Data
3537 * Segment Length matching the requested alignment.
3539 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3540 unsigned short data_size = mtus[mtu_idx] - header_size;
3542 /* If this MTU minus the Header Size would result in a
3543 * Data Segment Size of the desired alignment, remember it.
3545 if ((data_size & data_size_align_mask) == 0)
3546 aligned_mtu_idx = mtu_idx;
3548 /* If we're not at the end of the Hardware MTU Table and the
3549 * next element is larger than our Maximum MTU, drop out of
3552 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3556 /* If we fell out of the loop because we ran to the end of the table,
3557 * then we just have to use the last [largest] entry.
3559 if (mtu_idx == NMTUS)
3562 /* If we found an MTU which resulted in the requested Data Segment
3563 * Length alignment and that's "not far" from the largest MTU which is
3564 * less than or equal to the maximum MTU, then use that.
3566 if (aligned_mtu_idx >= 0 &&
3567 mtu_idx - aligned_mtu_idx <= 1)
3568 mtu_idx = aligned_mtu_idx;
3570 /* If the caller has passed in an MTU Index pointer, pass the
3571 * MTU Index back. Return the MTU value.
3574 *mtu_idxp = mtu_idx;
3575 return mtus[mtu_idx];
3577 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3580 * cxgb4_port_chan - get the HW channel of a port
3581 * @dev: the net device for the port
3583 * Return the HW Tx channel of the given port.
3585 unsigned int cxgb4_port_chan(const struct net_device *dev)
3587 return netdev2pinfo(dev)->tx_chan;
3589 EXPORT_SYMBOL(cxgb4_port_chan);
3591 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3593 struct adapter *adap = netdev2adap(dev);
3594 u32 v1, v2, lp_count, hp_count;
3596 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3597 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3598 if (is_t4(adap->params.chip)) {
3599 lp_count = G_LP_COUNT(v1);
3600 hp_count = G_HP_COUNT(v1);
3602 lp_count = G_LP_COUNT_T5(v1);
3603 hp_count = G_HP_COUNT_T5(v2);
3605 return lpfifo ? lp_count : hp_count;
3607 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3610 * cxgb4_port_viid - get the VI id of a port
3611 * @dev: the net device for the port
3613 * Return the VI id of the given port.
3615 unsigned int cxgb4_port_viid(const struct net_device *dev)
3617 return netdev2pinfo(dev)->viid;
3619 EXPORT_SYMBOL(cxgb4_port_viid);
3622 * cxgb4_port_idx - get the index of a port
3623 * @dev: the net device for the port
3625 * Return the index of the given port.
3627 unsigned int cxgb4_port_idx(const struct net_device *dev)
3629 return netdev2pinfo(dev)->port_id;
3631 EXPORT_SYMBOL(cxgb4_port_idx);
3633 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3634 struct tp_tcp_stats *v6)
3636 struct adapter *adap = pci_get_drvdata(pdev);
3638 spin_lock(&adap->stats_lock);
3639 t4_tp_get_tcp_stats(adap, v4, v6);
3640 spin_unlock(&adap->stats_lock);
3642 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3644 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3645 const unsigned int *pgsz_order)
3647 struct adapter *adap = netdev2adap(dev);
3649 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3650 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3651 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3652 HPZ3(pgsz_order[3]));
3654 EXPORT_SYMBOL(cxgb4_iscsi_init);
3656 int cxgb4_flush_eq_cache(struct net_device *dev)
3658 struct adapter *adap = netdev2adap(dev);
3661 ret = t4_fwaddrspace_write(adap, adap->mbox,
3662 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3665 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3667 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3669 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
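/* Each egress queue context occupies 24 bytes in the DBQ context region;
* the hardware cidx/pidx live in the 64-bit word at offset 8 of that
* context and are extracted below.
*/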
3673 spin_lock(&adap->win0_lock);
3674 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3675 sizeof(indices), (__be32 *)&indices,
3677 spin_unlock(&adap->win0_lock);
3679 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3680 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3685 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3688 struct adapter *adap = netdev2adap(dev);
3689 u16 hw_pidx, hw_cidx;
3692 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3696 if (pidx != hw_pidx) {
3699 if (pidx >= hw_pidx)
3700 delta = pidx - hw_pidx;
3701 else
3702 delta = size - hw_pidx + pidx;
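/* For example, with size = 1024, hw_pidx = 1000 and pidx = 8 the ring
* has wrapped, so delta = 1024 - 1000 + 8 = 32 doorbell increments.
*/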
3704 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3705 QID(qid) | PIDX(delta));
3710 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3712 void cxgb4_disable_db_coalescing(struct net_device *dev)
3714 struct adapter *adap;
3716 adap = netdev2adap(dev);
3717 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3720 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3722 void cxgb4_enable_db_coalescing(struct net_device *dev)
3724 struct adapter *adap;
3726 adap = netdev2adap(dev);
3727 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3729 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3731 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3733 struct adapter *adap;
3734 u32 offset, memtype, memaddr;
3735 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
3736 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3739 adap = netdev2adap(dev);
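/* Each STAG selects a 32-byte TPT entry: the upper 24 bits of the STAG
* index the entry relative to the start of the STAG region.
*/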
3741 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3743 /* Figure out where the offset lands in the Memory Type/Address scheme.
3744 * This code assumes that the memory is laid out starting at offset 0
3745 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3746 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3747 * MC0, and some have both MC0 and MC1.
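*
* For example (hypothetical sizes): with 256 MB of EDC0 and 256 MB of
* EDC1, an offset of 0x18000000 falls in EDC1 and maps to
* memaddr = 0x18000000 - 0x10000000 = 0x08000000.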
3749 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3750 edc0_size = EDRAM0_SIZE_G(size) << 20;
3751 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3752 edc1_size = EDRAM1_SIZE_G(size) << 20;
3753 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3754 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
3756 edc0_end = edc0_size;
3757 edc1_end = edc0_end + edc1_size;
3758 mc0_end = edc1_end + mc0_size;
3760 if (offset < edc0_end) {
3761 memtype = MEM_EDC0;
3762 memaddr = offset;
3763 } else if (offset < edc1_end) {
3764 memtype = MEM_EDC1;
3765 memaddr = offset - edc0_end;
3767 if (offset < mc0_end) {
3768 memtype = MEM_MC0;
3769 memaddr = offset - edc1_end;
3770 } else if (is_t4(adap->params.chip)) {
3771 /* T4 only has a single memory channel */
3774 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3775 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
3776 mc1_end = mc0_end + mc1_size;
3777 if (offset < mc1_end) {
3778 memtype = MEM_MC1;
3779 memaddr = offset - mc0_end;
3781 /* offset beyond the end of any memory */
3787 spin_lock(&adap->win0_lock);
3788 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3789 spin_unlock(&adap->win0_lock);
3793 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3797 EXPORT_SYMBOL(cxgb4_read_tpte);
3799 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3802 struct adapter *adap;
3804 adap = netdev2adap(dev);
3805 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3806 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3808 return ((u64)hi << 32) | (u64)lo;
3810 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3812 int cxgb4_bar2_sge_qregs(struct net_device *dev,
3814 enum cxgb4_bar2_qtype qtype,
3816 unsigned int *pbar2_qid)
3818 return t4_bar2_sge_qregs(netdev2adap(dev),
3820 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3821 ? T4_BAR2_QTYPE_EGRESS
3822 : T4_BAR2_QTYPE_INGRESS),
3826 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3828 static struct pci_driver cxgb4_driver;
3830 static void check_neigh_update(struct neighbour *neigh)
3832 const struct device *parent;
3833 const struct net_device *netdev = neigh->dev;
3835 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3836 netdev = vlan_dev_real_dev(netdev);
3837 parent = netdev->dev.parent;
3838 if (parent && parent->driver == &cxgb4_driver.driver)
3839 t4_l2t_update(dev_get_drvdata(parent), neigh);
3842 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3846 case NETEVENT_NEIGH_UPDATE:
3847 check_neigh_update(data);
3849 case NETEVENT_REDIRECT:
3856 static bool netevent_registered;
3857 static struct notifier_block cxgb4_netevent_nb = {
3858 .notifier_call = netevent_cb
3861 static void drain_db_fifo(struct adapter *adap, int usecs)
3863 u32 v1, v2, lp_count, hp_count;
3866 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3867 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3868 if (is_t4(adap->params.chip)) {
3869 lp_count = G_LP_COUNT(v1);
3870 hp_count = G_HP_COUNT(v1);
3872 lp_count = G_LP_COUNT_T5(v1);
3873 hp_count = G_HP_COUNT_T5(v2);
3876 if (lp_count == 0 && hp_count == 0)
3878 set_current_state(TASK_UNINTERRUPTIBLE);
3879 schedule_timeout(usecs_to_jiffies(usecs));
3883 static void disable_txq_db(struct sge_txq *q)
3885 unsigned long flags;
3887 spin_lock_irqsave(&q->db_lock, flags);
3889 spin_unlock_irqrestore(&q->db_lock, flags);
3892 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3894 spin_lock_irq(&q->db_lock);
3895 if (q->db_pidx_inc) {
3896 /* Make sure that all writes to the TX descriptors
3897 * are committed before we tell HW about them.
3900 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3901 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3905 spin_unlock_irq(&q->db_lock);
3908 static void disable_dbs(struct adapter *adap)
3912 for_each_ethrxq(&adap->sge, i)
3913 disable_txq_db(&adap->sge.ethtxq[i].q);
3914 for_each_ofldrxq(&adap->sge, i)
3915 disable_txq_db(&adap->sge.ofldtxq[i].q);
3916 for_each_port(adap, i)
3917 disable_txq_db(&adap->sge.ctrlq[i].q);
3920 static void enable_dbs(struct adapter *adap)
3924 for_each_ethrxq(&adap->sge, i)
3925 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3926 for_each_ofldrxq(&adap->sge, i)
3927 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3928 for_each_port(adap, i)
3929 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3932 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3934 if (adap->uld_handle[CXGB4_ULD_RDMA])
3935 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3939 static void process_db_full(struct work_struct *work)
3941 struct adapter *adap;
3943 adap = container_of(work, struct adapter, db_full_task);
3945 drain_db_fifo(adap, dbfifo_drain_delay);
3947 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3948 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3949 DBFIFO_HP_INT | DBFIFO_LP_INT,
3950 DBFIFO_HP_INT | DBFIFO_LP_INT);
3953 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3955 u16 hw_pidx, hw_cidx;
3958 spin_lock_irq(&q->db_lock);
3959 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3962 if (q->db_pidx != hw_pidx) {
3965 if (q->db_pidx >= hw_pidx)
3966 delta = q->db_pidx - hw_pidx;
3967 else
3968 delta = q->size - hw_pidx + q->db_pidx;
3970 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3971 QID(q->cntxt_id) | PIDX(delta));
3976 spin_unlock_irq(&q->db_lock);
3978 CH_WARN(adap, "DB drop recovery failed.\n");
3980 static void recover_all_queues(struct adapter *adap)
3984 for_each_ethrxq(&adap->sge, i)
3985 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3986 for_each_ofldrxq(&adap->sge, i)
3987 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3988 for_each_port(adap, i)
3989 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3992 static void process_db_drop(struct work_struct *work)
3994 struct adapter *adap;
3996 adap = container_of(work, struct adapter, db_drop_task);
3998 if (is_t4(adap->params.chip)) {
3999 drain_db_fifo(adap, dbfifo_drain_delay);
4000 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4001 drain_db_fifo(adap, dbfifo_drain_delay);
4002 recover_all_queues(adap);
4003 drain_db_fifo(adap, dbfifo_drain_delay);
4005 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4007 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4008 u16 qid = (dropped_db >> 15) & 0x1ffff;
4009 u16 pidx_inc = dropped_db & 0x1fff;
4011 unsigned int bar2_qid;
4014 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
4015 &bar2_qoffset, &bar2_qid);
4017 dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
4018 qid, pidx_inc);
4020 writel(PIDX_T5(pidx_inc) | QID(bar2_qid),
4021 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
4023 /* Re-enable BAR2 WC */
4024 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4027 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4030 void t4_db_full(struct adapter *adap)
4032 if (is_t4(adap->params.chip)) {
4034 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4035 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4036 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4037 queue_work(adap->workq, &adap->db_full_task);
4041 void t4_db_dropped(struct adapter *adap)
4043 if (is_t4(adap->params.chip)) {
4045 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4047 queue_work(adap->workq, &adap->db_drop_task);
4050 static void uld_attach(struct adapter *adap, unsigned int uld)
4053 struct cxgb4_lld_info lli;
4056 lli.pdev = adap->pdev;
4058 lli.l2t = adap->l2t;
4059 lli.tids = &adap->tids;
4060 lli.ports = adap->port;
4061 lli.vr = &adap->vres;
4062 lli.mtus = adap->params.mtus;
4063 if (uld == CXGB4_ULD_RDMA) {
4064 lli.rxq_ids = adap->sge.rdma_rxq;
4065 lli.ciq_ids = adap->sge.rdma_ciq;
4066 lli.nrxq = adap->sge.rdmaqs;
4067 lli.nciq = adap->sge.rdmaciqs;
4068 } else if (uld == CXGB4_ULD_ISCSI) {
4069 lli.rxq_ids = adap->sge.ofld_rxq;
4070 lli.nrxq = adap->sge.ofldqsets;
4072 lli.ntxq = adap->sge.ofldqsets;
4073 lli.nchan = adap->params.nports;
4074 lli.nports = adap->params.nports;
4075 lli.wr_cred = adap->params.ofldq_wr_cred;
4076 lli.adapter_type = adap->params.chip;
4077 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4078 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
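/* Note: this assumes vpd.cclk is the Core Clock in kHz (the usual cxgb4
* convention), so 10^9 / cclk yields the clock period in picoseconds;
* e.g. a 500 MHz core clock (cclk = 500000) gives cclk_ps = 2000.
*/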
4079 lli.udb_density = 1 << adap->params.sge.eq_qpp;
4080 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
4081 lli.filt_mode = adap->params.tp.vlan_pri_map;
4082 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4083 for (i = 0; i < NCHAN; i++)
4085 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4086 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4087 lli.fw_vers = adap->params.fw_vers;
4088 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4089 lli.sge_ingpadboundary = adap->sge.fl_align;
4090 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4091 lli.sge_pktshift = adap->sge.pktshift;
4092 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4093 lli.max_ordird_qp = adap->params.max_ordird_qp;
4094 lli.max_ird_adapter = adap->params.max_ird_adapter;
4095 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4097 handle = ulds[uld].add(&lli);
4098 if (IS_ERR(handle)) {
4099 dev_warn(adap->pdev_dev,
4100 "could not attach to the %s driver, error %ld\n",
4101 uld_str[uld], PTR_ERR(handle));
4105 adap->uld_handle[uld] = handle;
4107 if (!netevent_registered) {
4108 register_netevent_notifier(&cxgb4_netevent_nb);
4109 netevent_registered = true;
4112 if (adap->flags & FULL_INIT_DONE)
4113 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4116 static void attach_ulds(struct adapter *adap)
4120 spin_lock(&adap_rcu_lock);
4121 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4122 spin_unlock(&adap_rcu_lock);
4124 mutex_lock(&uld_mutex);
4125 list_add_tail(&adap->list_node, &adapter_list);
4126 for (i = 0; i < CXGB4_ULD_MAX; i++)
4128 uld_attach(adap, i);
4129 mutex_unlock(&uld_mutex);
4132 static void detach_ulds(struct adapter *adap)
4136 mutex_lock(&uld_mutex);
4137 list_del(&adap->list_node);
4138 for (i = 0; i < CXGB4_ULD_MAX; i++)
4139 if (adap->uld_handle[i]) {
4140 ulds[i].state_change(adap->uld_handle[i],
4141 CXGB4_STATE_DETACH);
4142 adap->uld_handle[i] = NULL;
4144 if (netevent_registered && list_empty(&adapter_list)) {
4145 unregister_netevent_notifier(&cxgb4_netevent_nb);
4146 netevent_registered = false;
4148 mutex_unlock(&uld_mutex);
4150 spin_lock(&adap_rcu_lock);
4151 list_del_rcu(&adap->rcu_node);
4152 spin_unlock(&adap_rcu_lock);
4155 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4159 mutex_lock(&uld_mutex);
4160 for (i = 0; i < CXGB4_ULD_MAX; i++)
4161 if (adap->uld_handle[i])
4162 ulds[i].state_change(adap->uld_handle[i], new_state);
4163 mutex_unlock(&uld_mutex);
4167 * cxgb4_register_uld - register an upper-layer driver
4168 * @type: the ULD type
4169 * @p: the ULD methods
4171 * Registers an upper-layer driver with this driver and notifies the ULD
4172 * about any presently available devices that support its type. Returns
4173 * %-EBUSY if a ULD of the same type is already registered.
4175 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4178 struct adapter *adap;
4180 if (type >= CXGB4_ULD_MAX)
4182 mutex_lock(&uld_mutex);
4183 if (ulds[type].add) {
4188 list_for_each_entry(adap, &adapter_list, list_node)
4189 uld_attach(adap, type);
4190 out: mutex_unlock(&uld_mutex);
4193 EXPORT_SYMBOL(cxgb4_register_uld);
4196 * cxgb4_unregister_uld - unregister an upper-layer driver
4197 * @type: the ULD type
4199 * Unregisters an existing upper-layer driver.
4201 int cxgb4_unregister_uld(enum cxgb4_uld type)
4203 struct adapter *adap;
4205 if (type >= CXGB4_ULD_MAX)
4207 mutex_lock(&uld_mutex);
4208 list_for_each_entry(adap, &adapter_list, list_node)
4209 adap->uld_handle[type] = NULL;
4210 ulds[type].add = NULL;
4211 mutex_unlock(&uld_mutex);
4214 EXPORT_SYMBOL(cxgb4_unregister_uld);
4216 /* Check whether the netdev on which the event occurred belongs to us.
4217 * Return success (true) if it belongs, otherwise failure (false).
4218 * Called with rcu_read_lock() held.
4220 #if IS_ENABLED(CONFIG_IPV6)
4221 static bool cxgb4_netdev(const struct net_device *netdev)
4223 struct adapter *adap;
4226 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4227 for (i = 0; i < MAX_NPORTS; i++)
4228 if (adap->port[i] == netdev)
4233 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4234 unsigned long event)
4236 int ret = NOTIFY_DONE;
4239 if (cxgb4_netdev(event_dev)) {
4242 ret = cxgb4_clip_get(event_dev, &ifa->addr);
4250 cxgb4_clip_release(event_dev, &ifa->addr);
4261 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4262 unsigned long event, void *data)
4264 struct inet6_ifaddr *ifa = data;
4265 struct net_device *event_dev;
4266 int ret = NOTIFY_DONE;
4267 struct bonding *bond = netdev_priv(ifa->idev->dev);
4268 struct list_head *iter;
4269 struct slave *slave;
4270 struct pci_dev *first_pdev = NULL;
4272 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4273 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4274 ret = clip_add(event_dev, ifa, event);
4275 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4276 /* It is possible that two different adapters are bonded in one
4277 * bond. We need to find all such distinct adapters and add a CLIP
4278 * entry in each of them exactly once.
4280 bond_for_each_slave(bond, slave, iter) {
4282 ret = clip_add(slave->dev, ifa, event);
4283 /* Only initialize first_pdev if clip_add succeeds, since
4284 * that means the slave is one of our devices.
4286 if (ret == NOTIFY_OK)
4287 first_pdev = to_pci_dev(
4288 slave->dev->dev.parent);
4289 } else if (first_pdev !=
4290 to_pci_dev(slave->dev->dev.parent))
4291 ret = clip_add(slave->dev, ifa, event);
4294 ret = clip_add(ifa->idev->dev, ifa, event);
4299 static struct notifier_block cxgb4_inet6addr_notifier = {
4300 .notifier_call = cxgb4_inet6addr_handler
4303 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4304 * a physical device.
4305 * The physical device reference is needed to send the actual CLIP command.
4307 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4309 struct inet6_dev *idev = NULL;
4310 struct inet6_ifaddr *ifa;
4313 idev = __in6_dev_get(root_dev);
4317 read_lock_bh(&idev->lock);
4318 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4319 ret = cxgb4_clip_get(dev, &ifa->addr);
4323 read_unlock_bh(&idev->lock);
4328 static int update_root_dev_clip(struct net_device *dev)
4330 struct net_device *root_dev = NULL;
4333 /* First populate the real net device's IPv6 addresses */
4334 ret = update_dev_clip(dev, dev);
4338 /* Parse all bond and vlan devices layered on top of the physical dev */
4339 root_dev = netdev_master_upper_dev_get_rcu(dev);
4341 ret = update_dev_clip(root_dev, dev);
4346 for (i = 0; i < VLAN_N_VID; i++) {
4347 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4351 ret = update_dev_clip(root_dev, dev);
4358 static void update_clip(const struct adapter *adap)
4361 struct net_device *dev;
4366 for (i = 0; i < MAX_NPORTS; i++) {
4367 dev = adap->port[i];
4371 ret = update_root_dev_clip(dev);
4378 #endif /* IS_ENABLED(CONFIG_IPV6) */
4381 * cxgb_up - enable the adapter
4382 * @adap: adapter being enabled
4384 * Called when the first port is enabled, this function performs the
4385 * actions necessary to make an adapter operational, such as completing
4386 * the initialization of HW modules, and enabling interrupts.
4388 * Must be called with the rtnl lock held.
4390 static int cxgb_up(struct adapter *adap)
4394 err = setup_sge_queues(adap);
4397 err = setup_rss(adap);
4401 if (adap->flags & USING_MSIX) {
4402 name_msix_vecs(adap);
4403 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4404 adap->msix_info[0].desc, adap);
4408 err = request_msix_queue_irqs(adap);
4410 free_irq(adap->msix_info[0].vec, adap);
4414 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4415 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4416 adap->port[0]->name, adap);
4422 t4_intr_enable(adap);
4423 adap->flags |= FULL_INIT_DONE;
4424 notify_ulds(adap, CXGB4_STATE_UP);
4425 #if IS_ENABLED(CONFIG_IPV6)
4431 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4433 t4_free_sge_resources(adap);
4437 static void cxgb_down(struct adapter *adapter)
4439 t4_intr_disable(adapter);
4440 cancel_work_sync(&adapter->tid_release_task);
4441 cancel_work_sync(&adapter->db_full_task);
4442 cancel_work_sync(&adapter->db_drop_task);
4443 adapter->tid_release_task_busy = false;
4444 adapter->tid_release_head = NULL;
4446 if (adapter->flags & USING_MSIX) {
4447 free_msix_queue_irqs(adapter);
4448 free_irq(adapter->msix_info[0].vec, adapter);
4450 free_irq(adapter->pdev->irq, adapter);
4451 quiesce_rx(adapter);
4452 t4_sge_stop(adapter);
4453 t4_free_sge_resources(adapter);
4454 adapter->flags &= ~FULL_INIT_DONE;
4458 * net_device operations
4460 static int cxgb_open(struct net_device *dev)
4463 struct port_info *pi = netdev_priv(dev);
4464 struct adapter *adapter = pi->adapter;
4466 netif_carrier_off(dev);
4468 if (!(adapter->flags & FULL_INIT_DONE)) {
4469 err = cxgb_up(adapter);
4474 err = link_start(dev);
4476 netif_tx_start_all_queues(dev);
4480 static int cxgb_close(struct net_device *dev)
4482 struct port_info *pi = netdev_priv(dev);
4483 struct adapter *adapter = pi->adapter;
4485 netif_tx_stop_all_queues(dev);
4486 netif_carrier_off(dev);
4487 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4490 /* Return an error number if the indicated filter isn't writable ...
4492 static int writable_filter(struct filter_entry *f)
4502 /* Delete the filter at the specified index (if valid). This checks for
4503 * all the common problems with doing this, like the filter being locked,
4504 * currently pending in another operation, etc.
4506 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4508 struct filter_entry *f;
4511 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4514 f = &adapter->tids.ftid_tab[fidx];
4515 ret = writable_filter(f);
4519 return del_filter_wr(adapter, fidx);
4524 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4525 __be32 sip, __be16 sport, __be16 vlan,
4526 unsigned int queue, unsigned char port, unsigned char mask)
4529 struct filter_entry *f;
4530 struct adapter *adap;
4534 adap = netdev2adap(dev);
4536 /* Adjust stid to correct filter index */
4537 stid -= adap->tids.sftid_base;
4538 stid += adap->tids.nftids;
4540 /* Check to make sure the filter requested is writable ...
4542 f = &adap->tids.ftid_tab[stid];
4543 ret = writable_filter(f);
4547 /* Clear out any old resources being used by the filter before
4548 * we start constructing the new filter.
4551 clear_filter(adap, f);
4553 /* Clear out filter specifications */
4554 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4555 f->fs.val.lport = cpu_to_be16(sport);
4556 f->fs.mask.lport = ~0;
4558 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4559 for (i = 0; i < 4; i++) {
4560 f->fs.val.lip[i] = val[i];
4561 f->fs.mask.lip[i] = ~0;
4563 if (adap->params.tp.vlan_pri_map & F_PORT) {
4564 f->fs.val.iport = port;
4565 f->fs.mask.iport = mask;
4569 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4570 f->fs.val.proto = IPPROTO_TCP;
4571 f->fs.mask.proto = ~0;
4576 /* Mark filter as locked */
4580 ret = set_filter_wr(adap, stid);
4582 clear_filter(adap, f);
4588 EXPORT_SYMBOL(cxgb4_create_server_filter);
4590 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4591 unsigned int queue, bool ipv6)
4594 struct filter_entry *f;
4595 struct adapter *adap;
4597 adap = netdev2adap(dev);
4599 /* Adjust stid to correct filter index */
4600 stid -= adap->tids.sftid_base;
4601 stid += adap->tids.nftids;
4603 f = &adap->tids.ftid_tab[stid];
4604 /* Unlock the filter */
4607 ret = delete_filter(adap, stid);
4613 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4615 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4616 struct rtnl_link_stats64 *ns)
4618 struct port_stats stats;
4619 struct port_info *p = netdev_priv(dev);
4620 struct adapter *adapter = p->adapter;
4622 /* Block retrieving statistics during EEH error
4623 * recovery. Otherwise, the recovery might fail
4624 * and the PCI device will be removed permanently
4626 spin_lock(&adapter->stats_lock);
4627 if (!netif_device_present(dev)) {
4628 spin_unlock(&adapter->stats_lock);
4631 t4_get_port_stats(adapter, p->tx_chan, &stats);
4632 spin_unlock(&adapter->stats_lock);
4634 ns->tx_bytes = stats.tx_octets;
4635 ns->tx_packets = stats.tx_frames;
4636 ns->rx_bytes = stats.rx_octets;
4637 ns->rx_packets = stats.rx_frames;
4638 ns->multicast = stats.rx_mcast_frames;
4640 /* detailed rx_errors */
4641 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4643 ns->rx_over_errors = 0;
4644 ns->rx_crc_errors = stats.rx_fcs_err;
4645 ns->rx_frame_errors = stats.rx_symbol_err;
4646 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4647 stats.rx_ovflow2 + stats.rx_ovflow3 +
4648 stats.rx_trunc0 + stats.rx_trunc1 +
4649 stats.rx_trunc2 + stats.rx_trunc3;
4650 ns->rx_missed_errors = 0;
4652 /* detailed tx_errors */
4653 ns->tx_aborted_errors = 0;
4654 ns->tx_carrier_errors = 0;
4655 ns->tx_fifo_errors = 0;
4656 ns->tx_heartbeat_errors = 0;
4657 ns->tx_window_errors = 0;
4659 ns->tx_errors = stats.tx_error_frames;
4660 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4661 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4665 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4668 int ret = 0, prtad, devad;
4669 struct port_info *pi = netdev_priv(dev);
4670 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4674 if (pi->mdio_addr < 0)
4676 data->phy_id = pi->mdio_addr;
4680 if (mdio_phy_id_is_c45(data->phy_id)) {
4681 prtad = mdio_phy_id_prtad(data->phy_id);
4682 devad = mdio_phy_id_devad(data->phy_id);
4683 } else if (data->phy_id < 32) {
4684 prtad = data->phy_id;
4686 data->reg_num &= 0x1f;
4690 mbox = pi->adapter->fn;
4691 if (cmd == SIOCGMIIREG)
4692 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4693 data->reg_num, &data->val_out);
4695 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4696 data->reg_num, data->val_in);
4704 static void cxgb_set_rxmode(struct net_device *dev)
4706 /* unfortunately we can't return errors to the stack */
4707 set_rxmode(dev, -1, false);
4710 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4713 struct port_info *pi = netdev_priv(dev);
4715 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4717 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4724 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4727 struct sockaddr *addr = p;
4728 struct port_info *pi = netdev_priv(dev);
4730 if (!is_valid_ether_addr(addr->sa_data))
4731 return -EADDRNOTAVAIL;
4733 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4734 pi->xact_addr_filt, addr->sa_data, true, true);
4738 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4739 pi->xact_addr_filt = ret;
4743 #ifdef CONFIG_NET_POLL_CONTROLLER
4744 static void cxgb_netpoll(struct net_device *dev)
4746 struct port_info *pi = netdev_priv(dev);
4747 struct adapter *adap = pi->adapter;
4749 if (adap->flags & USING_MSIX) {
4751 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4753 for (i = pi->nqsets; i; i--, rx++)
4754 t4_sge_intr_msix(0, &rx->rspq);
4756 t4_intr_handler(adap)(0, adap);
4760 static const struct net_device_ops cxgb4_netdev_ops = {
4761 .ndo_open = cxgb_open,
4762 .ndo_stop = cxgb_close,
4763 .ndo_start_xmit = t4_eth_xmit,
4764 .ndo_select_queue = cxgb_select_queue,
4765 .ndo_get_stats64 = cxgb_get_stats,
4766 .ndo_set_rx_mode = cxgb_set_rxmode,
4767 .ndo_set_mac_address = cxgb_set_mac_addr,
4768 .ndo_set_features = cxgb_set_features,
4769 .ndo_validate_addr = eth_validate_addr,
4770 .ndo_do_ioctl = cxgb_ioctl,
4771 .ndo_change_mtu = cxgb_change_mtu,
4772 #ifdef CONFIG_NET_POLL_CONTROLLER
4773 .ndo_poll_controller = cxgb_netpoll,
4777 void t4_fatal_err(struct adapter *adap)
4779 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4780 t4_intr_disable(adap);
4781 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4784 /* Return the specified PCI-E Configuration Space register from our Physical
4785 * Function. We try first via a Firmware LDST Command since we prefer to let
4786 * the firmware own all of these registers, but if that fails we go for it
4787 * directly ourselves.
4789 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4791 struct fw_ldst_cmd ldst_cmd;
4795 /* Construct and send the Firmware LDST Command to retrieve the
4796 * specified PCI-E Configuration Space register.
4798 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4799 ldst_cmd.op_to_addrspace =
4800 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4803 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
4804 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4805 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
4806 ldst_cmd.u.pcie.ctrl_to_fn =
4807 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
4808 ldst_cmd.u.pcie.r = reg;
4809 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4812 /* If the LDST Command succeeded, extract the returned register
4813 * value. Otherwise read it directly ourselves.
4816 val = ntohl(ldst_cmd.u.pcie.data[0]);
4818 t4_hw_pci_read_cfg4(adap, reg, &val);
4823 static void setup_memwin(struct adapter *adap)
4825 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4827 if (is_t4(adap->params.chip)) {
4830 /* Truncation intentional: we only read the bottom 32-bits of
4831 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4832 * mechanism to read BAR0 instead of using
4833 * pci_resource_start() because we could be operating from
4834 * within a Virtual Machine which is trapping our accesses to
4835 * our Configuration Space and we need to set up the PCI-E
4836 * Memory Window decoders with the actual addresses which will
4837 * be coming across the PCI-E link.
4839 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4840 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4841 adap->t4_bar0 = bar0;
4843 mem_win0_base = bar0 + MEMWIN0_BASE;
4844 mem_win1_base = bar0 + MEMWIN1_BASE;
4845 mem_win2_base = bar0 + MEMWIN2_BASE;
4846 mem_win2_aperture = MEMWIN2_APERTURE;
4848 /* For T5, only relative offset inside the PCIe BAR is passed */
4849 mem_win0_base = MEMWIN0_BASE;
4850 mem_win1_base = MEMWIN1_BASE;
4851 mem_win2_base = MEMWIN2_BASE_T5;
4852 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4854 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4855 mem_win0_base | BIR(0) |
4856 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4857 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4858 mem_win1_base | BIR(0) |
4859 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4860 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4861 mem_win2_base | BIR(0) |
4862 WINDOW(ilog2(mem_win2_aperture) - 10));
4863 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4866 static void setup_memwin_rdma(struct adapter *adap)
4868 if (adap->vres.ocq.size) {
4872 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4873 start &= PCI_BASE_ADDRESS_MEM_MASK;
4874 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4875 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
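/* For example, a 256 KB on-chip queue region gives sz_kb = 256 and a
* window size encoding of WINDOW(ilog2(256)) = WINDOW(8).
*/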
4877 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4878 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4880 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4881 adap->vres.ocq.start);
4883 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4887 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4892 /* get device capabilities */
4893 memset(c, 0, sizeof(*c));
4894 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4895 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4896 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4897 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4901 /* select capabilities we'll be using */
4902 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4904 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4906 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4907 } else if (vf_acls) {
4908 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4911 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4912 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4913 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4917 ret = t4_config_glbl_rss(adap, adap->fn,
4918 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4919 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4920 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4924 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4925 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4931 /* tweak some settings */
4932 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4933 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4934 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4935 v = t4_read_reg(adap, TP_PIO_DATA);
4936 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4938 /* first 4 Tx modulation queues point to consecutive Tx channels */
4939 adap->params.tp.tx_modq_map = 0xE4;
4940 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4941 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4943 /* associate each Tx modulation queue with consecutive Tx channels */
4945 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4946 &v, 1, A_TP_TX_SCHED_HDR);
4947 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4948 &v, 1, A_TP_TX_SCHED_FIFO);
4949 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4950 &v, 1, A_TP_TX_SCHED_PCMD);
4952 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4953 if (is_offload(adap)) {
4954 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4955 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4956 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4957 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4958 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4959 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4960 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4961 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4962 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4963 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4966 /* get basic stuff going */
4967 return t4_early_init(adap, adap->fn);
4971 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4973 #define MAX_ATIDS 8192U
4976 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4978 * If the firmware we're dealing with has Configuration File support, then
4979 * we use that to perform all configuration
4983 * Tweak configuration based on module parameters, etc. Most of these have
4984 * defaults assigned to them by Firmware Configuration Files (if we're using
4985 * them) but need to be explicitly set if we're using hard-coded
4986 * initialization. But even in the case of using Firmware Configuration
4987 * Files, we'd like to expose the ability to change these via module
4988 * parameters, so these are essentially common tweaks/settings for
4989 * Configuration Files and hard-coded initialization ...
4991 static int adap_init0_tweaks(struct adapter *adapter)
4994 * Fix up various Host-Dependent Parameters like Page Size, Cache
4995 * Line Size, etc. The firmware default is for a 4KB Page Size and
4996 * 64B Cache Line Size ...
4998 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5001 * Process module parameters which affect early initialization.
5003 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5004 dev_err(&adapter->pdev->dev,
5005 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5009 t4_set_reg_field(adapter, SGE_CONTROL,
5011 PKTSHIFT(rx_dma_offset));
5014 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5015 * adds the pseudo header itself.
5017 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5018 CSUM_HAS_PSEUDO_HDR, 0);
5024 * Attempt to initialize the adapter via a Firmware Configuration File.
5026 static int adap_init0_config(struct adapter *adapter, int reset)
5028 struct fw_caps_config_cmd caps_cmd;
5029 const struct firmware *cf;
5030 unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
5034 char *fw_config_file, fw_config_file_path[256];
5035 char *config_name = NULL;
	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}
5048 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5049 * then use that. Otherwise, use the configuration file stored
5050 * in the adapter flash ...
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}
	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];
5074 sprintf(fw_config_file_path,
5075 "/lib/firmware/%s", fw_config_file);
5076 config_name = fw_config_file_path;
		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
5081 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5082 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5083 ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below, addresses and
				 * sizes must be multiples of 4 bytes.  So if
				 * the Configuration File isn't a multiple of
				 * 4 bytes in length, we'll have to write the
				 * residual bytes separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
5096 size_t resid = cf->size & 0x3;
5097 size_t size = cf->size & ~0x3;
5098 __be32 *data = (__be32 *)cf->data;
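				/* Worked example: a 1027-byte Configuration
				 * File yields size = 1024 and resid = 3; the
				 * first 1024 bytes are written in one shot
				 * below and the final word is zero-padded by
				 * the residual path that follows.
				 */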
5100 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
5101 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
5103 spin_lock(&adapter->win0_lock);
5104 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5105 size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size, 4,
							   &last.word,
							   T4_MEMORY_WRITE);
				}
5121 spin_unlock(&adapter->win0_lock);
		release_firmware(cf);
		if (ret)
			goto bye;
	}
5131 * Issue a Capability Configuration command to the firmware to get it
5132 * to parse the Configuration File. We don't use t4_fw_config_file()
5133 * because we want the ability to modify various features after we've
5134 * processed the configuration file ...
5136 memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
5141 caps_cmd.cfvalid_to_len16 =
5142 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
5143 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
5144 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
5145 FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
5149 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5150 * Configuration File in FLASH), our last gasp effort is to use the
5151 * Firmware Configuration File which is embedded in the firmware. A
5152 * very few early versions of the firmware didn't have one embedded
5153 * but we can ignore those.
5155 if (ret == -ENOENT) {
5156 memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
5161 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5162 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5163 sizeof(caps_cmd), &caps_cmd);
5164 config_name = "Firmware Default";
5171 finiver = ntohl(caps_cmd.finiver);
5172 finicsum = ntohl(caps_cmd.finicsum);
5173 cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);
5180 * And now tell the firmware to use the configuration we just loaded.
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;
5193 * Tweak configuration based on system architecture, module
5196 ret = adap_init0_tweaks(adapter);
5201 * And finally tell the firmware to initialize itself using the
5202 * parameters from the Configuration File.
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;
	/*
	 * Return successfully, noting that we're operating with parameters
	 * from the Configuration File rather than hard-wired initialization
	 * constants buried in the driver.
	 */
5213 adapter->flags |= USING_SOFT_PARAMS;
5214 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5215 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;
	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
5232 * Attempt to initialize the adapter via hard-coded, driver supplied
5235 static int adap_init0_no_config(struct adapter *adapter, int reset)
5237 struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;
	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}
5253 * Get device capabilities and select which we'll be using.
5255 memset(&caps_cmd, 0, sizeof(caps_cmd));
5256 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5257 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5258 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;
	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5269 } else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
5273 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5274 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;
5281 * Tweak configuration based on system architecture, module
5284 ret = adap_init0_tweaks(adapter);
5289 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5290 * mode which maps each Virtual Interface to its own section of
5291 * the RSS Table and we turn on all map and hash enables ...
5293 adapter->flags |= RSS_TNLALLLOOKUP;
5294 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5295 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5296 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
5297 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F |
5298 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5299 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F : 0));
5304 * Set up our own fundamental resource provisioning ...
5306 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5307 PFRES_NEQ, PFRES_NETHCTRL,
5308 PFRES_NIQFLINT, PFRES_NIQ,
5309 PFRES_TC, PFRES_NVI,
5310 FW_PFVF_CMD_CMASK_M,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;
5318 * Perform low level SGE initialization. We need to do this before we
5319 * send the firmware the INITIALIZE command because that will cause
5320 * any other PF Drivers which are waiting for the Master
5321 * Initialization to proceed forward.
5323 for (i = 0; i < SGE_NTIMERS - 1; i++)
5324 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5325 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5326 s->counter_val[0] = 1;
5327 for (i = 1; i < SGE_NCOUNTERS; i++)
5328 s->counter_val[i] = min(intr_cnt[i - 1],
5329 THRESHOLD_0_GET(THRESHOLD_0_MASK));
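	/* Each (timer, counter) value programmed here is a candidate
	 * interrupt holdoff setting for an ingress queue: the queue raises
	 * an interrupt when its holdoff timer expires or when the pending
	 * entry count crosses the selected threshold, whichever happens
	 * first.
	 */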
5330 t4_sge_init(adapter);
5332 #ifdef CONFIG_PCI_IOV
5334 * Provision resource limits for Virtual Functions. We currently
5335 * grant them all the same static resource limits except for the Port
5336 * Access Rights Mask which we're assigning based on the PF. All of
5337 * the static provisioning stuff for both the PF and VF really needs
5338 * to be managed in a persistent manner for each device which the
5339 * firmware controls.
5344 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5345 if (num_vf[pf] <= 0)
5348 /* VF numbering starts at 1! */
5349 for (vf = 1; vf <= num_vf[pf]; vf++) {
			ret = t4_cfg_pfvf(adapter, adapter->mbox,
					  pf, vf,
					  VFRES_NEQ, VFRES_NETHCTRL,
					  VFRES_NIQFLINT, VFRES_NIQ,
					  VFRES_TC, VFRES_NVI,
					  FW_PFVF_CMD_CMASK_M,
					  pfvfres_pmask(adapter, pf, vf),
					  VFRES_NEXACTF,
					  VFRES_R_CAPS, VFRES_WX_CAPS);
			if (ret < 0)
				dev_warn(adapter->pdev_dev,
					 "failed to provision pf/vf=%d/%d; "
					 "err=%d\n", pf, vf, ret);
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;
5380 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
5403 case ETHERTYPE_MASK:
5409 case MPSHITTYPE_MASK:
5412 case FRAGMENTATION_MASK:
5418 dev_err(adapter->pdev_dev,
5419 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5420 " using %#x\n", tp_vlan_pri_map, bits,
5421 TP_VLAN_PRI_MAP_DEFAULT);
5422 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5425 v = tp_vlan_pri_map;
5426 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5427 &v, 1, TP_VLAN_PRI_MAP);
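	/* For a sense of the 36-bit budget mentioned above: the two widest
	 * compressed fields are VLAN (17 bits, including a valid bit) and
	 * Ethertype (16 bits), so selecting just those two already consumes
	 * 33 of the 36 available bits.  (Field widths quoted here for
	 * illustration.)
	 */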
	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
5439 if (tp_vlan_pri_map)
5440 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5441 FIVETUPLELOOKUP_MASK,
5442 FIVETUPLELOOKUP_MASK);
5445 * Tweak some settings.
5447 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5448 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5449 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5450 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5453 * Get basic stuff going by issuing the Firmware Initialize command.
5454 * Note that this _must_ be after all PFVF commands ...
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;
5461 * Return successfully!
5463 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5464 "driver parameters\n");
5468 * Something bad happened. Return the error ...
5474 static struct fw_info fw_info_array[] = {
5477 .fs_name = FW4_CFNAME,
5478 .fw_mod_name = FW4_FNAME,
5480 .chip = FW_HDR_CHIP_T4,
5481 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5482 .intfver_nic = FW_INTFVER(T4, NIC),
5483 .intfver_vnic = FW_INTFVER(T4, VNIC),
5484 .intfver_ri = FW_INTFVER(T4, RI),
5485 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5486 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5490 .fs_name = FW5_CFNAME,
5491 .fw_mod_name = FW5_FNAME,
5493 .chip = FW_HDR_CHIP_T5,
5494 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5495 .intfver_nic = FW_INTFVER(T5, NIC),
5496 .intfver_vnic = FW_INTFVER(T5, VNIC),
5497 .intfver_ri = FW_INTFVER(T5, RI),
5498 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5499 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5504 static struct fw_info *find_fw_info(int chip)
5508 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5509 if (fw_info_array[i].chip == chip)
5510 return &fw_info_array[i];
5516 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5518 static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
5523 u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;
	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
5540 if (ret == adap->mbox)
5541 adap->flags |= MASTER_PF;
5542 if (force_init && state == DEV_STATE_INIT)
5543 state = DEV_STATE_UNINIT;
5546 * If we're the Master PF Driver and the device is uninitialized,
5547 * then let's consider upgrading the firmware ... (We always want
5548 * to check the firmware version number in order to A. get it for
5549 * later reporting and B. to warn if the currently loaded firmware
5550 * is excessively mismatched relative to the driver.)
5552 t4_get_fw_version(adap, &adap->params.fw_vers);
5553 t4_get_tp_version(adap, &adap->params.tp_vers);
5554 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5555 struct fw_info *fw_info;
5556 struct fw_hdr *card_fw;
5557 const struct firmware *fw;
5558 const u8 *fw_data = NULL;
5559 unsigned int fw_size = 0;
		/* This is the firmware whose headers the driver was compiled
		 * against.
		 */
5564 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5565 if (fw_info == NULL) {
5566 dev_err(adap->pdev_dev,
5567 "unable to get firmware info for chip %d.\n",
5568 CHELSIO_CHIP_VERSION(adap->params.chip));
		/* allocate memory to read the header of the firmware on the
		 * adapter
		 */
5575 card_fw = t4_alloc_mem(sizeof(*card_fw));
		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
5581 dev_err(adap->pdev_dev,
5582 "unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}
5589 /* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
5595 release_firmware(fw);
5596 t4_free_mem(card_fw);
5603 * Grab VPD parameters. This should be done after we establish a
5604 * connection to the firmware since some of the VPD parameters
5605 * (notably the Core Clock frequency) are retrieved via requests to
5606 * the firmware. On the other hand, we need these fairly early on
5607 * so we do this right after getting ahold of the firmware.
5609 ret = get_vpd_params(adap, &adap->params.vpd);
	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
5621 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5625 adap->params.nports = hweight32(port_vec);
5626 adap->params.portvec = port_vec;
	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
5634 if (state == DEV_STATE_INIT) {
5635 dev_info(adap->pdev_dev, "Coming up as %s: "\
5636 "Adapter already initialized\n",
5637 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5638 adap->flags |= USING_SOFT_PARAMS;
5640 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5641 "Initializing adapter\n");
		/*
		 * If the firmware doesn't support Configuration Files,
		 * warn the user.
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
5653 * Find out whether we're dealing with a version of
5654 * the firmware which has configuration file support.
5656 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5657 FW_PARAMS_PARAM_X_V(
5658 FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
5674 * The firmware provides us with a memory
5675 * buffer where we can load a Configuration
5676 * File from the host if we want to override
5677 * the Configuration File in flash.
5680 ret = adap_init0_config(adap, reset);
5681 if (ret == -ENOENT) {
5682 dev_info(adap->pdev_dev,
5683 "No Configuration File present "
5684 "on adapter. Using hard-wired "
5685 "configuration parameters.\n");
5686 ret = adap_init0_no_config(adap, reset);
5691 dev_err(adap->pdev_dev,
5692 "could not initialize adapter, error %d\n",
5699 * If we're living with non-hard-coded parameters (either from a
5700 * Firmware Configuration File or values programmed by a different PF
5701 * Driver), give the SGE code a chance to pull in anything that it
5702 * needs ... Note that this must be called after we retrieve our VPD
5703 * parameters in order to know how to convert core ticks to seconds.
5705 if (adap->flags & USING_SOFT_PARAMS) {
5706 ret = t4_sge_init(adap);
5711 if (is_bypass_device(adap->pdev->device))
5712 adap->params.bypass = 1;
5715 * Grab some of our basic fundamental operating parameters.
5717 #define FW_PARAM_DEV(param) \
5718 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
5719 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
5721 #define FW_PARAM_PFVF(param) \
5722 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
5723 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
5724 FW_PARAMS_PARAM_Y_V(0) | \
5725 FW_PARAMS_PARAM_Z_V(0)
5727 params[0] = FW_PARAM_PFVF(EQ_START);
5728 params[1] = FW_PARAM_PFVF(L2T_START);
5729 params[2] = FW_PARAM_PFVF(L2T_END);
5730 params[3] = FW_PARAM_PFVF(FILTER_START);
5731 params[4] = FW_PARAM_PFVF(FILTER_END);
5732 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5733 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5736 adap->sge.egr_start = val[0];
5737 adap->l2t_start = val[1];
5738 adap->l2t_end = val[2];
5739 adap->tids.ftid_base = val[3];
5740 adap->tids.nftids = val[4] - val[3] + 1;
5741 adap->sge.ingr_start = val[5];
5743 /* query params related to active filter region */
5744 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5745 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5746 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If the Active Filter region is non-empty, we can enable
	 * establishing offload connections through firmware work requests.
	 */
5750 if ((val[0] != val[1]) && (ret >= 0)) {
5751 adap->flags |= FW_OFLD_CONN;
5752 adap->tids.aftid_base = val[0];
5753 adap->tids.aftid_end = val[1];
5756 /* If we're running on newer firmware, let it know that we're
5757 * prepared to deal with encapsulated CPL messages. Older
5758 * firmware won't understand this and we'll just get
5759 * unencapsulated messages ...
5761 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5763 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5766 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5767 * capability. Earlier versions of the firmware didn't have the
5768 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5769 * permission to use ULPTX MEMWRITE DSGL.
5771 if (is_t4(adap->params.chip)) {
5772 adap->params.ulptx_memwrite_dsgl = false;
5774 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5775 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5777 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
5784 memset(&caps_cmd, 0, sizeof(caps_cmd));
5785 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5786 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5787 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5788 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5793 if (caps_cmd.ofldcaps) {
5794 /* query offload-related parameters */
5795 params[0] = FW_PARAM_DEV(NTID);
5796 params[1] = FW_PARAM_PFVF(SERVER_START);
5797 params[2] = FW_PARAM_PFVF(SERVER_END);
5798 params[3] = FW_PARAM_PFVF(TDDP_START);
5799 params[4] = FW_PARAM_PFVF(TDDP_END);
5800 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5801 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5805 adap->tids.ntids = val[0];
5806 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
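		/* e.g. if the firmware reports ntids == 24576, this gives
		 * natids = min(24576 / 2, MAX_ATIDS) = 8192.
		 */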
5807 adap->tids.stid_base = val[1];
5808 adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts: regular filters get 1/3rd
		 * and server filters get 2/3rds.  This is only enabled if
		 * the workaround path is enabled.
		 * 1. Regular filters.
		 * 2. Server filters: these are special filters used to
		 *    redirect SYN packets to the offload queue.
		 */
5818 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5819 adap->tids.sftid_base = adap->tids.ftid_base +
5820 DIV_ROUND_UP(adap->tids.nftids, 3);
5821 adap->tids.nsftids = adap->tids.nftids -
5822 DIV_ROUND_UP(adap->tids.nftids, 3);
5823 adap->tids.nftids = adap->tids.sftid_base -
5824 adap->tids.ftid_base;
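			/* Worked example: with nftids == 300,
			 * DIV_ROUND_UP(300, 3) == 100, so sftid_base lands
			 * 100 entries past ftid_base, nsftids becomes 200
			 * and nftids becomes 100, i.e. the 1/3 : 2/3 split
			 * described above.
			 */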
5826 adap->vres.ddp.start = val[3];
5827 adap->vres.ddp.size = val[4] - val[3] + 1;
5828 adap->params.ofldq_wr_cred = val[5];
5830 adap->params.offload = 1;
5832 if (caps_cmd.rdmacaps) {
5833 params[0] = FW_PARAM_PFVF(STAG_START);
5834 params[1] = FW_PARAM_PFVF(STAG_END);
5835 params[2] = FW_PARAM_PFVF(RQ_START);
5836 params[3] = FW_PARAM_PFVF(RQ_END);
5837 params[4] = FW_PARAM_PFVF(PBL_START);
5838 params[5] = FW_PARAM_PFVF(PBL_END);
5839 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5843 adap->vres.stag.start = val[0];
5844 adap->vres.stag.size = val[1] - val[0] + 1;
5845 adap->vres.rq.start = val[2];
5846 adap->vres.rq.size = val[3] - val[2] + 1;
5847 adap->vres.pbl.start = val[4];
5848 adap->vres.pbl.size = val[5] - val[4] + 1;
5850 params[0] = FW_PARAM_PFVF(SQRQ_START);
5851 params[1] = FW_PARAM_PFVF(SQRQ_END);
5852 params[2] = FW_PARAM_PFVF(CQ_START);
5853 params[3] = FW_PARAM_PFVF(CQ_END);
5854 params[4] = FW_PARAM_PFVF(OCQ_START);
5855 params[5] = FW_PARAM_PFVF(OCQ_END);
5856 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5860 adap->vres.qp.start = val[0];
5861 adap->vres.qp.size = val[1] - val[0] + 1;
5862 adap->vres.cq.start = val[2];
5863 adap->vres.cq.size = val[3] - val[2] + 1;
5864 adap->vres.ocq.start = val[4];
5865 adap->vres.ocq.size = val[5] - val[4] + 1;
5867 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5868 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5869 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5872 adap->params.max_ordird_qp = 8;
5873 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5876 adap->params.max_ordird_qp = val[0];
5877 adap->params.max_ird_adapter = val[1];
5879 dev_info(adap->pdev_dev,
5880 "max_ordird_qp %d max_ird_adapter %d\n",
5881 adap->params.max_ordird_qp,
5882 adap->params.max_ird_adapter);
5884 if (caps_cmd.iscsicaps) {
5885 params[0] = FW_PARAM_PFVF(ISCSI_START);
5886 params[1] = FW_PARAM_PFVF(ISCSI_END);
5887 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5891 adap->vres.iscsi.start = val[0];
5892 adap->vres.iscsi.size = val[1] - val[0] + 1;
5894 #undef FW_PARAM_PFVF
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
5902 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5903 if (state != DEV_STATE_INIT) {
5906 /* The default MTU Table contains values 1492 and 1500.
5907 * However, for TCP, it's better to have two values which are
5908 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5909 * This allows us to have a TCP Data Payload which is a
5910 * multiple of 8 regardless of what combination of TCP Options
5911 * are in use (always a multiple of 4 bytes) which is
5912 * important for performance reasons. For instance, if no
5913 * options are in use, then we have a 20-byte IP header and a
5914 * 20-byte TCP header. In this case, a 1500-byte MSS would
5915 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5916 * which is not a multiple of 8. So using an MSS of 1488 in
5917 * this case results in a TCP Data Payload of 1448 bytes which
5918 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5919 * Stamps have been negotiated, then an MTU of 1500 bytes
5920 * results in a TCP Data Payload of 1448 bytes which, as
5921 * above, is a multiple of 8 bytes ...
5923 for (i = 0; i < NMTUS; i++)
5924 if (adap->params.mtus[i] == 1492) {
5925 adap->params.mtus[i] = 1488;
5929 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5930 adap->params.b_wnd);
5932 t4_init_sge_params(adap);
5933 t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with EIO,
	 * the firmware is not operating within its spec or something
	 * catastrophic happened to the HW/FW, so stop issuing commands.
	 */
bye:
5943 if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
5950 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5951 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
5960 adap->flags &= ~FW_OK;
5961 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5962 spin_lock(&adap->stats_lock);
5963 for_each_port(adap, i) {
5964 struct net_device *dev = adap->port[i];
5966 netif_device_detach(dev);
5967 netif_carrier_off(dev);
5969 spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
5973 if ((adap->flags & DEV_ENABLED)) {
5974 pci_disable_device(pdev);
5975 adap->flags &= ~DEV_ENABLED;
5977 out: return state == pci_channel_io_perm_failure ?
5978 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5981 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
5985 struct adapter *adap = pci_get_drvdata(pdev);
	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}
5993 if (!(adap->flags & DEV_ENABLED)) {
5994 if (pci_enable_device(pdev)) {
5995 dev_err(&pdev->dev, "Cannot reenable PCI "
5996 "device after reset\n");
5997 return PCI_ERS_RESULT_DISCONNECT;
5999 adap->flags |= DEV_ENABLED;
6002 pci_set_master(pdev);
6003 pci_restore_state(pdev);
6004 pci_save_state(pdev);
6005 pci_cleanup_aer_uncorrect_error_status(pdev);
6007 if (t4_wait_dev_ready(adap->regs) < 0)
6008 return PCI_ERS_RESULT_DISCONNECT;
6009 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6010 return PCI_ERS_RESULT_DISCONNECT;
6011 adap->flags |= FW_OK;
6012 if (adap_init1(adap, &c))
6013 return PCI_ERS_RESULT_DISCONNECT;
6015 for_each_port(adap, i) {
6016 struct port_info *p = adap2pinfo(adap, i);
		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
6023 p->xact_addr_filt = -1;
6026 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6027 adap->params.b_wnd);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
6031 return PCI_ERS_RESULT_RECOVERED;
6034 static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
6043 for_each_port(adap, i) {
6044 struct net_device *dev = adap->port[i];
		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
6055 static const struct pci_error_handlers cxgb4_eeh = {
6056 .error_detected = eeh_err_detected,
6057 .slot_reset = eeh_slot_reset,
6058 .resume = eeh_resume,
6061 static inline bool is_x_10g_port(const struct link_config *lc)
6063 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6064 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6067 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6068 unsigned int us, unsigned int cnt,
6069 unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
6078 * Perform default configuration of DMA queues depending on the number and type
6079 * of ports we found and the number of available CPUs. Most settings can be
6080 * modified by the admin prior to actual use.
6082 static void cfg_queues(struct adapter *adap)
6084 struct sge *s = &adap->sge;
6085 int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;
6091 for_each_port(adap, i)
6092 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6093 #ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities, each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
6098 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6099 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6100 MAX_ETH_QSETS, adap->params.nports * 8);
6104 for_each_port(adap, i) {
6105 struct port_info *pi = adap2pinfo(adap, i);
6107 pi->first_qset = qidx;
6111 #else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
6117 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6118 if (q10g > netif_get_num_default_rss_queues())
6119 q10g = netif_get_num_default_rss_queues();
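	/* Illustration (assuming MAX_ETH_QSETS is 32): on a 2-port adapter
	 * with both ports at 10G, q10g starts at (32 - 0) / 2 == 16 and is
	 * then capped by netif_get_num_default_rss_queues(), which is 8
	 * unless the machine has fewer online CPUs.
	 */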
6121 for_each_port(adap, i) {
6122 struct port_info *pi = adap2pinfo(adap, i);
6124 pi->first_qset = qidx;
6125 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6128 #endif /* !CONFIG_CHELSIO_T4_DCB */
6131 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6133 if (is_offload(adap)) {
6135 * For offload we use 1 queue/channel if all ports are up to 1G,
6136 * otherwise we divide all available queues amongst the channels
6137 * capped by the number of available cores.
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
6145 /* For RDMA one Rx queue per channel suffices */
6146 s->rdmaqs = adap->params.nports;
6147 s->rdmaciqs = adap->params.nports;
6150 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6151 struct sge_eth_rxq *r = &s->ethrxq[i];
6153 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6157 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6158 s->ethtxq[i].q.size = 1024;
6160 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6161 s->ctrlq[i].q.size = 512;
6163 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6164 s->ofldtxq[i].q.size = 1024;
6166 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6167 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6169 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6170 r->rspq.uld = CXGB4_ULD_ISCSI;
6174 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6175 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6177 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6178 r->rspq.uld = CXGB4_ULD_RDMA;
6182 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6183 if (ciq_size > SGE_MAX_IQ_SIZE) {
6184 CH_WARN(adap, "CIQ size too small for available IQs\n");
6185 ciq_size = SGE_MAX_IQ_SIZE;
6188 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6189 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6191 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6192 r->rspq.uld = CXGB4_ULD_RDMA;
6195 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6196 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6200 * Reduce the number of Ethernet queues across all ports to at most n.
6201 * n provides at least one queue per port.
6203 static void reduce_ethqs(struct adapter *adap, int n)
6206 struct port_info *pi;
6208 while (n < adap->sge.ethqsets)
6209 for_each_port(adap, i) {
6210 pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
6227 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6228 #define EXTRA_VECS 2
6230 static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
6235 unsigned int nchan = adap->params.nports;
6236 struct msix_entry entries[MAX_INGQ + 1];
6238 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6239 entries[i].entry = i;
6241 want = s->max_ethqsets + EXTRA_VECS;
6242 if (is_offload(adap)) {
6243 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6244 /* need nchan for each possible ULD */
6245 ofld_need = 3 * nchan;
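		/* i.e. one ingress queue per channel for each of the three
		 * ULD queue groups counted into "want" above: offload Rx,
		 * RDMA Rx and the RDMA concentrator IQs.
		 */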
6247 #ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;
6260 * Distribute available vectors to the various queue groups.
6261 * Every group gets its minimum requirement and NIC gets top
6262 * priority for leftovers.
6264 i = want - EXTRA_VECS - ofld_need;
6265 if (i < s->max_ethqsets) {
6266 s->max_ethqsets = i;
6267 if (i < s->ethqsets)
6268 reduce_ethqs(adap, i);
6270 if (is_offload(adap)) {
6271 i = want - EXTRA_VECS - s->max_ethqsets;
6272 i -= ofld_need - nchan;
6273 s->ofldqsets = (i / nchan) * nchan; /* round down */
6275 for (i = 0; i < want; ++i)
6276 adap->msix_info[i].vec = entries[i].vector;
static int init_rss(struct adapter *adap)
{
	int i, j;
6287 for_each_port(adap, i) {
6288 struct port_info *pi = adap2pinfo(adap, i);
		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
6293 for (j = 0; j < pi->rss_size; j++)
6294 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
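		/* ethtool_rxfh_indir_default(j, n) is simply j % n, so this
		 * fills the RSS indirection table by round-robining over the
		 * port's Rx queue sets.
		 */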
6299 static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
6304 const struct port_info *pi = netdev_priv(dev);
6305 const struct adapter *adap = pi->adapter;
	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";
6314 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6315 bufp += sprintf(bufp, "100/");
6316 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6317 bufp += sprintf(bufp, "1000/");
6318 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6319 bufp += sprintf(bufp, "10G/");
6320 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6321 bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6326 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6327 adap->params.vpd.id,
6328 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6329 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6330 (adap->flags & USING_MSIX) ? " MSI-X" :
6331 (adap->flags & USING_MSI) ? " MSI" : "");
6332 netdev_info(dev, "S/N: %s, P/N: %s\n",
6333 adap->params.vpd.sn, adap->params.vpd.pn);
6336 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6338 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6342 * Free the following resources:
6343 * - memory used for tables
6346 * - resources FW is holding for us
6348 static void free_some_resources(struct adapter *adapter)
6352 t4_free_mem(adapter->l2t);
6353 t4_free_mem(adapter->tids.tid_tab);
6354 disable_msi(adapter);
6356 for_each_port(adapter, i)
6357 if (adapter->port[i]) {
6358 kfree(adap2pinfo(adapter, i)->rss);
6359 free_netdev(adapter->port[i]);
6361 if (adapter->flags & FW_OK)
6362 t4_fw_bye(adapter, adapter->fn);
6365 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6366 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6367 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6368 #define SEGMENT_SIZE 128
6370 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6372 int func, i, err, s_qpp, qpp, num_seg;
6373 struct port_info *pi;
6374 bool highdma = false;
6375 struct adapter *adapter = NULL;
6378 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6380 err = pci_request_regions(pdev, KBUILD_MODNAME);
6382 /* Just info, some other driver may have claimed the device. */
6383 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6387 err = pci_enable_device(pdev);
6389 dev_err(&pdev->dev, "cannot enable PCI device\n");
6390 goto out_release_regions;
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}
6400 err = t4_wait_dev_ready(regs);
6402 goto out_unmap_bar0;
6404 /* We control everything through one PF */
6405 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}
6429 pci_enable_pcie_error_reporting(pdev);
6430 enable_pcie_relaxed_ordering(pdev);
6431 pci_set_master(pdev);
6432 pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}
6440 adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
6446 /* PCI device has been enabled */
6447 adapter->flags |= DEV_ENABLED;
6449 adapter->regs = regs;
6450 adapter->pdev = pdev;
6451 adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
6455 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6457 spin_lock_init(&adapter->stats_lock);
6458 spin_lock_init(&adapter->tid_release_lock);
6459 spin_lock_init(&adapter->win0_lock);
6461 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6462 INIT_WORK(&adapter->db_full_task, process_db_full);
6463 INIT_WORK(&adapter->db_drop_task, process_db_drop);
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;
6470 if (!is_t4(adapter->params.chip)) {
6471 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6472 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6473 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6474 num_seg = PAGE_SIZE / SEGMENT_SIZE;
		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the number of egress queues per page
		 * (SGE_EGRESS_QUEUES_PER_PAGE_PF) does not exceed the number
		 * of 128B segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
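		/* e.g. with a 4KB PAGE_SIZE, num_seg = 4096 / 128 = 32, so
		 * any qpp value above 32 would defeat write coalescing and
		 * is rejected above.
		 */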
6487 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6488 pci_resource_len(pdev, 2));
6489 if (!adapter->bar2) {
6490 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6492 goto out_free_adapter;
6496 setup_memwin(adapter);
6497 err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;
6502 for_each_port(adapter, i) {
6503 struct net_device *netdev;
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);
6514 adapter->port[i] = netdev;
6515 pi = netdev_priv(netdev);
6516 pi->adapter = adapter;
6517 pi->xact_addr_filt = -1;
6519 netdev->irq = pdev->irq;
6521 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6522 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6523 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6524 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
6527 netdev->features |= netdev->hw_features;
6528 netdev->vlan_features = netdev->features & VLAN_FEAT;
6530 netdev->priv_flags |= IFF_UNICAST_FLT;
6532 netdev->netdev_ops = &cxgb4_netdev_ops;
6533 #ifdef CONFIG_CHELSIO_T4_DCB
6534 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6535 cxgb4_dcb_state_init(netdev);
6537 netdev->ethtool_ops = &cxgb_ethtool_ops;
6540 pci_set_drvdata(pdev, adapter);
6542 if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}
6549 * Configure queues and allocate tables now, they can be needed as
6550 * soon as the first register_netdev completes.
6552 cfg_queues(adapter);
6554 adapter->l2t = t4_init_l2t();
6555 if (!adapter->l2t) {
6556 /* We tolerate a lack of L2T, giving up some functionality */
6557 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6558 adapter->params.offload = 0;
6561 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6562 dev_warn(&pdev->dev, "could not allocate TID table, "
6564 adapter->params.offload = 0;
6567 /* See what interrupts we'll be using */
6568 if (msi > 1 && enable_msix(adapter) == 0)
6569 adapter->flags |= USING_MSIX;
6570 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6571 adapter->flags |= USING_MSI;
6573 err = init_rss(adapter);
6578 * The card is now ready to go. If any errors occur during device
6579 * registration we do not fail the whole card but rather proceed only
6580 * with the ports we manage to register successfully. However we must
6581 * register at least one net device.
6583 for_each_port(adapter, i) {
6584 pi = adap2pinfo(adapter, i);
6585 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6586 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
		err = register_netdev(adapter->port[i]);
		if (err)
			break;
6591 adapter->chan_map[pi->tx_chan] = i;
6592 print_port_info(adapter->port[i]);
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}
6603 if (cxgb4_debugfs_root) {
6604 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6605 cxgb4_debugfs_root);
6606 setup_debugfs(adapter);
6609 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6610 pdev->needs_freset = 1;
6612 if (is_offload(adapter))
6613 attach_ulds(adapter);
sriov:
#ifdef CONFIG_PCI_IOV
6617 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6618 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6619 dev_info(&pdev->dev,
6620 "instantiated %u virtual functions\n",
 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
6645 static void remove_one(struct pci_dev *pdev)
6647 struct adapter *adapter = pci_get_drvdata(pdev);
6649 #ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;
6657 /* Tear down per-adapter Work Queue first since it can contain
6658 * references to our adapter data structure.
6660 destroy_workqueue(adapter->workq);
6662 if (is_offload(adapter))
6663 detach_ulds(adapter);
6665 for_each_port(adapter, i)
6666 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6667 unregister_netdev(adapter->port[i]);
6669 debugfs_remove_recursive(adapter->debugfs_root);
		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
6674 if (adapter->tids.ftid_tab) {
6675 struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);
6685 free_some_resources(adapter);
6686 iounmap(adapter->regs);
6687 if (!is_t4(adapter->params.chip))
6688 iounmap(adapter->bar2);
6689 pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
6701 static struct pci_driver cxgb4_driver = {
6702 .name = KBUILD_MODNAME,
6703 .id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
6706 .shutdown = remove_one,
6707 .err_handler = &cxgb4_eeh,
6710 static int __init cxgb4_init_module(void)
6714 /* Debugfs support is optional, just warn if this fails */
6715 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6716 if (!cxgb4_debugfs_root)
6717 pr_warn("could not create debugfs entry, continuing\n");
	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
6723 #if IS_ENABLED(CONFIG_IPV6)
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif

	return ret;
}
6730 static void __exit cxgb4_cleanup_module(void)
6732 #if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
6735 pci_unregister_driver(&cxgb4_driver);
6736 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6739 module_init(cxgb4_init_module);
6740 module_exit(cxgb4_cleanup_module);