2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
70 #include "cxgb4_dcb.h"
71 #include "cxgb4_debugfs.h"
74 #include <../drivers/net/bonding/bonding.h>
79 #define DRV_VERSION "2.0.0-ko"
80 #define DRV_DESC "Chelsio T4/T5 Network Driver"
83 * Max interrupt hold-off timer value in us. Queues fall back to this value
84 * under extreme memory pressure so it's largish to give the system time to
87 #define MAX_SGE_TIMERVAL 200U
91 * Physical Function provisioning constants.
93 PFRES_NVI = 4, /* # of Virtual Interfaces */
94 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
95 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
97 PFRES_NEQ = 256, /* # of egress queues */
98 PFRES_NIQ = 0, /* # of ingress queues */
99 PFRES_TC = 0, /* PCI-E traffic class */
100 PFRES_NEXACTF = 128, /* # of exact MPS filters */
102 PFRES_R_CAPS = FW_CMD_CAP_PF,
103 PFRES_WX_CAPS = FW_CMD_CAP_PF,
105 #ifdef CONFIG_PCI_IOV
107 * Virtual Function provisioning constants. We need two extra Ingress
108 * Queues with Interrupt capability to serve as the VF's Firmware
109 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
110 * neither will have Free Lists associated with them. For each
111 * Ethernet/Control Egress Queue and for each Free List, we need an
114 VFRES_NPORTS = 1, /* # of "ports" per VF */
115 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
117 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
118 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
119 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
120 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
121 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
122 VFRES_TC = 0, /* PCI-E traffic class */
123 VFRES_NEXACTF = 16, /* # of exact MPS filters */
125 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
126 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
131 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
132 * static and likely not to be useful in the long run. We really need to
133 * implement some form of persistent configuration which the firmware
136 static unsigned int pfvfres_pmask(struct adapter *adapter,
137 unsigned int pf, unsigned int vf)
139 unsigned int portn, portvec;
142 * Give PF's access to all of the ports.
145 return FW_PFVF_CMD_PMASK_MASK;
148 * For VFs, we'll assign them access to the ports based purely on the
149 * PF. We assign active ports in order, wrapping around if there are
150 * fewer active ports than PFs: e.g. active port[pf % nports].
151 * Unfortunately the adapter's port_info structs haven't been
152 * initialized yet so we have to compute this.
154 if (adapter->params.nports == 0)
157 portn = pf % adapter->params.nports;
158 portvec = adapter->params.portvec;
161 * Isolate the lowest set bit in the port vector. If we're at
162 * the port number that we want, return that as the pmask.
163 * Otherwise mask that bit out of the port vector and
164 * decrement our port number ...
166 unsigned int pmask = portvec ^ (portvec & (portvec-1));
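/*
 * Illustrative sketch (added for clarity; the helper name is made up and
 * this is not part of the driver): the loop above walks the port vector
 * one set bit at a time.  portvec & (portvec - 1) clears the lowest set
 * bit, so XOR-ing it with portvec isolates exactly that bit.  E.g. for
 * portvec = 0xA the first pass yields pmask = 0x2 and the second pass
 * yields pmask = 0x8.
 */
static unsigned int __maybe_unused isolate_nth_port_bit(unsigned int portvec,
							unsigned int portn)
{
	while (portvec) {
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0)
			return pmask;	/* the port bit we were asked for */
		portvec &= ~pmask;	/* clear it and keep scanning */
		portn--;
	}
	return 0;			/* fewer set bits than requested */
}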
176 MAX_TXQ_ENTRIES = 16384,
177 MAX_CTRL_TXQ_ENTRIES = 1024,
178 MAX_RSPQ_ENTRIES = 16384,
179 MAX_RX_BUFFERS = 16384,
180 MIN_TXQ_ENTRIES = 32,
181 MIN_CTRL_TXQ_ENTRIES = 32,
182 MIN_RSPQ_ENTRIES = 128,
186 /* Host shadow copy of ingress filter entry. This is in host native format
187 * and doesn't match the ordering or bit order, etc. of the hardware or the
188 * firmware command. The use of bit-field structure elements is purely to
189 * remind ourselves of the field size limitations and save memory in the case
190 * where the filter table is large.
192 struct filter_entry {
193 /* Administrative fields for filter.
195 u32 valid:1; /* filter allocated and valid */
196 u32 locked:1; /* filter is administratively locked */
198 u32 pending:1; /* filter action is pending firmware reply */
199 u32 smtidx:8; /* Source MAC Table index for smac */
200 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
202 /* The filter itself. Most of this is a straight copy of information
203 * provided by the extended ioctl(). Some fields are translated to
204 * internal forms -- for instance the Ingress Queue ID passed in from
205 * the ioctl() is translated into the Absolute Ingress Queue ID.
207 struct ch_filter_specification fs;
210 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
211 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
212 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
214 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
216 static const struct pci_device_id cxgb4_pci_tbl[] = {
217 CH_DEVICE(0xa000, 0), /* PE10K */
218 CH_DEVICE(0x4001, -1),
219 CH_DEVICE(0x4002, -1),
220 CH_DEVICE(0x4003, -1),
221 CH_DEVICE(0x4004, -1),
222 CH_DEVICE(0x4005, -1),
223 CH_DEVICE(0x4006, -1),
224 CH_DEVICE(0x4007, -1),
225 CH_DEVICE(0x4008, -1),
226 CH_DEVICE(0x4009, -1),
227 CH_DEVICE(0x400a, -1),
228 CH_DEVICE(0x400d, -1),
229 CH_DEVICE(0x400e, -1),
230 CH_DEVICE(0x4080, -1),
231 CH_DEVICE(0x4081, -1),
232 CH_DEVICE(0x4082, -1),
233 CH_DEVICE(0x4083, -1),
234 CH_DEVICE(0x4084, -1),
235 CH_DEVICE(0x4085, -1),
236 CH_DEVICE(0x4086, -1),
237 CH_DEVICE(0x4087, -1),
238 CH_DEVICE(0x4088, -1),
239 CH_DEVICE(0x4401, 4),
240 CH_DEVICE(0x4402, 4),
241 CH_DEVICE(0x4403, 4),
242 CH_DEVICE(0x4404, 4),
243 CH_DEVICE(0x4405, 4),
244 CH_DEVICE(0x4406, 4),
245 CH_DEVICE(0x4407, 4),
246 CH_DEVICE(0x4408, 4),
247 CH_DEVICE(0x4409, 4),
248 CH_DEVICE(0x440a, 4),
249 CH_DEVICE(0x440d, 4),
250 CH_DEVICE(0x440e, 4),
251 CH_DEVICE(0x4480, 4),
252 CH_DEVICE(0x4481, 4),
253 CH_DEVICE(0x4482, 4),
254 CH_DEVICE(0x4483, 4),
255 CH_DEVICE(0x4484, 4),
256 CH_DEVICE(0x4485, 4),
257 CH_DEVICE(0x4486, 4),
258 CH_DEVICE(0x4487, 4),
259 CH_DEVICE(0x4488, 4),
260 CH_DEVICE(0x5001, 4),
261 CH_DEVICE(0x5002, 4),
262 CH_DEVICE(0x5003, 4),
263 CH_DEVICE(0x5004, 4),
264 CH_DEVICE(0x5005, 4),
265 CH_DEVICE(0x5006, 4),
266 CH_DEVICE(0x5007, 4),
267 CH_DEVICE(0x5008, 4),
268 CH_DEVICE(0x5009, 4),
269 CH_DEVICE(0x500A, 4),
270 CH_DEVICE(0x500B, 4),
271 CH_DEVICE(0x500C, 4),
272 CH_DEVICE(0x500D, 4),
273 CH_DEVICE(0x500E, 4),
274 CH_DEVICE(0x500F, 4),
275 CH_DEVICE(0x5010, 4),
276 CH_DEVICE(0x5011, 4),
277 CH_DEVICE(0x5012, 4),
278 CH_DEVICE(0x5013, 4),
279 CH_DEVICE(0x5014, 4),
280 CH_DEVICE(0x5015, 4),
281 CH_DEVICE(0x5080, 4),
282 CH_DEVICE(0x5081, 4),
283 CH_DEVICE(0x5082, 4),
284 CH_DEVICE(0x5083, 4),
285 CH_DEVICE(0x5084, 4),
286 CH_DEVICE(0x5085, 4),
287 CH_DEVICE(0x5086, 4),
288 CH_DEVICE(0x5087, 4),
289 CH_DEVICE(0x5088, 4),
290 CH_DEVICE(0x5401, 4),
291 CH_DEVICE(0x5402, 4),
292 CH_DEVICE(0x5403, 4),
293 CH_DEVICE(0x5404, 4),
294 CH_DEVICE(0x5405, 4),
295 CH_DEVICE(0x5406, 4),
296 CH_DEVICE(0x5407, 4),
297 CH_DEVICE(0x5408, 4),
298 CH_DEVICE(0x5409, 4),
299 CH_DEVICE(0x540A, 4),
300 CH_DEVICE(0x540B, 4),
301 CH_DEVICE(0x540C, 4),
302 CH_DEVICE(0x540D, 4),
303 CH_DEVICE(0x540E, 4),
304 CH_DEVICE(0x540F, 4),
305 CH_DEVICE(0x5410, 4),
306 CH_DEVICE(0x5411, 4),
307 CH_DEVICE(0x5412, 4),
308 CH_DEVICE(0x5413, 4),
309 CH_DEVICE(0x5414, 4),
310 CH_DEVICE(0x5415, 4),
311 CH_DEVICE(0x5480, 4),
312 CH_DEVICE(0x5481, 4),
313 CH_DEVICE(0x5482, 4),
314 CH_DEVICE(0x5483, 4),
315 CH_DEVICE(0x5484, 4),
316 CH_DEVICE(0x5485, 4),
317 CH_DEVICE(0x5486, 4),
318 CH_DEVICE(0x5487, 4),
319 CH_DEVICE(0x5488, 4),
323 #define FW4_FNAME "cxgb4/t4fw.bin"
324 #define FW5_FNAME "cxgb4/t5fw.bin"
325 #define FW4_CFNAME "cxgb4/t4-config.txt"
326 #define FW5_CFNAME "cxgb4/t5-config.txt"
328 MODULE_DESCRIPTION(DRV_DESC);
329 MODULE_AUTHOR("Chelsio Communications");
330 MODULE_LICENSE("Dual BSD/GPL");
331 MODULE_VERSION(DRV_VERSION);
332 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
333 MODULE_FIRMWARE(FW4_FNAME);
334 MODULE_FIRMWARE(FW5_FNAME);
337 * Normally we're willing to become the firmware's Master PF but will be happy
338 * if another PF has already become the Master and initialized the adapter.
339 * Setting "force_init" will cause this driver to forcibly establish itself as
340 * the Master PF and initialize the adapter.
342 static uint force_init;
344 module_param(force_init, uint, 0644);
345 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
348 * Normally if the firmware we connect to has Configuration File support, we
349 * use that and only fall back to the old Driver-based initialization if the
350 * Configuration File fails for some reason. If force_old_init is set, then
351 * we'll always use the old Driver-based initialization sequence.
353 static uint force_old_init;
355 module_param(force_old_init, uint, 0644);
356 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
358 static int dflt_msg_enable = DFLT_MSG_ENABLE;
360 module_param(dflt_msg_enable, int, 0644);
361 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
364 * The driver uses the best interrupt scheme available on a platform in the
365 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
366 * of these schemes the driver may consider as follows:
368 * msi = 2: choose from among all three options
369 * msi = 1: only consider MSI and INTx interrupts
370 * msi = 0: force INTx interrupts
374 module_param(msi, int, 0644);
375 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
378 * Queue interrupt hold-off timer values. Queues default to the first of these
381 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
383 module_param_array(intr_holdoff, uint, NULL, 0644);
384 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
385 "0..4 in microseconds");
387 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
389 module_param_array(intr_cnt, uint, NULL, 0644);
390 MODULE_PARM_DESC(intr_cnt,
391 "thresholds 1..3 for queue interrupt packet counters");
394 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
395 * offset by 2 bytes in order to have the IP headers line up on 4-byte
396 * boundaries. This is a requirement for many architectures which will throw
397 * a machine check fault if an attempt is made to access one of the 4-byte IP
398 * header fields on a non-4-byte boundary. And it's a major performance issue
399 * even on some architectures which allow it, like some implementations of the
400 * x86 ISA. However, some architectures don't mind this and for some very
401 * edge-case performance sensitive applications (like forwarding large volumes
402 * of small packets), setting this DMA offset to 0 will decrease the number of
403 * PCI-E Bus transfers enough to measurably affect performance.
405 static int rx_dma_offset = 2;
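/*
 * Example (illustrative, not from the original source): with
 * rx_dma_offset = 2 the 14-byte Ethernet header occupies bytes 2..15 of
 * the DMA buffer, so the IP header starts at byte 16, a 4-byte boundary;
 * with an offset of 0 it would start at byte 14, misaligned on
 * strict-alignment architectures.
 */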
409 #ifdef CONFIG_PCI_IOV
410 module_param(vf_acls, bool, 0644);
411 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
413 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
414 * on SR-IOV Capable Physical Functions.
416 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
418 module_param_array(num_vf, uint, NULL, 0644);
419 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
422 /* TX Queue select used to determine what algorithm to use for selecting TX
423 * queue. Select between the kernel-provided function (select_queue=0) and the
424 * driver's cxgb_select_queue function (select_queue=1)
426 * Default: select_queue=0
428 static int select_queue;
429 module_param(select_queue, int, 0644);
430 MODULE_PARM_DESC(select_queue,
431 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
434 * The filter TCAM has a fixed portion and a variable portion. The fixed
435 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
436 * ports. The variable portion is 36 bits which can include things like Exact
437 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
438 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
439 * far exceed the 36-bit budget for this "compressed" header portion of the
440 * filter. Thus, we have a scarce resource which must be carefully managed.
442 * By default we set this up to mostly match the set of filter matching
443 * capabilities of T3 but with accommodations for some of T4's more
444 * interesting features:
446 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
447 * [Inner] VLAN (17), Port (3), FCoE (1) }
450 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
451 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
452 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
455 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
457 module_param(tp_vlan_pri_map, uint, 0644);
458 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
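/*
 * Worked check (illustrative, added here): the default field selection
 * above consumes 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, which fits within
 * the 36-bit compressed filter tuple budget.
 */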
460 static struct dentry *cxgb4_debugfs_root;
462 static LIST_HEAD(adapter_list);
463 static DEFINE_MUTEX(uld_mutex);
464 /* Adapter list to be accessed from atomic context */
465 static LIST_HEAD(adap_rcu_list);
466 static DEFINE_SPINLOCK(adap_rcu_lock);
467 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
468 static const char *uld_str[] = { "RDMA", "iSCSI" };
470 static void link_report(struct net_device *dev)
472 if (!netif_carrier_ok(dev))
473 netdev_info(dev, "link down\n");
475 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
477 const char *s = "10Mbps";
478 const struct port_info *p = netdev_priv(dev);
480 switch (p->link_cfg.speed) {
495 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
500 #ifdef CONFIG_CHELSIO_T4_DCB
501 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
502 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
504 struct port_info *pi = netdev_priv(dev);
505 struct adapter *adap = pi->adapter;
506 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
509 /* We use a simple mapping of Port TX Queue Index to DCB
510 * Priority when we're enabling DCB.
512 for (i = 0; i < pi->nqsets; i++, txq++) {
516 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
517 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
518 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
519 value = enable ? i : 0xffffffff;
521 /* Since we can be called while atomic (from "interrupt
522 * level") we need to issue the Set Parameters Commannd
523 * without sleeping (timeout < 0).
525 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
529 dev_err(adap->pdev_dev,
530 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
531 enable ? "set" : "unset", pi->port_id, i, -err);
533 txq->dcb_prio = value;
536 #endif /* CONFIG_CHELSIO_T4_DCB */
538 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
540 struct net_device *dev = adapter->port[port_id];
542 /* Skip changes from disabled ports. */
543 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
545 netif_carrier_on(dev);
547 #ifdef CONFIG_CHELSIO_T4_DCB
548 cxgb4_dcb_state_init(dev);
549 dcb_tx_queue_prio_enable(dev, false);
550 #endif /* CONFIG_CHELSIO_T4_DCB */
551 netif_carrier_off(dev);
558 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
560 static const char *mod_str[] = {
561 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
564 const struct net_device *dev = adap->port[port_id];
565 const struct port_info *pi = netdev_priv(dev);
567 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
568 netdev_info(dev, "port module unplugged\n");
569 else if (pi->mod_type < ARRAY_SIZE(mod_str))
570 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
574 * Configure the exact and hash address filters to handle a port's multicast
575 * and secondary unicast MAC addresses.
577 static int set_addr_filters(const struct net_device *dev, bool sleep)
585 const struct netdev_hw_addr *ha;
586 int uc_cnt = netdev_uc_count(dev);
587 int mc_cnt = netdev_mc_count(dev);
588 const struct port_info *pi = netdev_priv(dev);
589 unsigned int mb = pi->adapter->fn;
591 /* first do the secondary unicast addresses */
592 netdev_for_each_uc_addr(ha, dev) {
593 addr[naddr++] = ha->addr;
594 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
595 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
596 naddr, addr, filt_idx, &uhash, sleep);
605 /* next set up the multicast addresses */
606 netdev_for_each_mc_addr(ha, dev) {
607 addr[naddr++] = ha->addr;
608 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
609 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
610 naddr, addr, filt_idx, &mhash, sleep);
619 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
620 uhash | mhash, sleep);
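/*
 * Note (illustrative, added here): set_addr_filters() batches up to
 * ARRAY_SIZE(addr) addresses per t4_alloc_mac_filt() call; addresses
 * that don't fit in the exact-match table come back encoded in
 * uhash/mhash, which is why the final t4_set_addr_hash() call above
 * ORs the two hash results together.
 */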
623 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
624 module_param(dbfifo_int_thresh, int, 0644);
625 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
628 * usecs to sleep while draining the dbfifo
630 static int dbfifo_drain_delay = 1000;
631 module_param(dbfifo_drain_delay, int, 0644);
632 MODULE_PARM_DESC(dbfifo_drain_delay,
633 "usecs to sleep while draining the dbfifo");
636 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
637 * If @mtu is -1 it is left unchanged.
639 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
642 struct port_info *pi = netdev_priv(dev);
644 ret = set_addr_filters(dev, sleep_ok);
646 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
647 (dev->flags & IFF_PROMISC) ? 1 : 0,
648 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
654 * link_start - enable a port
655 * @dev: the port to enable
657 * Performs the MAC and PHY actions needed to enable a port.
659 static int link_start(struct net_device *dev)
662 struct port_info *pi = netdev_priv(dev);
663 unsigned int mb = pi->adapter->fn;
666 * We do not set address filters and promiscuity here; the stack does
667 * that step explicitly.
669 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
670 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
672 ret = t4_change_mac(pi->adapter, mb, pi->viid,
673 pi->xact_addr_filt, dev->dev_addr, true,
676 pi->xact_addr_filt = ret;
681 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
685 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
686 true, CXGB4_DCB_ENABLED);
693 int cxgb4_dcb_enabled(const struct net_device *dev)
695 #ifdef CONFIG_CHELSIO_T4_DCB
696 struct port_info *pi = netdev_priv(dev);
698 if (!pi->dcb.enabled)
701 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
702 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
707 EXPORT_SYMBOL(cxgb4_dcb_enabled);
709 #ifdef CONFIG_CHELSIO_T4_DCB
710 /* Handle a Data Center Bridging update message from the firmware. */
711 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
713 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
714 struct net_device *dev = adap->port[port];
715 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
718 cxgb4_dcb_handle_fw_update(adap, pcmd);
719 new_dcb_enabled = cxgb4_dcb_enabled(dev);
721 /* If the DCB has become enabled or disabled on the port then we're
722 * going to need to set up/tear down DCB Priority parameters for the
723 * TX Queues associated with the port.
725 if (new_dcb_enabled != old_dcb_enabled)
726 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
728 #endif /* CONFIG_CHELSIO_T4_DCB */
730 /* Clear a filter and release any of its resources that we own. This also
731 * clears the filter's "pending" status.
733 static void clear_filter(struct adapter *adap, struct filter_entry *f)
735 /* If the new or old filter has loopback rewriting rules then we'll
736 * need to free any existing Layer Two Table (L2T) entries of the old
737 * filter rule. The firmware will handle freeing up any Source MAC
738 * Table (SMT) entries used for rewriting Source MAC Addresses in
742 cxgb4_l2t_release(f->l2t);
744 /* The zeroing of the filter rule below clears the filter valid,
745 * pending, locked flags, l2t pointer, etc. so it's all we need for
748 memset(f, 0, sizeof(*f));
751 /* Handle a filter write/deletion reply.
753 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
755 unsigned int idx = GET_TID(rpl);
756 unsigned int nidx = idx - adap->tids.ftid_base;
758 struct filter_entry *f;
760 if (idx >= adap->tids.ftid_base && nidx <
761 (adap->tids.nftids + adap->tids.nsftids)) {
763 ret = GET_TCB_COOKIE(rpl->cookie);
764 f = &adap->tids.ftid_tab[idx];
766 if (ret == FW_FILTER_WR_FLT_DELETED) {
767 /* Clear the filter when we get confirmation from the
768 * hardware that the filter has been deleted.
770 clear_filter(adap, f);
771 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
772 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
774 clear_filter(adap, f);
775 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
776 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
777 f->pending = 0; /* asynchronous setup completed */
780 /* Something went wrong. Issue a warning about the
781 * problem and clear everything out.
783 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
785 clear_filter(adap, f);
790 /* Response queue handler for the FW event queue.
792 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
793 const struct pkt_gl *gl)
795 u8 opcode = ((const struct rss_header *)rsp)->opcode;
797 rsp++; /* skip RSS header */
799 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
801 if (unlikely(opcode == CPL_FW4_MSG &&
802 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
804 opcode = ((const struct rss_header *)rsp)->opcode;
806 if (opcode != CPL_SGE_EGR_UPDATE) {
807 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
813 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
814 const struct cpl_sge_egr_update *p = (void *)rsp;
815 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
818 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
820 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
821 struct sge_eth_txq *eq;
823 eq = container_of(txq, struct sge_eth_txq, q);
824 netif_tx_wake_queue(eq->txq);
826 struct sge_ofld_txq *oq;
828 oq = container_of(txq, struct sge_ofld_txq, q);
829 tasklet_schedule(&oq->qresume_tsk);
831 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
832 const struct cpl_fw6_msg *p = (void *)rsp;
834 #ifdef CONFIG_CHELSIO_T4_DCB
835 const struct fw_port_cmd *pcmd = (const void *)p->data;
836 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
837 unsigned int action =
838 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
840 if (cmd == FW_PORT_CMD &&
841 action == FW_PORT_ACTION_GET_PORT_INFO) {
842 int port = FW_PORT_CMD_PORTID_GET(
843 be32_to_cpu(pcmd->op_to_portid));
844 struct net_device *dev = q->adap->port[port];
845 int state_input = ((pcmd->u.info.dcbxdis_pkd &
847 ? CXGB4_DCB_INPUT_FW_DISABLED
848 : CXGB4_DCB_INPUT_FW_ENABLED);
850 cxgb4_dcb_state_fsm(dev, state_input);
853 if (cmd == FW_PORT_CMD &&
854 action == FW_PORT_ACTION_L2_DCB_CFG)
855 dcb_rpl(q->adap, pcmd);
859 t4_handle_fw_rpl(q->adap, p->data);
860 } else if (opcode == CPL_L2T_WRITE_RPL) {
861 const struct cpl_l2t_write_rpl *p = (void *)rsp;
863 do_l2t_write_rpl(q->adap, p);
864 } else if (opcode == CPL_SET_TCB_RPL) {
865 const struct cpl_set_tcb_rpl *p = (void *)rsp;
867 filter_rpl(q->adap, p);
869 dev_err(q->adap->pdev_dev,
870 "unexpected CPL %#x on FW event queue\n", opcode);
876 * uldrx_handler - response queue handler for ULD queues
877 * @q: the response queue that received the packet
878 * @rsp: the response queue descriptor holding the offload message
879 * @gl: the gather list of packet fragments
881 * Deliver an ingress offload packet to a ULD. All processing is done by
882 * the ULD; we just maintain statistics.
884 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
885 const struct pkt_gl *gl)
887 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
889 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
891 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
892 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
895 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
901 else if (gl == CXGB4_MSG_AN)
908 static void disable_msi(struct adapter *adapter)
910 if (adapter->flags & USING_MSIX) {
911 pci_disable_msix(adapter->pdev);
912 adapter->flags &= ~USING_MSIX;
913 } else if (adapter->flags & USING_MSI) {
914 pci_disable_msi(adapter->pdev);
915 adapter->flags &= ~USING_MSI;
920 * Interrupt handler for non-data events used with MSI-X.
922 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
924 struct adapter *adap = cookie;
926 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
929 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
931 t4_slow_intr_handler(adap);
936 * Name the MSI-X interrupts.
938 static void name_msix_vecs(struct adapter *adap)
940 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
942 /* non-data interrupts */
943 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
946 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
947 adap->port[0]->name);
949 /* Ethernet queues */
950 for_each_port(adap, j) {
951 struct net_device *d = adap->port[j];
952 const struct port_info *pi = netdev_priv(d);
954 for (i = 0; i < pi->nqsets; i++, msi_idx++)
955 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
960 for_each_ofldrxq(&adap->sge, i)
961 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
962 adap->port[0]->name, i);
964 for_each_rdmarxq(&adap->sge, i)
965 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
966 adap->port[0]->name, i);
968 for_each_rdmaciq(&adap->sge, i)
969 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
970 adap->port[0]->name, i);
973 static int request_msix_queue_irqs(struct adapter *adap)
975 struct sge *s = &adap->sge;
976 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
979 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
980 adap->msix_info[1].desc, &s->fw_evtq);
984 for_each_ethrxq(s, ethqidx) {
985 err = request_irq(adap->msix_info[msi_index].vec,
987 adap->msix_info[msi_index].desc,
988 &s->ethrxq[ethqidx].rspq);
993 for_each_ofldrxq(s, ofldqidx) {
994 err = request_irq(adap->msix_info[msi_index].vec,
996 adap->msix_info[msi_index].desc,
997 &s->ofldrxq[ofldqidx].rspq);
1002 for_each_rdmarxq(s, rdmaqidx) {
1003 err = request_irq(adap->msix_info[msi_index].vec,
1004 t4_sge_intr_msix, 0,
1005 adap->msix_info[msi_index].desc,
1006 &s->rdmarxq[rdmaqidx].rspq);
1011 for_each_rdmaciq(s, rdmaciqqidx) {
1012 err = request_irq(adap->msix_info[msi_index].vec,
1013 t4_sge_intr_msix, 0,
1014 adap->msix_info[msi_index].desc,
1015 &s->rdmaciq[rdmaciqqidx].rspq);
1023 while (--rdmaciqqidx >= 0)
1024 free_irq(adap->msix_info[--msi_index].vec,
1025 &s->rdmaciq[rdmaciqqidx].rspq);
1026 while (--rdmaqidx >= 0)
1027 free_irq(adap->msix_info[--msi_index].vec,
1028 &s->rdmarxq[rdmaqidx].rspq);
1029 while (--ofldqidx >= 0)
1030 free_irq(adap->msix_info[--msi_index].vec,
1031 &s->ofldrxq[ofldqidx].rspq);
1032 while (--ethqidx >= 0)
1033 free_irq(adap->msix_info[--msi_index].vec,
1034 &s->ethrxq[ethqidx].rspq);
1035 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1039 static void free_msix_queue_irqs(struct adapter *adap)
1041 int i, msi_index = 2;
1042 struct sge *s = &adap->sge;
1044 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1045 for_each_ethrxq(s, i)
1046 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1047 for_each_ofldrxq(s, i)
1048 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1049 for_each_rdmarxq(s, i)
1050 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1051 for_each_rdmaciq(s, i)
1052 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1056 * write_rss - write the RSS table for a given port
1058 * @queues: array of queue indices for RSS
1060 * Sets up the portion of the HW RSS table for the port's VI to distribute
1061 * packets to the Rx queues in @queues.
1063 static int write_rss(const struct port_info *pi, const u16 *queues)
1067 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1069 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1073 /* map the queue indices to queue ids */
1074 for (i = 0; i < pi->rss_size; i++, queues++)
1075 rss[i] = q[*queues].rspq.abs_id;
1077 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1078 pi->rss_size, rss, pi->rss_size);
1084 * setup_rss - configure RSS
1085 * @adap: the adapter
1087 * Sets up RSS for each port.
1089 static int setup_rss(struct adapter *adap)
1093 for_each_port(adap, i) {
1094 const struct port_info *pi = adap2pinfo(adap, i);
1096 err = write_rss(pi, pi->rss);
1104 * Return the channel of the ingress queue with the given qid.
1106 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1108 qid -= p->ingr_start;
1109 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1113 * Wait until all NAPI handlers are descheduled.
1115 static void quiesce_rx(struct adapter *adap)
1119 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1120 struct sge_rspq *q = adap->sge.ingr_map[i];
1122 if (q && q->handler)
1123 napi_disable(&q->napi);
1128 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1130 static void enable_rx(struct adapter *adap)
1134 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1135 struct sge_rspq *q = adap->sge.ingr_map[i];
1140 napi_enable(&q->napi);
1141 /* 0-increment GTS to start the timer and enable interrupts */
1142 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1143 SEINTARM(q->intr_params) |
1144 INGRESSQID(q->cntxt_id));
1149 * setup_sge_queues - configure SGE Tx/Rx/response queues
1150 * @adap: the adapter
1152 * Determines how many sets of SGE queues to use and initializes them.
1153 * We support multiple queue sets per port if we have MSI-X, otherwise
1154 * just one queue set per port.
1156 static int setup_sge_queues(struct adapter *adap)
1158 int err, msi_idx, i, j;
1159 struct sge *s = &adap->sge;
1161 bitmap_zero(s->starving_fl, MAX_EGRQ);
1162 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1164 if (adap->flags & USING_MSIX)
1165 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1167 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1171 msi_idx = -((int)s->intrq.abs_id + 1);
1174 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1175 msi_idx, NULL, fwevtq_handler);
1177 freeout: t4_free_sge_resources(adap);
1181 for_each_port(adap, i) {
1182 struct net_device *dev = adap->port[i];
1183 struct port_info *pi = netdev_priv(dev);
1184 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1185 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1187 for (j = 0; j < pi->nqsets; j++, q++) {
1190 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1196 memset(&q->stats, 0, sizeof(q->stats));
1198 for (j = 0; j < pi->nqsets; j++, t++) {
1199 err = t4_sge_alloc_eth_txq(adap, t, dev,
1200 netdev_get_tx_queue(dev, j),
1201 s->fw_evtq.cntxt_id);
1207 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1208 for_each_ofldrxq(s, i) {
1209 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1210 struct net_device *dev = adap->port[i / j];
1214 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1215 q->fl.size ? &q->fl : NULL,
1219 memset(&q->stats, 0, sizeof(q->stats));
1220 s->ofld_rxq[i] = q->rspq.abs_id;
1221 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1222 s->fw_evtq.cntxt_id);
1227 for_each_rdmarxq(s, i) {
1228 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1232 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1233 msi_idx, q->fl.size ? &q->fl : NULL,
1237 memset(&q->stats, 0, sizeof(q->stats));
1238 s->rdma_rxq[i] = q->rspq.abs_id;
1241 for_each_rdmaciq(s, i) {
1242 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1246 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1247 msi_idx, q->fl.size ? &q->fl : NULL,
1251 memset(&q->stats, 0, sizeof(q->stats));
1252 s->rdma_ciq[i] = q->rspq.abs_id;
1255 for_each_port(adap, i) {
1257 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1258 * have RDMA queues, and that's the right value.
1260 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1261 s->fw_evtq.cntxt_id,
1262 s->rdmarxq[i].rspq.cntxt_id);
1267 t4_write_reg(adap, is_t4(adap->params.chip) ?
1268 MPS_TRC_RSS_CONTROL :
1269 MPS_T5_TRC_RSS_CONTROL,
1270 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1271 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1276 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1277 * The allocated memory is cleared.
1279 void *t4_alloc_mem(size_t size)
1281 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1289 * Free memory allocated through t4_alloc_mem().
1291 void t4_free_mem(void *addr)
1293 if (is_vmalloc_addr(addr))
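/*
 * Minimal sketch (assumed shape of the partially elided bodies above,
 * not a verbatim copy; the _sketch names are made up): t4_alloc_mem()
 * tries the slab allocator first and falls back to vzalloc() for large
 * requests, and t4_free_mem() picks the matching free routine based on
 * where the pointer came from.
 */
static void *alloc_mem_sketch(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);	/* virtually contiguous fallback */
	return p;
}

static void free_mem_sketch(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}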
1299 /* Send a Work Request to write the filter at a specified index. We construct
1300 * a Firmware Filter Work Request to have the work done and put the indicated
1301 * filter into "pending" mode which will prevent any further actions against
1302 * it till we get a reply from the firmware on the completion status of the
1305 static int set_filter_wr(struct adapter *adapter, int fidx)
1307 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1308 struct sk_buff *skb;
1309 struct fw_filter_wr *fwr;
1312 /* If the new filter requires loopback Destination MAC and/or VLAN
1313 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1316 if (f->fs.newdmac || f->fs.newvlan) {
1317 /* allocate L2T entry for new filter */
1318 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1321 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1322 f->fs.eport, f->fs.dmac)) {
1323 cxgb4_l2t_release(f->l2t);
1329 ftid = adapter->tids.ftid_base + fidx;
1331 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1332 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1333 memset(fwr, 0, sizeof(*fwr));
1335 /* It would be nice to put most of the following in t4_hw.c but most
1336 * of the work is translating the cxgbtool ch_filter_specification
1337 * into the Work Request and the definition of that structure is
1338 * currently in cxgbtool.h which isn't appropriate to pull into the
1339 * common code. We may eventually try to come up with a more neutral
1340 * filter specification structure but for now it's easiest to simply
1341 * put this fairly direct code in line ...
1343 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1344 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
1346 htonl(V_FW_FILTER_WR_TID(ftid) |
1347 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1348 V_FW_FILTER_WR_NOREPLY(0) |
1349 V_FW_FILTER_WR_IQ(f->fs.iq));
1350 fwr->del_filter_to_l2tix =
1351 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1352 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1353 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1354 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1355 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1356 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1357 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1358 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1359 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1360 f->fs.newvlan == VLAN_REWRITE) |
1361 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1362 f->fs.newvlan == VLAN_REWRITE) |
1363 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1364 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1365 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1366 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1367 fwr->ethtype = htons(f->fs.val.ethtype);
1368 fwr->ethtypem = htons(f->fs.mask.ethtype);
1369 fwr->frag_to_ovlan_vldm =
1370 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1371 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1372 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1373 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1374 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1375 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1377 fwr->rx_chan_rx_rpl_iq =
1378 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1379 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1380 fwr->maci_to_matchtypem =
1381 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1382 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1383 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1384 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1385 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1386 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1387 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1388 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1389 fwr->ptcl = f->fs.val.proto;
1390 fwr->ptclm = f->fs.mask.proto;
1391 fwr->ttyp = f->fs.val.tos;
1392 fwr->ttypm = f->fs.mask.tos;
1393 fwr->ivlan = htons(f->fs.val.ivlan);
1394 fwr->ivlanm = htons(f->fs.mask.ivlan);
1395 fwr->ovlan = htons(f->fs.val.ovlan);
1396 fwr->ovlanm = htons(f->fs.mask.ovlan);
1397 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1398 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1399 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1400 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1401 fwr->lp = htons(f->fs.val.lport);
1402 fwr->lpm = htons(f->fs.mask.lport);
1403 fwr->fp = htons(f->fs.val.fport);
1404 fwr->fpm = htons(f->fs.mask.fport);
1406 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1408 /* Mark the filter as "pending" and ship off the Filter Work Request.
1409 * When we get the Work Request Reply we'll clear the pending status.
1412 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1413 t4_ofld_send(adapter, skb);
1417 /* Delete the filter at a specified index.
1419 static int del_filter_wr(struct adapter *adapter, int fidx)
1421 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1422 struct sk_buff *skb;
1423 struct fw_filter_wr *fwr;
1424 unsigned int len, ftid;
1427 ftid = adapter->tids.ftid_base + fidx;
1429 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1430 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1431 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1433 /* Mark the filter as "pending" and ship off the Filter Work Request.
1434 * When we get the Work Request Reply we'll clear the pending status.
1437 t4_mgmt_tx(adapter, skb);
1441 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1442 void *accel_priv, select_queue_fallback_t fallback)
1446 #ifdef CONFIG_CHELSIO_T4_DCB
1447 /* If Data Center Bridging has been successfully negotiated on this
1448 * link then we'll use the skb's priority to map it to a TX Queue.
1449 * The skb's priority is determined via the VLAN Tag Priority Code
1452 if (cxgb4_dcb_enabled(dev)) {
1456 err = vlan_get_tag(skb, &vlan_tci);
1457 if (unlikely(err)) {
1458 if (net_ratelimit())
1460 "TX Packet without VLAN Tag on DCB Link\n");
1463 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1467 #endif /* CONFIG_CHELSIO_T4_DCB */
1470 txq = (skb_rx_queue_recorded(skb)
1471 ? skb_get_rx_queue(skb)
1472 : smp_processor_id());
1474 while (unlikely(txq >= dev->real_num_tx_queues))
1475 txq -= dev->real_num_tx_queues;
1480 return fallback(dev, skb) % dev->real_num_tx_queues;
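/*
 * Example (illustrative, added here): on a DCB-enabled link, a frame
 * tagged with VLAN TCI 0x6005 has Priority Code Point
 * (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 3, so the code above
 * steers it to TX queue 3.
 */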
1483 static inline int is_offload(const struct adapter *adap)
1485 return adap->params.offload;
1489 * Implementation of ethtool operations.
1492 static u32 get_msglevel(struct net_device *dev)
1494 return netdev2adap(dev)->msg_enable;
1497 static void set_msglevel(struct net_device *dev, u32 val)
1499 netdev2adap(dev)->msg_enable = val;
1502 static char stats_strings[][ETH_GSTRING_LEN] = {
1505 "TxBroadcastFrames ",
1506 "TxMulticastFrames ",
1512 "TxFrames128To255 ",
1513 "TxFrames256To511 ",
1514 "TxFrames512To1023 ",
1515 "TxFrames1024To1518 ",
1516 "TxFrames1519ToMax ",
1531 "RxBroadcastFrames ",
1532 "RxMulticastFrames ",
1544 "RxFrames128To255 ",
1545 "RxFrames256To511 ",
1546 "RxFrames512To1023 ",
1547 "RxFrames1024To1518 ",
1548 "RxFrames1519ToMax ",
1560 "RxBG0FramesDropped ",
1561 "RxBG1FramesDropped ",
1562 "RxBG2FramesDropped ",
1563 "RxBG3FramesDropped ",
1564 "RxBG0FramesTrunc ",
1565 "RxBG1FramesTrunc ",
1566 "RxBG2FramesTrunc ",
1567 "RxBG3FramesTrunc ",
1576 "WriteCoalSuccess ",
1580 static int get_sset_count(struct net_device *dev, int sset)
1584 return ARRAY_SIZE(stats_strings);
1590 #define T4_REGMAP_SIZE (160 * 1024)
1591 #define T5_REGMAP_SIZE (332 * 1024)
1593 static int get_regs_len(struct net_device *dev)
1595 struct adapter *adap = netdev2adap(dev);
1596 if (is_t4(adap->params.chip))
1597 return T4_REGMAP_SIZE;
1599 return T5_REGMAP_SIZE;
1602 static int get_eeprom_len(struct net_device *dev)
1607 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1609 struct adapter *adapter = netdev2adap(dev);
1611 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1612 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1613 strlcpy(info->bus_info, pci_name(adapter->pdev),
1614 sizeof(info->bus_info));
1616 if (adapter->params.fw_vers)
1617 snprintf(info->fw_version, sizeof(info->fw_version),
1618 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1619 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1620 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1621 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1622 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1623 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1624 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1625 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1626 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1629 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1631 if (stringset == ETH_SS_STATS)
1632 memcpy(data, stats_strings, sizeof(stats_strings));
1636 * port stats maintained per queue of the port. They should be in the same
1637 * order as in stats_strings above.
1639 struct queue_port_stats {
1649 static void collect_sge_port_stats(const struct adapter *adap,
1650 const struct port_info *p, struct queue_port_stats *s)
1653 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1654 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1656 memset(s, 0, sizeof(*s));
1657 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1659 s->tx_csum += tx->tx_cso;
1660 s->rx_csum += rx->stats.rx_cso;
1661 s->vlan_ex += rx->stats.vlan_ex;
1662 s->vlan_ins += tx->vlan_ins;
1663 s->gro_pkts += rx->stats.lro_pkts;
1664 s->gro_merged += rx->stats.lro_merged;
1668 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1671 struct port_info *pi = netdev_priv(dev);
1672 struct adapter *adapter = pi->adapter;
1675 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1677 data += sizeof(struct port_stats) / sizeof(u64);
1678 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1679 data += sizeof(struct queue_port_stats) / sizeof(u64);
1680 if (!is_t4(adapter->params.chip)) {
1681 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1682 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1683 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1684 *data = val1 - val2;
1689 memset(data, 0, 2 * sizeof(u64));
1695 * Return a version number to identify the type of adapter. The scheme is:
1696 * - bits 0..9: chip version
1697 * - bits 10..15: chip revision
1698 * - bits 16..23: register dump version
1700 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1702 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1703 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1706 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1709 u32 *p = buf + start;
1711 for ( ; start <= end; start += sizeof(u32))
1712 *p++ = t4_read_reg(ap, start);
1715 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1718 static const unsigned int t4_reg_ranges[] = {
1939 static const unsigned int t5_reg_ranges[] = {
2368 struct adapter *ap = netdev2adap(dev);
2369 static const unsigned int *reg_ranges;
2370 int arr_size = 0, buf_size = 0;
2372 if (is_t4(ap->params.chip)) {
2373 reg_ranges = &t4_reg_ranges[0];
2374 arr_size = ARRAY_SIZE(t4_reg_ranges);
2375 buf_size = T4_REGMAP_SIZE;
2377 reg_ranges = &t5_reg_ranges[0];
2378 arr_size = ARRAY_SIZE(t5_reg_ranges);
2379 buf_size = T5_REGMAP_SIZE;
2382 regs->version = mk_adap_vers(ap);
2384 memset(buf, 0, buf_size);
2385 for (i = 0; i < arr_size; i += 2)
2386 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2389 static int restart_autoneg(struct net_device *dev)
2391 struct port_info *p = netdev_priv(dev);
2393 if (!netif_running(dev))
2395 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2397 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2401 static int identify_port(struct net_device *dev,
2402 enum ethtool_phys_id_state state)
2405 struct adapter *adap = netdev2adap(dev);
2407 if (state == ETHTOOL_ID_ACTIVE)
2409 else if (state == ETHTOOL_ID_INACTIVE)
2414 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2417 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2421 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2422 type == FW_PORT_TYPE_BT_XAUI) {
2424 if (caps & FW_PORT_CAP_SPEED_100M)
2425 v |= SUPPORTED_100baseT_Full;
2426 if (caps & FW_PORT_CAP_SPEED_1G)
2427 v |= SUPPORTED_1000baseT_Full;
2428 if (caps & FW_PORT_CAP_SPEED_10G)
2429 v |= SUPPORTED_10000baseT_Full;
2430 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2431 v |= SUPPORTED_Backplane;
2432 if (caps & FW_PORT_CAP_SPEED_1G)
2433 v |= SUPPORTED_1000baseKX_Full;
2434 if (caps & FW_PORT_CAP_SPEED_10G)
2435 v |= SUPPORTED_10000baseKX4_Full;
2436 } else if (type == FW_PORT_TYPE_KR)
2437 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2438 else if (type == FW_PORT_TYPE_BP_AP)
2439 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2440 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2441 else if (type == FW_PORT_TYPE_BP4_AP)
2442 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2443 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2444 SUPPORTED_10000baseKX4_Full;
2445 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2446 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2447 v |= SUPPORTED_FIBRE;
2448 else if (type == FW_PORT_TYPE_BP40_BA)
2449 v |= SUPPORTED_40000baseSR4_Full;
2451 if (caps & FW_PORT_CAP_ANEG)
2452 v |= SUPPORTED_Autoneg;
2456 static unsigned int to_fw_linkcaps(unsigned int caps)
2460 if (caps & ADVERTISED_100baseT_Full)
2461 v |= FW_PORT_CAP_SPEED_100M;
2462 if (caps & ADVERTISED_1000baseT_Full)
2463 v |= FW_PORT_CAP_SPEED_1G;
2464 if (caps & ADVERTISED_10000baseT_Full)
2465 v |= FW_PORT_CAP_SPEED_10G;
2466 if (caps & ADVERTISED_40000baseSR4_Full)
2467 v |= FW_PORT_CAP_SPEED_40G;
2471 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2473 const struct port_info *p = netdev_priv(dev);
2475 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2476 p->port_type == FW_PORT_TYPE_BT_XFI ||
2477 p->port_type == FW_PORT_TYPE_BT_XAUI)
2478 cmd->port = PORT_TP;
2479 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2480 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2481 cmd->port = PORT_FIBRE;
2482 else if (p->port_type == FW_PORT_TYPE_SFP ||
2483 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2484 p->port_type == FW_PORT_TYPE_QSFP) {
2485 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2486 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2487 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2488 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2489 cmd->port = PORT_FIBRE;
2490 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2491 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2492 cmd->port = PORT_DA;
2494 cmd->port = PORT_OTHER;
2496 cmd->port = PORT_OTHER;
2498 if (p->mdio_addr >= 0) {
2499 cmd->phy_address = p->mdio_addr;
2500 cmd->transceiver = XCVR_EXTERNAL;
2501 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2502 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2504 cmd->phy_address = 0; /* not really, but no better option */
2505 cmd->transceiver = XCVR_INTERNAL;
2506 cmd->mdio_support = 0;
2509 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2510 cmd->advertising = from_fw_linkcaps(p->port_type,
2511 p->link_cfg.advertising);
2512 ethtool_cmd_speed_set(cmd,
2513 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2514 cmd->duplex = DUPLEX_FULL;
2515 cmd->autoneg = p->link_cfg.autoneg;
2521 static unsigned int speed_to_caps(int speed)
2524 return FW_PORT_CAP_SPEED_100M;
2526 return FW_PORT_CAP_SPEED_1G;
2528 return FW_PORT_CAP_SPEED_10G;
2530 return FW_PORT_CAP_SPEED_40G;
2534 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2537 struct port_info *p = netdev_priv(dev);
2538 struct link_config *lc = &p->link_cfg;
2539 u32 speed = ethtool_cmd_speed(cmd);
2541 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2544 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2546 * PHY offers a single speed. See if that's what's
2549 if (cmd->autoneg == AUTONEG_DISABLE &&
2550 (lc->supported & speed_to_caps(speed)))
2555 if (cmd->autoneg == AUTONEG_DISABLE) {
2556 cap = speed_to_caps(speed);
2558 if (!(lc->supported & cap) ||
2563 lc->requested_speed = cap;
2564 lc->advertising = 0;
2566 cap = to_fw_linkcaps(cmd->advertising);
2567 if (!(lc->supported & cap))
2569 lc->requested_speed = 0;
2570 lc->advertising = cap | FW_PORT_CAP_ANEG;
2572 lc->autoneg = cmd->autoneg;
2574 if (netif_running(dev))
2575 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2580 static void get_pauseparam(struct net_device *dev,
2581 struct ethtool_pauseparam *epause)
2583 struct port_info *p = netdev_priv(dev);
2585 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2586 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2587 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2590 static int set_pauseparam(struct net_device *dev,
2591 struct ethtool_pauseparam *epause)
2593 struct port_info *p = netdev_priv(dev);
2594 struct link_config *lc = &p->link_cfg;
2596 if (epause->autoneg == AUTONEG_DISABLE)
2597 lc->requested_fc = 0;
2598 else if (lc->supported & FW_PORT_CAP_ANEG)
2599 lc->requested_fc = PAUSE_AUTONEG;
2603 if (epause->rx_pause)
2604 lc->requested_fc |= PAUSE_RX;
2605 if (epause->tx_pause)
2606 lc->requested_fc |= PAUSE_TX;
2607 if (netif_running(dev))
2608 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2613 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2615 const struct port_info *pi = netdev_priv(dev);
2616 const struct sge *s = &pi->adapter->sge;
2618 e->rx_max_pending = MAX_RX_BUFFERS;
2619 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2620 e->rx_jumbo_max_pending = 0;
2621 e->tx_max_pending = MAX_TXQ_ENTRIES;
2623 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2624 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2625 e->rx_jumbo_pending = 0;
2626 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2629 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2632 const struct port_info *pi = netdev_priv(dev);
2633 struct adapter *adapter = pi->adapter;
2634 struct sge *s = &adapter->sge;
2636 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2637 e->tx_pending > MAX_TXQ_ENTRIES ||
2638 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2639 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2640 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2643 if (adapter->flags & FULL_INIT_DONE)
2646 for (i = 0; i < pi->nqsets; ++i) {
2647 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2648 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2649 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2654 static int closest_timer(const struct sge *s, int time)
2656 int i, delta, match = 0, min_delta = INT_MAX;
2658 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2659 delta = time - s->timer_val[i];
2662 if (delta < min_delta) {
2670 static int closest_thres(const struct sge *s, int thres)
2672 int i, delta, match = 0, min_delta = INT_MAX;
2674 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2675 delta = thres - s->counter_val[i];
2678 if (delta < min_delta) {
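/*
 * Sketch of the nearest-value search that closest_timer() and
 * closest_thres() perform (the absolute-value step falls in the elided
 * lines above; the helper name is made up): scan the table and remember
 * the index whose entry lies closest to the requested value.
 */
static int __maybe_unused closest_idx_sketch(const int *table, int len,
					     int value)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < len; i++) {
		delta = value - table[i];
		if (delta < 0)
			delta = -delta;		/* absolute distance */
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}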
2687 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2689 static unsigned int qtimer_val(const struct adapter *adap,
2690 const struct sge_rspq *q)
2692 unsigned int idx = q->intr_params >> 1;
2694 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2698 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2700 * @us: the hold-off time in us, or 0 to disable timer
2701 * @cnt: the hold-off packet count, or 0 to disable counter
2703 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2704 * one of the two needs to be enabled for the queue to generate interrupts.
2706 static int set_rspq_intr_params(struct sge_rspq *q,
2707 unsigned int us, unsigned int cnt)
2709 struct adapter *adap = q->adap;
2711 if ((us | cnt) == 0)
2718 new_idx = closest_thres(&adap->sge, cnt);
2719 if (q->desc && q->pktcnt_idx != new_idx) {
2720 /* the queue has already been created, update it */
2721 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2722 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2723 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2724 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2729 q->pktcnt_idx = new_idx;
2732 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2733 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
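/*
 * Encoding note (illustrative, added here): q->intr_params packs the
 * holdoff timer index in bits 1 and up and the counter-enable flag in
 * bit 0, which is why qtimer_val() above recovers the timer index with
 * intr_params >> 1.
 */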
2738 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2739 * @dev: the network device
2740 * @us: the hold-off time in us, or 0 to disable timer
2741 * @cnt: the hold-off packet count, or 0 to disable counter
2743 * Set the RX interrupt hold-off parameters for a network device.
2745 static int set_rx_intr_params(struct net_device *dev,
2746 unsigned int us, unsigned int cnt)
2749 struct port_info *pi = netdev_priv(dev);
2750 struct adapter *adap = pi->adapter;
2751 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2753 for (i = 0; i < pi->nqsets; i++, q++) {
2754 err = set_rspq_intr_params(&q->rspq, us, cnt);
2761 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2764 struct port_info *pi = netdev_priv(dev);
2765 struct adapter *adap = pi->adapter;
2766 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2768 for (i = 0; i < pi->nqsets; i++, q++)
2769 q->rspq.adaptive_rx = adaptive_rx;
2774 static int get_adaptive_rx_setting(struct net_device *dev)
2776 struct port_info *pi = netdev_priv(dev);
2777 struct adapter *adap = pi->adapter;
2778 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2780 return q->rspq.adaptive_rx;
2783 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2785 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2786 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2787 c->rx_max_coalesced_frames);
2790 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2792 const struct port_info *pi = netdev_priv(dev);
2793 const struct adapter *adap = pi->adapter;
2794 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2796 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2797 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2798 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2799 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2804 * eeprom_ptov - translate a physical EEPROM address to virtual
2805 * @phys_addr: the physical EEPROM address
2806 * @fn: the PCI function number
2807 * @sz: size of function-specific area
2809 * Translate a physical EEPROM address to virtual. The first 1K is
2810 * accessed through virtual addresses starting at 31K, the rest is
2811 * accessed through virtual addresses starting at 0.
2813 * The mapping is as follows:
2814 * [0..1K) -> [31K..32K)
2815 * [1K..1K+A) -> [31K-A..31K)
2816 * [1K+A..ES) -> [0..ES-A-1K)
2818 * where A = @fn * @sz, and ES = EEPROM size.
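 *
 * For example, assuming @fn = 1 and @sz = 1K (so A = 1K): physical
 * address 0x200 maps to 31K + 0x200, physical 0x500 maps to
 * 30K + 0x100, and physical 0x900 maps to 0x100.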
2820 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2823 if (phys_addr < 1024)
2824 return phys_addr + (31 << 10);
2825 if (phys_addr < 1024 + fn)
2826 return 31744 - fn + phys_addr - 1024;
2827 if (phys_addr < EEPROMSIZE)
2828 return phys_addr - 1024 - fn;
2833 * The next two routines implement eeprom read/write from physical addresses.
2835 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2837 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2840 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2841 return vaddr < 0 ? vaddr : 0;
2844 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2846 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2849 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2850 return vaddr < 0 ? vaddr : 0;
2853 #define EEPROM_MAGIC 0x38E2F10C
2855 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2859 struct adapter *adapter = netdev2adap(dev);
2861 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2865 e->magic = EEPROM_MAGIC;
2866 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2867 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2870 memcpy(data, buf + e->offset, e->len);
2875 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2880 u32 aligned_offset, aligned_len, *p;
2881 struct adapter *adapter = netdev2adap(dev);
2883 if (eeprom->magic != EEPROM_MAGIC)
2886 aligned_offset = eeprom->offset & ~3;
2887 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2889 if (adapter->fn > 0) {
2890 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2892 if (aligned_offset < start ||
2893 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2897 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2899 * RMW possibly needed for first or last words.
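 * For example, a 6-byte write at offset 3 covers the aligned byte
 * range [0, 12), so the first and last 32-bit words are read back
 * and merged with the new data before writing.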
2901 buf = kmalloc(aligned_len, GFP_KERNEL);
2904 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2905 if (!err && aligned_len > 4)
2906 err = eeprom_rd_phys(adapter,
2907 aligned_offset + aligned_len - 4,
2908 (u32 *)&buf[aligned_len - 4]);
2911 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2915 err = t4_seeprom_wp(adapter, false);
2919 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2920 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2921 aligned_offset += 4;
2925 err = t4_seeprom_wp(adapter, true);
2932 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2935 const struct firmware *fw;
2936 struct adapter *adap = netdev2adap(netdev);
2937 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2939 ef->data[sizeof(ef->data) - 1] = '\0';
2940 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2944 /* If the adapter has been fully initialized then we'll go ahead and
2945 * try to get the firmware's cooperation in upgrading to the new
2946 * firmware image; otherwise we'll try to do the entire job from the
2947 * host ... and we always "force" the operation in this path.
2949 if (adap->flags & FULL_INIT_DONE)
2952 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2953 release_firmware(fw);
2955 dev_info(adap->pdev_dev,
2956 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2960 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2961 #define BCAST_CRC 0xa0ccc1a6
2963 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2965 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2966 wol->wolopts = netdev2adap(dev)->wol;
2967 memset(&wol->sopass, 0, sizeof(wol->sopass));
2970 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2973 struct port_info *pi = netdev_priv(dev);
2975 if (wol->wolopts & ~WOL_SUPPORTED)
2977 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2978 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2979 if (wol->wolopts & WAKE_BCAST) {
2980 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2983 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2984 ~6ULL, ~0ULL, BCAST_CRC, true);
2986 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2990 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2992 const struct port_info *pi = netdev_priv(dev);
2993 netdev_features_t changed = dev->features ^ features;
2996 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2999 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
3001 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
3003 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
3007 static u32 get_rss_table_size(struct net_device *dev)
3009 const struct port_info *pi = netdev_priv(dev);
3011 return pi->rss_size;
3014 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3016 const struct port_info *pi = netdev_priv(dev);
3017 unsigned int n = pi->rss_size;
3024 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3027 struct port_info *pi = netdev_priv(dev);
3029 for (i = 0; i < pi->rss_size; i++)
3031 if (pi->adapter->flags & FULL_INIT_DONE)
3032 return write_rss(pi, pi->rss);
3036 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3039 const struct port_info *pi = netdev_priv(dev);
3041 switch (info->cmd) {
3042 case ETHTOOL_GRXFH: {
3043 unsigned int v = pi->rss_mode;
3046 switch (info->flow_type) {
3048 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3049 info->data = RXH_IP_SRC | RXH_IP_DST |
3050 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3051 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3052 info->data = RXH_IP_SRC | RXH_IP_DST;
3055 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3056 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3057 info->data = RXH_IP_SRC | RXH_IP_DST |
3058 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3059 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3060 info->data = RXH_IP_SRC | RXH_IP_DST;
3063 case AH_ESP_V4_FLOW:
3065 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3066 info->data = RXH_IP_SRC | RXH_IP_DST;
3069 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3070 info->data = RXH_IP_SRC | RXH_IP_DST |
3071 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3072 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3073 info->data = RXH_IP_SRC | RXH_IP_DST;
3076 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3077 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3078 info->data = RXH_IP_SRC | RXH_IP_DST |
3079 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3080 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3081 info->data = RXH_IP_SRC | RXH_IP_DST;
3084 case AH_ESP_V6_FLOW:
3086 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3087 info->data = RXH_IP_SRC | RXH_IP_DST;
3092 case ETHTOOL_GRXRINGS:
3093 info->data = pi->nqsets;
3099 static const struct ethtool_ops cxgb_ethtool_ops = {
3100 .get_settings = get_settings,
3101 .set_settings = set_settings,
3102 .get_drvinfo = get_drvinfo,
3103 .get_msglevel = get_msglevel,
3104 .set_msglevel = set_msglevel,
3105 .get_ringparam = get_sge_param,
3106 .set_ringparam = set_sge_param,
3107 .get_coalesce = get_coalesce,
3108 .set_coalesce = set_coalesce,
3109 .get_eeprom_len = get_eeprom_len,
3110 .get_eeprom = get_eeprom,
3111 .set_eeprom = set_eeprom,
3112 .get_pauseparam = get_pauseparam,
3113 .set_pauseparam = set_pauseparam,
3114 .get_link = ethtool_op_get_link,
3115 .get_strings = get_strings,
3116 .set_phys_id = identify_port,
3117 .nway_reset = restart_autoneg,
3118 .get_sset_count = get_sset_count,
3119 .get_ethtool_stats = get_stats,
3120 .get_regs_len = get_regs_len,
3121 .get_regs = get_regs,
3124 .get_rxnfc = get_rxnfc,
3125 .get_rxfh_indir_size = get_rss_table_size,
3126 .get_rxfh = get_rss_table,
3127 .set_rxfh = set_rss_table,
3128 .flash_device = set_flash,
3131 static int setup_debugfs(struct adapter *adap)
3133 if (IS_ERR_OR_NULL(adap->debugfs_root))
3136 #ifdef CONFIG_DEBUG_FS
3137 t4_setup_debugfs(adap);
3143 * upper-layer driver support
3147 * Allocate an active-open TID and set it to the supplied value.
3149 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3153 spin_lock_bh(&t->atid_lock);
3155 union aopen_entry *p = t->afree;
3157 atid = (p - t->atid_tab) + t->atid_base;
3162 spin_unlock_bh(&t->atid_lock);
3165 EXPORT_SYMBOL(cxgb4_alloc_atid);
3168 * Release an active-open TID.
3170 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3172 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3174 spin_lock_bh(&t->atid_lock);
3178 spin_unlock_bh(&t->atid_lock);
3180 EXPORT_SYMBOL(cxgb4_free_atid);
3183 * Allocate a server TID and set it to the supplied value.
3185 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3189 spin_lock_bh(&t->stid_lock);
3190 if (family == PF_INET) {
3191 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3192 if (stid < t->nstids)
3193 __set_bit(stid, t->stid_bmap);
3197 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3202 t->stid_tab[stid].data = data;
3203 stid += t->stid_base;
3204 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3205 * This is equivalent to 4 TIDs. With CLIP enabled it
3208 if (family == PF_INET)
3211 t->stids_in_use += 4;
3213 spin_unlock_bh(&t->stid_lock);
3216 EXPORT_SYMBOL(cxgb4_alloc_stid);
3218 /* Allocate a server filter TID and set it to the supplied value.
3220 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3224 spin_lock_bh(&t->stid_lock);
3225 if (family == PF_INET) {
3226 stid = find_next_zero_bit(t->stid_bmap,
3227 t->nstids + t->nsftids, t->nstids);
3228 if (stid < (t->nstids + t->nsftids))
3229 __set_bit(stid, t->stid_bmap);
3236 t->stid_tab[stid].data = data;
3238 stid += t->sftid_base;
3241 spin_unlock_bh(&t->stid_lock);
3244 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3246 /* Release a server TID.
3248 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3250 /* Is it a server filter TID? */
3251 if (t->nsftids && (stid >= t->sftid_base)) {
3252 stid -= t->sftid_base;
3255 stid -= t->stid_base;
3258 spin_lock_bh(&t->stid_lock);
3259 if (family == PF_INET)
3260 __clear_bit(stid, t->stid_bmap);
3262 bitmap_release_region(t->stid_bmap, stid, 2);
3263 t->stid_tab[stid].data = NULL;
3264 if (family == PF_INET)
3267 t->stids_in_use -= 4;
3268 spin_unlock_bh(&t->stid_lock);
3270 EXPORT_SYMBOL(cxgb4_free_stid);
3273 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3275 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3278 struct cpl_tid_release *req;
3280 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3281 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3282 INIT_TP_WR(req, tid);
3283 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3287 * Queue a TID release request and if necessary schedule a work queue to
3290 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3293 void **p = &t->tid_tab[tid];
3294 struct adapter *adap = container_of(t, struct adapter, tids);
3296 spin_lock_bh(&adap->tid_release_lock);
3297 *p = adap->tid_release_head;
3298 /* Low 2 bits encode the Tx channel number */
3299 adap->tid_release_head = (void **)((uintptr_t)p | chan);
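 /* tid_tab entries are pointer-sized and pointer-aligned, so the low
 * two bits of &tid_tab[tid] are always zero and are free to carry the
 * channel number; process_tid_release_list() strips them back off
 * with "& 3".
 */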
3300 if (!adap->tid_release_task_busy) {
3301 adap->tid_release_task_busy = true;
3302 queue_work(adap->workq, &adap->tid_release_task);
3304 spin_unlock_bh(&adap->tid_release_lock);
3308 * Process the list of pending TID release requests.
3310 static void process_tid_release_list(struct work_struct *work)
3312 struct sk_buff *skb;
3313 struct adapter *adap;
3315 adap = container_of(work, struct adapter, tid_release_task);
3317 spin_lock_bh(&adap->tid_release_lock);
3318 while (adap->tid_release_head) {
3319 void **p = adap->tid_release_head;
3320 unsigned int chan = (uintptr_t)p & 3;
3321 p = (void *)p - chan;
3323 adap->tid_release_head = *p;
3325 spin_unlock_bh(&adap->tid_release_lock);
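 /* The allocation below may sleep, so the (BH) spinlock must be
 * dropped first; the list head was already advanced under the lock,
 * so new release requests can keep queueing meanwhile.
 */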
3327 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3329 schedule_timeout_uninterruptible(1);
3331 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3332 t4_ofld_send(adap, skb);
3333 spin_lock_bh(&adap->tid_release_lock);
3335 adap->tid_release_task_busy = false;
3336 spin_unlock_bh(&adap->tid_release_lock);
3340 * Release a TID and inform HW. If we are unable to allocate the release
3341 * message we defer to a work queue.
3343 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3346 struct sk_buff *skb;
3347 struct adapter *adap = container_of(t, struct adapter, tids);
3349 old = t->tid_tab[tid];
3350 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3352 t->tid_tab[tid] = NULL;
3353 mk_tid_release(skb, chan, tid);
3354 t4_ofld_send(adap, skb);
3356 cxgb4_queue_tid_release(t, chan, tid);
3358 atomic_dec(&t->tids_in_use);
3360 EXPORT_SYMBOL(cxgb4_remove_tid);
3363 * Allocate and initialize the TID tables. Returns 0 on success.
3365 static int tid_init(struct tid_info *t)
3368 unsigned int stid_bmap_size;
3369 unsigned int natids = t->natids;
3370 struct adapter *adap = container_of(t, struct adapter, tids);
3372 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3373 size = t->ntids * sizeof(*t->tid_tab) +
3374 natids * sizeof(*t->atid_tab) +
3375 t->nstids * sizeof(*t->stid_tab) +
3376 t->nsftids * sizeof(*t->stid_tab) +
3377 stid_bmap_size * sizeof(long) +
3378 t->nftids * sizeof(*t->ftid_tab) +
3379 t->nsftids * sizeof(*t->ftid_tab);
3381 t->tid_tab = t4_alloc_mem(size);
3385 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3386 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3387 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3388 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3389 spin_lock_init(&t->stid_lock);
3390 spin_lock_init(&t->atid_lock);
3392 t->stids_in_use = 0;
3394 t->atids_in_use = 0;
3395 atomic_set(&t->tids_in_use, 0);
3397 /* Setup the free list for atid_tab and clear the stid bitmap. */
3400 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3401 t->afree = t->atid_tab;
3403 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3404 /* Reserve stid 0 for T4/T5 adapters */
3405 if (!t->stid_base &&
3406 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3407 __set_bit(0, t->stid_bmap);
3412 int cxgb4_clip_get(const struct net_device *dev,
3413 const struct in6_addr *lip)
3415 struct adapter *adap;
3416 struct fw_clip_cmd c;
3418 adap = netdev2adap(dev);
3419 memset(&c, 0, sizeof(c));
3420 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3421 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
3422 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3423 c.ip_hi = *(__be64 *)(lip->s6_addr);
3424 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3425 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3427 EXPORT_SYMBOL(cxgb4_clip_get);
3429 int cxgb4_clip_release(const struct net_device *dev,
3430 const struct in6_addr *lip)
3432 struct adapter *adap;
3433 struct fw_clip_cmd c;
3435 adap = netdev2adap(dev);
3436 memset(&c, 0, sizeof(c));
3437 c.op_to_write = htonl(FW_CMD_OP_V(FW_CLIP_CMD) |
3438 FW_CMD_REQUEST_F | FW_CMD_READ_F);
3439 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3440 c.ip_hi = *(__be64 *)(lip->s6_addr);
3441 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3442 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3444 EXPORT_SYMBOL(cxgb4_clip_release);
3447 * cxgb4_create_server - create an IP server
3449 * @stid: the server TID
3450 * @sip: local IP address to bind server to
3451 * @sport: the server's TCP port
3452 * @queue: queue to direct messages from this server to
3454 * Create an IP server for the given port and address.
3455 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3457 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3458 __be32 sip, __be16 sport, __be16 vlan,
3462 struct sk_buff *skb;
3463 struct adapter *adap;
3464 struct cpl_pass_open_req *req;
3467 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3471 adap = netdev2adap(dev);
3472 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3474 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3475 req->local_port = sport;
3476 req->peer_port = htons(0);
3477 req->local_ip = sip;
3478 req->peer_ip = htonl(0);
3479 chan = rxq_to_chan(&adap->sge, queue);
3480 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3481 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3482 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3483 ret = t4_mgmt_tx(adap, skb);
3484 return net_xmit_eval(ret);
3486 EXPORT_SYMBOL(cxgb4_create_server);
3488 /* cxgb4_create_server6 - create an IPv6 server
3490 * @stid: the server TID
3491 * @sip: local IPv6 address to bind server to
3492 * @sport: the server's TCP port
3493 * @queue: queue to direct messages from this server to
3495 * Create an IPv6 server for the given port and address.
3496 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3498 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3499 const struct in6_addr *sip, __be16 sport,
3503 struct sk_buff *skb;
3504 struct adapter *adap;
3505 struct cpl_pass_open_req6 *req;
3508 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3512 adap = netdev2adap(dev);
3513 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3515 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3516 req->local_port = sport;
3517 req->peer_port = htons(0);
3518 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3519 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3520 req->peer_ip_hi = cpu_to_be64(0);
3521 req->peer_ip_lo = cpu_to_be64(0);
3522 chan = rxq_to_chan(&adap->sge, queue);
3523 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3524 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3525 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3526 ret = t4_mgmt_tx(adap, skb);
3527 return net_xmit_eval(ret);
3529 EXPORT_SYMBOL(cxgb4_create_server6);
3531 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3532 unsigned int queue, bool ipv6)
3534 struct sk_buff *skb;
3535 struct adapter *adap;
3536 struct cpl_close_listsvr_req *req;
3539 adap = netdev2adap(dev);
3541 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3545 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3547 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3548 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3549 LISTSVR_IPV6(0)) | QUEUENO(queue));
3550 ret = t4_mgmt_tx(adap, skb);
3551 return net_xmit_eval(ret);
3553 EXPORT_SYMBOL(cxgb4_remove_server);
3556 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3557 * @mtus: the HW MTU table
3558 * @mtu: the target MTU
3559 * @idx: index of selected entry in the MTU table
3561 * Returns the index and the value in the HW MTU table that is closest to
3562 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3563 * table, in which case that smallest available value is selected.
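 *
 * For example, with a (hypothetical) MTU table containing ...,
 * 1488, 1500, 2002, ... and @mtu 1600, the index and value of the
 * 1500 entry are returned.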
3565 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3570 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3576 EXPORT_SYMBOL(cxgb4_best_mtu);
3579 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3580 * @mtus: the HW MTU table
3581 * @header_size: Header Size
3582 * @data_size_max: maximum Data Segment Size
3583 * @data_size_align: desired Data Segment Size Alignment (2^N)
3584 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3586 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3587 * MTU Table based solely on a Maximum MTU parameter, we break that
3588 * parameter up into a Header Size and Maximum Data Segment Size, and
3589 * provide a desired Data Segment Size Alignment. If we find an MTU in
3590 * the Hardware MTU Table which will result in a Data Segment Size with
3591 * the requested alignment _and_ that MTU isn't "too far" from the
3592 * closest MTU, then we'll return that rather than the closest MTU.
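 *
 * For example (hypothetical values): with @header_size 40 and
 * @data_size_align 8, an MTU of 1500 yields a 1460-byte data segment
 * (not 8-byte aligned) while 1488 yields 1448 (aligned), so 1488 is
 * preferred when the two are adjacent in the table.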
3594 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3595 unsigned short header_size,
3596 unsigned short data_size_max,
3597 unsigned short data_size_align,
3598 unsigned int *mtu_idxp)
3600 unsigned short max_mtu = header_size + data_size_max;
3601 unsigned short data_size_align_mask = data_size_align - 1;
3602 int mtu_idx, aligned_mtu_idx;
3604 /* Scan the MTU Table till we find an MTU which is larger than our
3605 * Maximum MTU or we reach the end of the table. Along the way,
3606 * record the last MTU found, if any, which will result in a Data
3607 * Segment Length matching the requested alignment.
3609 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3610 unsigned short data_size = mtus[mtu_idx] - header_size;
3612 /* If this MTU minus the Header Size would result in a
3613 * Data Segment Size of the desired alignment, remember it.
3615 if ((data_size & data_size_align_mask) == 0)
3616 aligned_mtu_idx = mtu_idx;
3618 /* If we're not at the end of the Hardware MTU Table and the
3619 * next element is larger than our Maximum MTU, drop out of
3622 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3626 /* If we fell out of the loop because we ran to the end of the table,
3627 * then we just have to use the last [largest] entry.
3629 if (mtu_idx == NMTUS)
3632 /* If we found an MTU which resulted in the requested Data Segment
3633 * Length alignment and that's "not far" from the largest MTU which is
3634 * less than or equal to the maximum MTU, then use that.
3636 if (aligned_mtu_idx >= 0 &&
3637 mtu_idx - aligned_mtu_idx <= 1)
3638 mtu_idx = aligned_mtu_idx;
3640 /* If the caller has passed in an MTU Index pointer, pass the
3641 * MTU Index back. Return the MTU value.
3644 *mtu_idxp = mtu_idx;
3645 return mtus[mtu_idx];
3647 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3650 * cxgb4_port_chan - get the HW channel of a port
3651 * @dev: the net device for the port
3653 * Return the HW Tx channel of the given port.
3655 unsigned int cxgb4_port_chan(const struct net_device *dev)
3657 return netdev2pinfo(dev)->tx_chan;
3659 EXPORT_SYMBOL(cxgb4_port_chan);
3661 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3663 struct adapter *adap = netdev2adap(dev);
3664 u32 v1, v2, lp_count, hp_count;
3666 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3667 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3668 if (is_t4(adap->params.chip)) {
3669 lp_count = G_LP_COUNT(v1);
3670 hp_count = G_HP_COUNT(v1);
3672 lp_count = G_LP_COUNT_T5(v1);
3673 hp_count = G_HP_COUNT_T5(v2);
3675 return lpfifo ? lp_count : hp_count;
3677 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3680 * cxgb4_port_viid - get the VI id of a port
3681 * @dev: the net device for the port
3683 * Return the VI id of the given port.
3685 unsigned int cxgb4_port_viid(const struct net_device *dev)
3687 return netdev2pinfo(dev)->viid;
3689 EXPORT_SYMBOL(cxgb4_port_viid);
3692 * cxgb4_port_idx - get the index of a port
3693 * @dev: the net device for the port
3695 * Return the index of the given port.
3697 unsigned int cxgb4_port_idx(const struct net_device *dev)
3699 return netdev2pinfo(dev)->port_id;
3701 EXPORT_SYMBOL(cxgb4_port_idx);
3703 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3704 struct tp_tcp_stats *v6)
3706 struct adapter *adap = pci_get_drvdata(pdev);
3708 spin_lock(&adap->stats_lock);
3709 t4_tp_get_tcp_stats(adap, v4, v6);
3710 spin_unlock(&adap->stats_lock);
3712 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3714 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3715 const unsigned int *pgsz_order)
3717 struct adapter *adap = netdev2adap(dev);
3719 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3720 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3721 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3722 HPZ3(pgsz_order[3]));
3724 EXPORT_SYMBOL(cxgb4_iscsi_init);
3726 int cxgb4_flush_eq_cache(struct net_device *dev)
3728 struct adapter *adap = netdev2adap(dev);
3731 ret = t4_fwaddrspace_write(adap, adap->mbox,
3732 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3735 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3737 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3739 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
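 /* each egress queue context occupies 24 bytes in EDC0 (hence the
 * "24 * qid" stride); the producer/consumer indices live in the
 * 8-byte word at offset 8, extracted below
 */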
3743 spin_lock(&adap->win0_lock);
3744 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3745 sizeof(indices), (__be32 *)&indices,
3747 spin_unlock(&adap->win0_lock);
3749 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3750 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3755 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3758 struct adapter *adap = netdev2adap(dev);
3759 u16 hw_pidx, hw_cidx;
3762 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3766 if (pidx != hw_pidx) {
3769 if (pidx >= hw_pidx)
3770 delta = pidx - hw_pidx;
3772 delta = size - hw_pidx + pidx;
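 /* e.g. with size 1024, hw_pidx 1000 and pidx 8 the SW index
 * has wrapped past the end of the ring, giving
 * delta = 1024 - 1000 + 8 = 32 entries
 */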
3774 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3775 QID(qid) | PIDX(delta));
3780 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3782 void cxgb4_disable_db_coalescing(struct net_device *dev)
3784 struct adapter *adap;
3786 adap = netdev2adap(dev);
3787 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3790 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3792 void cxgb4_enable_db_coalescing(struct net_device *dev)
3794 struct adapter *adap;
3796 adap = netdev2adap(dev);
3797 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3799 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3801 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3803 struct adapter *adap;
3804 u32 offset, memtype, memaddr;
3805 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
3806 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3809 adap = netdev2adap(dev);
3811 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3813 /* Figure out where the offset lands in the Memory Type/Address scheme.
3814 * This code assumes that the memory is laid out starting at offset 0
3815 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3816 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3817 * MC0, and some have both MC0 and MC1.
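 * For example, on a card with 128MB in each of EDC0 and EDC1, an
 * offset of 300MB would land 44MB into MC0.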
3819 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3820 edc0_size = EDRAM0_SIZE_G(size) << 20;
3821 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3822 edc1_size = EDRAM1_SIZE_G(size) << 20;
3823 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3824 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
3826 edc0_end = edc0_size;
3827 edc1_end = edc0_end + edc1_size;
3828 mc0_end = edc1_end + mc0_size;
3830 if (offset < edc0_end) {
3833 } else if (offset < edc1_end) {
3835 memaddr = offset - edc0_end;
3837 if (offset < mc0_end) {
3839 memaddr = offset - edc1_end;
3840 } else if (is_t4(adap->params.chip)) {
3841 /* T4 only has a single memory channel */
3844 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3845 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
3846 mc1_end = mc0_end + mc1_size;
3847 if (offset < mc1_end) {
3849 memaddr = offset - mc0_end;
3851 /* offset beyond the end of any memory */
3857 spin_lock(&adap->win0_lock);
3858 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3859 spin_unlock(&adap->win0_lock);
3863 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3867 EXPORT_SYMBOL(cxgb4_read_tpte);
3869 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3872 struct adapter *adap;
3874 adap = netdev2adap(dev);
3875 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3876 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3878 return ((u64)hi << 32) | (u64)lo;
3880 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3882 static struct pci_driver cxgb4_driver;
3884 static void check_neigh_update(struct neighbour *neigh)
3886 const struct device *parent;
3887 const struct net_device *netdev = neigh->dev;
3889 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3890 netdev = vlan_dev_real_dev(netdev);
3891 parent = netdev->dev.parent;
3892 if (parent && parent->driver == &cxgb4_driver.driver)
3893 t4_l2t_update(dev_get_drvdata(parent), neigh);
3896 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3900 case NETEVENT_NEIGH_UPDATE:
3901 check_neigh_update(data);
3903 case NETEVENT_REDIRECT:
3910 static bool netevent_registered;
3911 static struct notifier_block cxgb4_netevent_nb = {
3912 .notifier_call = netevent_cb
3915 static void drain_db_fifo(struct adapter *adap, int usecs)
3917 u32 v1, v2, lp_count, hp_count;
3920 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3921 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3922 if (is_t4(adap->params.chip)) {
3923 lp_count = G_LP_COUNT(v1);
3924 hp_count = G_HP_COUNT(v1);
3926 lp_count = G_LP_COUNT_T5(v1);
3927 hp_count = G_HP_COUNT_T5(v2);
3930 if (lp_count == 0 && hp_count == 0)
3932 set_current_state(TASK_UNINTERRUPTIBLE);
3933 schedule_timeout(usecs_to_jiffies(usecs));
3937 static void disable_txq_db(struct sge_txq *q)
3939 unsigned long flags;
3941 spin_lock_irqsave(&q->db_lock, flags);
3943 spin_unlock_irqrestore(&q->db_lock, flags);
3946 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3948 spin_lock_irq(&q->db_lock);
3949 if (q->db_pidx_inc) {
3950 /* Make sure that all writes to the TX descriptors
3951 * are committed before we tell HW about them.
3954 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3955 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3959 spin_unlock_irq(&q->db_lock);
3962 static void disable_dbs(struct adapter *adap)
3966 for_each_ethrxq(&adap->sge, i)
3967 disable_txq_db(&adap->sge.ethtxq[i].q);
3968 for_each_ofldrxq(&adap->sge, i)
3969 disable_txq_db(&adap->sge.ofldtxq[i].q);
3970 for_each_port(adap, i)
3971 disable_txq_db(&adap->sge.ctrlq[i].q);
3974 static void enable_dbs(struct adapter *adap)
3978 for_each_ethrxq(&adap->sge, i)
3979 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3980 for_each_ofldrxq(&adap->sge, i)
3981 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3982 for_each_port(adap, i)
3983 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3986 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3988 if (adap->uld_handle[CXGB4_ULD_RDMA])
3989 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3993 static void process_db_full(struct work_struct *work)
3995 struct adapter *adap;
3997 adap = container_of(work, struct adapter, db_full_task);
3999 drain_db_fifo(adap, dbfifo_drain_delay);
4001 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4002 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4003 DBFIFO_HP_INT | DBFIFO_LP_INT,
4004 DBFIFO_HP_INT | DBFIFO_LP_INT);
4007 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4009 u16 hw_pidx, hw_cidx;
4012 spin_lock_irq(&q->db_lock);
4013 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4016 if (q->db_pidx != hw_pidx) {
4019 if (q->db_pidx >= hw_pidx)
4020 delta = q->db_pidx - hw_pidx;
4022 delta = q->size - hw_pidx + q->db_pidx;
4024 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4025 QID(q->cntxt_id) | PIDX(delta));
4030 spin_unlock_irq(&q->db_lock);
4032 CH_WARN(adap, "DB drop recovery failed.\n");
4034 static void recover_all_queues(struct adapter *adap)
4038 for_each_ethrxq(&adap->sge, i)
4039 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4040 for_each_ofldrxq(&adap->sge, i)
4041 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4042 for_each_port(adap, i)
4043 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4046 static void process_db_drop(struct work_struct *work)
4048 struct adapter *adap;
4050 adap = container_of(work, struct adapter, db_drop_task);
4052 if (is_t4(adap->params.chip)) {
4053 drain_db_fifo(adap, dbfifo_drain_delay);
4054 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4055 drain_db_fifo(adap, dbfifo_drain_delay);
4056 recover_all_queues(adap);
4057 drain_db_fifo(adap, dbfifo_drain_delay);
4059 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4061 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4062 u16 qid = (dropped_db >> 15) & 0x1ffff;
4063 u16 pidx_inc = dropped_db & 0x1fff;
4065 unsigned short udb_density;
4066 unsigned long qpshift;
4070 dev_warn(adap->pdev_dev,
4071 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4073 (dropped_db >> 14) & 1,
4074 (dropped_db >> 13) & 1,
4077 drain_db_fifo(adap, 1);
4079 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4080 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4081 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4082 qpshift = PAGE_SHIFT - ilog2(udb_density);
4083 udb = qid << qpshift;
4085 page = udb / PAGE_SIZE;
4086 udb += (qid - (page * udb_density)) * 128;
4088 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4090 /* Re-enable BAR2 WC */
4091 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4094 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4097 void t4_db_full(struct adapter *adap)
4099 if (is_t4(adap->params.chip)) {
4101 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4102 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4103 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4104 queue_work(adap->workq, &adap->db_full_task);
4108 void t4_db_dropped(struct adapter *adap)
4110 if (is_t4(adap->params.chip)) {
4112 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4114 queue_work(adap->workq, &adap->db_drop_task);
4117 static void uld_attach(struct adapter *adap, unsigned int uld)
4120 struct cxgb4_lld_info lli;
4123 lli.pdev = adap->pdev;
4125 lli.l2t = adap->l2t;
4126 lli.tids = &adap->tids;
4127 lli.ports = adap->port;
4128 lli.vr = &adap->vres;
4129 lli.mtus = adap->params.mtus;
4130 if (uld == CXGB4_ULD_RDMA) {
4131 lli.rxq_ids = adap->sge.rdma_rxq;
4132 lli.ciq_ids = adap->sge.rdma_ciq;
4133 lli.nrxq = adap->sge.rdmaqs;
4134 lli.nciq = adap->sge.rdmaciqs;
4135 } else if (uld == CXGB4_ULD_ISCSI) {
4136 lli.rxq_ids = adap->sge.ofld_rxq;
4137 lli.nrxq = adap->sge.ofldqsets;
4139 lli.ntxq = adap->sge.ofldqsets;
4140 lli.nchan = adap->params.nports;
4141 lli.nports = adap->params.nports;
4142 lli.wr_cred = adap->params.ofldq_wr_cred;
4143 lli.adapter_type = adap->params.chip;
4144 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4145 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4146 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4147 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4149 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4150 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4152 lli.filt_mode = adap->params.tp.vlan_pri_map;
4153 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4154 for (i = 0; i < NCHAN; i++)
4156 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4157 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4158 lli.fw_vers = adap->params.fw_vers;
4159 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4160 lli.sge_ingpadboundary = adap->sge.fl_align;
4161 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4162 lli.sge_pktshift = adap->sge.pktshift;
4163 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4164 lli.max_ordird_qp = adap->params.max_ordird_qp;
4165 lli.max_ird_adapter = adap->params.max_ird_adapter;
4166 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4168 handle = ulds[uld].add(&lli);
4169 if (IS_ERR(handle)) {
4170 dev_warn(adap->pdev_dev,
4171 "could not attach to the %s driver, error %ld\n",
4172 uld_str[uld], PTR_ERR(handle));
4176 adap->uld_handle[uld] = handle;
4178 if (!netevent_registered) {
4179 register_netevent_notifier(&cxgb4_netevent_nb);
4180 netevent_registered = true;
4183 if (adap->flags & FULL_INIT_DONE)
4184 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4187 static void attach_ulds(struct adapter *adap)
4191 spin_lock(&adap_rcu_lock);
4192 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4193 spin_unlock(&adap_rcu_lock);
4195 mutex_lock(&uld_mutex);
4196 list_add_tail(&adap->list_node, &adapter_list);
4197 for (i = 0; i < CXGB4_ULD_MAX; i++)
4199 uld_attach(adap, i);
4200 mutex_unlock(&uld_mutex);
4203 static void detach_ulds(struct adapter *adap)
4207 mutex_lock(&uld_mutex);
4208 list_del(&adap->list_node);
4209 for (i = 0; i < CXGB4_ULD_MAX; i++)
4210 if (adap->uld_handle[i]) {
4211 ulds[i].state_change(adap->uld_handle[i],
4212 CXGB4_STATE_DETACH);
4213 adap->uld_handle[i] = NULL;
4215 if (netevent_registered && list_empty(&adapter_list)) {
4216 unregister_netevent_notifier(&cxgb4_netevent_nb);
4217 netevent_registered = false;
4219 mutex_unlock(&uld_mutex);
4221 spin_lock(&adap_rcu_lock);
4222 list_del_rcu(&adap->rcu_node);
4223 spin_unlock(&adap_rcu_lock);
4226 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4230 mutex_lock(&uld_mutex);
4231 for (i = 0; i < CXGB4_ULD_MAX; i++)
4232 if (adap->uld_handle[i])
4233 ulds[i].state_change(adap->uld_handle[i], new_state);
4234 mutex_unlock(&uld_mutex);
4238 * cxgb4_register_uld - register an upper-layer driver
4239 * @type: the ULD type
4240 * @p: the ULD methods
4242 * Registers an upper-layer driver with this driver and notifies the ULD
4243 * about any presently available devices that support its type. Returns
4244 * %-EBUSY if a ULD of the same type is already registered.
4246 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4249 struct adapter *adap;
4251 if (type >= CXGB4_ULD_MAX)
4253 mutex_lock(&uld_mutex);
4254 if (ulds[type].add) {
4259 list_for_each_entry(adap, &adapter_list, list_node)
4260 uld_attach(adap, type);
4261 out: mutex_unlock(&uld_mutex);
4264 EXPORT_SYMBOL(cxgb4_register_uld);
4267 * cxgb4_unregister_uld - unregister an upper-layer driver
4268 * @type: the ULD type
4270 * Unregisters an existing upper-layer driver.
4272 int cxgb4_unregister_uld(enum cxgb4_uld type)
4274 struct adapter *adap;
4276 if (type >= CXGB4_ULD_MAX)
4278 mutex_lock(&uld_mutex);
4279 list_for_each_entry(adap, &adapter_list, list_node)
4280 adap->uld_handle[type] = NULL;
4281 ulds[type].add = NULL;
4282 mutex_unlock(&uld_mutex);
4285 EXPORT_SYMBOL(cxgb4_unregister_uld);
4287 /* Check whether the netdev on which the event occurred belongs to us.
4288 * Returns true if it does and false otherwise.
4289 * Called with rcu_read_lock() held.
4291 #if IS_ENABLED(CONFIG_IPV6)
4292 static bool cxgb4_netdev(const struct net_device *netdev)
4294 struct adapter *adap;
4297 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4298 for (i = 0; i < MAX_NPORTS; i++)
4299 if (adap->port[i] == netdev)
4304 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4305 unsigned long event)
4307 int ret = NOTIFY_DONE;
4310 if (cxgb4_netdev(event_dev)) {
4313 ret = cxgb4_clip_get(event_dev,
4314 (const struct in6_addr *)ifa->addr.s6_addr);
4322 cxgb4_clip_release(event_dev,
4323 (const struct in6_addr *)ifa->addr.s6_addr);
4334 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4335 unsigned long event, void *data)
4337 struct inet6_ifaddr *ifa = data;
4338 struct net_device *event_dev;
4339 int ret = NOTIFY_DONE;
4340 struct bonding *bond = netdev_priv(ifa->idev->dev);
4341 struct list_head *iter;
4342 struct slave *slave;
4343 struct pci_dev *first_pdev = NULL;
4345 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4346 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4347 ret = clip_add(event_dev, ifa, event);
4348 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4349 /* It is possible that two different adapters are bonded in one
4350 * bond. We need to find such different adapters and add clip
4351 * in all of them only once.
4353 bond_for_each_slave(bond, slave, iter) {
4355 ret = clip_add(slave->dev, ifa, event);
4356 /* Only initialize first_pdev if clip_add succeeds,
4357 * since that means the slave is one of our devices
4359 if (ret == NOTIFY_OK)
4360 first_pdev = to_pci_dev(
4361 slave->dev->dev.parent);
4362 } else if (first_pdev !=
4363 to_pci_dev(slave->dev->dev.parent))
4364 ret = clip_add(slave->dev, ifa, event);
4367 ret = clip_add(ifa->idev->dev, ifa, event);
4372 static struct notifier_block cxgb4_inet6addr_notifier = {
4373 .notifier_call = cxgb4_inet6addr_handler
4376 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4377 * a physical device.
4378 * The physical device reference is needed to send the actual CLIP command.
4380 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4382 struct inet6_dev *idev = NULL;
4383 struct inet6_ifaddr *ifa;
4386 idev = __in6_dev_get(root_dev);
4390 read_lock_bh(&idev->lock);
4391 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4392 ret = cxgb4_clip_get(dev,
4393 (const struct in6_addr *)ifa->addr.s6_addr);
4397 read_unlock_bh(&idev->lock);
4402 static int update_root_dev_clip(struct net_device *dev)
4404 struct net_device *root_dev = NULL;
4407 /* First populate the real net device's IPv6 addresses */
4408 ret = update_dev_clip(dev, dev);
4412 /* Parse all bond and vlan devices layered on top of the physical dev */
4413 root_dev = netdev_master_upper_dev_get_rcu(dev);
4415 ret = update_dev_clip(root_dev, dev);
4420 for (i = 0; i < VLAN_N_VID; i++) {
4421 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4425 ret = update_dev_clip(root_dev, dev);
4432 static void update_clip(const struct adapter *adap)
4435 struct net_device *dev;
4440 for (i = 0; i < MAX_NPORTS; i++) {
4441 dev = adap->port[i];
4445 ret = update_root_dev_clip(dev);
4452 #endif /* IS_ENABLED(CONFIG_IPV6) */
4455 * cxgb_up - enable the adapter
4456 * @adap: adapter being enabled
4458 * Called when the first port is enabled, this function performs the
4459 * actions necessary to make an adapter operational, such as completing
4460 * the initialization of HW modules, and enabling interrupts.
4462 * Must be called with the rtnl lock held.
4464 static int cxgb_up(struct adapter *adap)
4468 err = setup_sge_queues(adap);
4471 err = setup_rss(adap);
4475 if (adap->flags & USING_MSIX) {
4476 name_msix_vecs(adap);
4477 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4478 adap->msix_info[0].desc, adap);
4482 err = request_msix_queue_irqs(adap);
4484 free_irq(adap->msix_info[0].vec, adap);
4488 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4489 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4490 adap->port[0]->name, adap);
4496 t4_intr_enable(adap);
4497 adap->flags |= FULL_INIT_DONE;
4498 notify_ulds(adap, CXGB4_STATE_UP);
4499 #if IS_ENABLED(CONFIG_IPV6)
4505 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4507 t4_free_sge_resources(adap);
4511 static void cxgb_down(struct adapter *adapter)
4513 t4_intr_disable(adapter);
4514 cancel_work_sync(&adapter->tid_release_task);
4515 cancel_work_sync(&adapter->db_full_task);
4516 cancel_work_sync(&adapter->db_drop_task);
4517 adapter->tid_release_task_busy = false;
4518 adapter->tid_release_head = NULL;
4520 if (adapter->flags & USING_MSIX) {
4521 free_msix_queue_irqs(adapter);
4522 free_irq(adapter->msix_info[0].vec, adapter);
4524 free_irq(adapter->pdev->irq, adapter);
4525 quiesce_rx(adapter);
4526 t4_sge_stop(adapter);
4527 t4_free_sge_resources(adapter);
4528 adapter->flags &= ~FULL_INIT_DONE;
4532 * net_device operations
4534 static int cxgb_open(struct net_device *dev)
4537 struct port_info *pi = netdev_priv(dev);
4538 struct adapter *adapter = pi->adapter;
4540 netif_carrier_off(dev);
4542 if (!(adapter->flags & FULL_INIT_DONE)) {
4543 err = cxgb_up(adapter);
4548 err = link_start(dev);
4550 netif_tx_start_all_queues(dev);
4554 static int cxgb_close(struct net_device *dev)
4556 struct port_info *pi = netdev_priv(dev);
4557 struct adapter *adapter = pi->adapter;
4559 netif_tx_stop_all_queues(dev);
4560 netif_carrier_off(dev);
4561 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4564 /* Return an error number if the indicated filter isn't writable ...
4566 static int writable_filter(struct filter_entry *f)
4576 /* Delete the filter at the specified index (if valid). This checks for
4577 * all the common problems with doing so, like the filter being locked or
4578 * currently pending in another operation.
4580 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4582 struct filter_entry *f;
4585 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4588 f = &adapter->tids.ftid_tab[fidx];
4589 ret = writable_filter(f);
4593 return del_filter_wr(adapter, fidx);
4598 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4599 __be32 sip, __be16 sport, __be16 vlan,
4600 unsigned int queue, unsigned char port, unsigned char mask)
4603 struct filter_entry *f;
4604 struct adapter *adap;
4608 adap = netdev2adap(dev);
4610 /* Adjust stid to correct filter index */
4611 stid -= adap->tids.sftid_base;
4612 stid += adap->tids.nftids;
4614 /* Check to make sure the filter requested is writable ...
4616 f = &adap->tids.ftid_tab[stid];
4617 ret = writable_filter(f);
4621 /* Clear out any old resources being used by the filter before
4622 * we start constructing the new filter.
4625 clear_filter(adap, f);
4627 /* Clear out filter specifications */
4628 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4629 f->fs.val.lport = cpu_to_be16(sport);
4630 f->fs.mask.lport = ~0;
4632 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4633 for (i = 0; i < 4; i++) {
4634 f->fs.val.lip[i] = val[i];
4635 f->fs.mask.lip[i] = ~0;
4637 if (adap->params.tp.vlan_pri_map & F_PORT) {
4638 f->fs.val.iport = port;
4639 f->fs.mask.iport = mask;
4643 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4644 f->fs.val.proto = IPPROTO_TCP;
4645 f->fs.mask.proto = ~0;
4650 /* Mark filter as locked */
4654 ret = set_filter_wr(adap, stid);
4656 clear_filter(adap, f);
4662 EXPORT_SYMBOL(cxgb4_create_server_filter);
4664 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4665 unsigned int queue, bool ipv6)
4668 struct filter_entry *f;
4669 struct adapter *adap;
4671 adap = netdev2adap(dev);
4673 /* Adjust stid to correct filter index */
4674 stid -= adap->tids.sftid_base;
4675 stid += adap->tids.nftids;
4677 f = &adap->tids.ftid_tab[stid];
4678 /* Unlock the filter */
4681 ret = delete_filter(adap, stid);
4687 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4689 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4690 struct rtnl_link_stats64 *ns)
4692 struct port_stats stats;
4693 struct port_info *p = netdev_priv(dev);
4694 struct adapter *adapter = p->adapter;
4696 /* Block retrieving statistics during EEH error
4697 * recovery. Otherwise, the recovery might fail
4698 * and the PCI device will be removed permanently
4700 spin_lock(&adapter->stats_lock);
4701 if (!netif_device_present(dev)) {
4702 spin_unlock(&adapter->stats_lock);
4705 t4_get_port_stats(adapter, p->tx_chan, &stats);
4706 spin_unlock(&adapter->stats_lock);
4708 ns->tx_bytes = stats.tx_octets;
4709 ns->tx_packets = stats.tx_frames;
4710 ns->rx_bytes = stats.rx_octets;
4711 ns->rx_packets = stats.rx_frames;
4712 ns->multicast = stats.rx_mcast_frames;
4714 /* detailed rx_errors */
4715 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4717 ns->rx_over_errors = 0;
4718 ns->rx_crc_errors = stats.rx_fcs_err;
4719 ns->rx_frame_errors = stats.rx_symbol_err;
4720 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4721 stats.rx_ovflow2 + stats.rx_ovflow3 +
4722 stats.rx_trunc0 + stats.rx_trunc1 +
4723 stats.rx_trunc2 + stats.rx_trunc3;
4724 ns->rx_missed_errors = 0;
4726 /* detailed tx_errors */
4727 ns->tx_aborted_errors = 0;
4728 ns->tx_carrier_errors = 0;
4729 ns->tx_fifo_errors = 0;
4730 ns->tx_heartbeat_errors = 0;
4731 ns->tx_window_errors = 0;
4733 ns->tx_errors = stats.tx_error_frames;
4734 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4735 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4739 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4742 int ret = 0, prtad, devad;
4743 struct port_info *pi = netdev_priv(dev);
4744 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4748 if (pi->mdio_addr < 0)
4750 data->phy_id = pi->mdio_addr;
4754 if (mdio_phy_id_is_c45(data->phy_id)) {
4755 prtad = mdio_phy_id_prtad(data->phy_id);
4756 devad = mdio_phy_id_devad(data->phy_id);
4757 } else if (data->phy_id < 32) {
4758 prtad = data->phy_id;
4760 data->reg_num &= 0x1f;
4764 mbox = pi->adapter->fn;
4765 if (cmd == SIOCGMIIREG)
4766 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4767 data->reg_num, &data->val_out);
4769 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4770 data->reg_num, data->val_in);
4778 static void cxgb_set_rxmode(struct net_device *dev)
4780 /* unfortunately we can't return errors to the stack */
4781 set_rxmode(dev, -1, false);
4784 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4787 struct port_info *pi = netdev_priv(dev);
4789 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4791 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4798 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4801 struct sockaddr *addr = p;
4802 struct port_info *pi = netdev_priv(dev);
4804 if (!is_valid_ether_addr(addr->sa_data))
4805 return -EADDRNOTAVAIL;
4807 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4808 pi->xact_addr_filt, addr->sa_data, true, true);
4812 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4813 pi->xact_addr_filt = ret;
4817 #ifdef CONFIG_NET_POLL_CONTROLLER
4818 static void cxgb_netpoll(struct net_device *dev)
4820 struct port_info *pi = netdev_priv(dev);
4821 struct adapter *adap = pi->adapter;
4823 if (adap->flags & USING_MSIX) {
4825 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4827 for (i = pi->nqsets; i; i--, rx++)
4828 t4_sge_intr_msix(0, &rx->rspq);
4830 t4_intr_handler(adap)(0, adap);
4834 static const struct net_device_ops cxgb4_netdev_ops = {
4835 .ndo_open = cxgb_open,
4836 .ndo_stop = cxgb_close,
4837 .ndo_start_xmit = t4_eth_xmit,
4838 .ndo_select_queue = cxgb_select_queue,
4839 .ndo_get_stats64 = cxgb_get_stats,
4840 .ndo_set_rx_mode = cxgb_set_rxmode,
4841 .ndo_set_mac_address = cxgb_set_mac_addr,
4842 .ndo_set_features = cxgb_set_features,
4843 .ndo_validate_addr = eth_validate_addr,
4844 .ndo_do_ioctl = cxgb_ioctl,
4845 .ndo_change_mtu = cxgb_change_mtu,
4846 #ifdef CONFIG_NET_POLL_CONTROLLER
4847 .ndo_poll_controller = cxgb_netpoll,
4851 void t4_fatal_err(struct adapter *adap)
4853 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4854 t4_intr_disable(adap);
4855 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4858 /* Return the specified PCI-E Configuration Space register from our Physical
4859 * Function. We try first via a Firmware LDST Command since we prefer to let
4860 * the firmware own all of these registers, but if that fails we go for it
4861 * directly ourselves.
4863 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4865 struct fw_ldst_cmd ldst_cmd;
4869 /* Construct and send the Firmware LDST Command to retrieve the
4870 * specified PCI-E Configuration Space register.
4872 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4873 ldst_cmd.op_to_addrspace =
4874 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4877 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4878 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4879 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4880 ldst_cmd.u.pcie.ctrl_to_fn =
4881 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4882 ldst_cmd.u.pcie.r = reg;
4883 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4886 /* If the LDST Command succeeded, extract the returned register
4887 * value. Otherwise read it directly ourselves.
4890 val = ntohl(ldst_cmd.u.pcie.data[0]);
4892 t4_hw_pci_read_cfg4(adap, reg, &val);
4897 static void setup_memwin(struct adapter *adap)
4899 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4901 if (is_t4(adap->params.chip)) {
4904 /* Truncation intentional: we only read the bottom 32-bits of
4905 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4906 * mechanism to read BAR0 instead of using
4907 * pci_resource_start() because we could be operating from
4908 * within a Virtual Machine which is trapping our accesses to
4909 * our Configuration Space and we need to set up the PCI-E
4910 * Memory Window decoders with the actual addresses which will
4911 * be coming across the PCI-E link.
4913 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4914 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4915 adap->t4_bar0 = bar0;
4917 mem_win0_base = bar0 + MEMWIN0_BASE;
4918 mem_win1_base = bar0 + MEMWIN1_BASE;
4919 mem_win2_base = bar0 + MEMWIN2_BASE;
4920 mem_win2_aperture = MEMWIN2_APERTURE;
4922 /* For T5, only relative offset inside the PCIe BAR is passed */
4923 mem_win0_base = MEMWIN0_BASE;
4924 mem_win1_base = MEMWIN1_BASE;
4925 mem_win2_base = MEMWIN2_BASE_T5;
4926 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4928 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4929 mem_win0_base | BIR(0) |
4930 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4931 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4932 mem_win1_base | BIR(0) |
4933 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4934 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4935 mem_win2_base | BIR(0) |
4936 WINDOW(ilog2(mem_win2_aperture) - 10));
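 /* window apertures are programmed as log2 of the window size in KB,
 * hence the "ilog2() - 10" above; the read-back below flushes the
 * posted writes to the chip
 */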
4937 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4940 static void setup_memwin_rdma(struct adapter *adap)
4942 if (adap->vres.ocq.size) {
4946 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4947 start &= PCI_BASE_ADDRESS_MEM_MASK;
4948 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4949 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4951 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4952 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4954 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4955 adap->vres.ocq.start);
4957 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4961 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4966 /* get device capabilities */
4967 memset(c, 0, sizeof(*c));
4968 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4969 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4970 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4971 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4975 /* select capabilities we'll be using */
4976 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4978 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4980 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4981 } else if (vf_acls) {
4982 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4985 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4986 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4987 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4991 ret = t4_config_glbl_rss(adap, adap->fn,
4992 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4993 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4994 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4998 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4999 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5005 /* tweak some settings */
5006 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5007 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5008 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5009 v = t4_read_reg(adap, TP_PIO_DATA);
5010 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5012 /* first 4 Tx modulation queues point to consecutive Tx channels */
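 /* (0xE4 == 0b11100100: two bits per modulation queue, mapping
 * queues 0..3 onto channels 0..3 respectively)
 */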
5013 adap->params.tp.tx_modq_map = 0xE4;
5014 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5015 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
5017 /* associate each Tx modulation queue with consecutive Tx channels */
5019 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5020 &v, 1, A_TP_TX_SCHED_HDR);
5021 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5022 &v, 1, A_TP_TX_SCHED_FIFO);
5023 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5024 &v, 1, A_TP_TX_SCHED_PCMD);
5026 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5027 if (is_offload(adap)) {
5028 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5029 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5030 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5031 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5032 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5033 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5034 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5035 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5036 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5037 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5040 /* get basic stuff going */
5041 return t4_early_init(adap, adap->fn);
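/* Worked example for the Tx modulation queue map programmed in adap_init1()
 * above: 0xE4 is 11'10'01'00 in binary, i.e. four 2-bit fields mapping
 * modulation queue 0 to channel 0, queue 1 to channel 1, queue 2 to
 * channel 2 and queue 3 to channel 3 -- the "consecutive Tx channels"
 * identity mapping the comment describes.
 */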
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
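/* Background for the rx_dma_offset check in adap_init0_tweaks() above: with
 * the default PKTSHIFT of 2 the chip DMAs each ingress packet two bytes into
 * its buffer, so the 14-byte Ethernet header ends on a 16-byte boundary and
 * the IP header that follows is 4-byte aligned; 0 disables the pad and every
 * other value is rejected.
 */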
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
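				/* Worked example of the residual handling
				 * above (illustrative sizes): a 1027-byte
				 * Configuration File yields size = 1024 and
				 * resid = 3, so the aligned 1024 bytes go out
				 * in the first t4_memory_rw() and the final
				 * word is rebuilt from data[256] with its
				 * last byte zeroed before being written to
				 * maddr + 1024.
				 */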
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;

	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;

	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);
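	/* Net effect of the SGE provisioning above: every holdoff timer is
	 * clamped to MAX_SGE_TIMERVAL (200us), the last timer slot is pinned
	 * at that maximum, and counter 0 stays at a single packet so queues
	 * can always fall back to per-packet interrupts.
	 */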
#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(
						  adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "\
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif

	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case FCOE_MASK:
				bits +=  1;
				break;
			case PORT_MASK:
				bits +=  3;
				break;
			case VNIC_ID_MASK:
				bits += 17;
				break;
			case VLAN_MASK:
				bits += 17;
				break;
			case TOS_MASK:
				bits +=  8;
				break;
			case PROTOCOL_MASK:
				bits +=  8;
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MACMATCH_MASK:
				bits +=  9;
				break;
			case MPSHITTYPE_MASK:
				bits +=  3;
				break;
			case FRAGMENTATION_MASK:
				bits +=  1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36;"\
				" using %#x\n", tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);
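	/* Worked example of the 36-bit budget enforced above (an illustrative
	 * combination, not a recommended setting): enabling VLAN (17 bits),
	 * VNIC_ID (17 bits) and PORT (3 bits) would need 37 bits, so the
	 * driver would warn and fall back to TP_VLAN_PRI_MAP_DEFAULT.
	 */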
	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
		 "driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
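/* fw_info_array is consulted through find_fw_info(): adap_init0() looks up,
 * by chip generation, the firmware header the driver was compiled against
 * when deciding whether the image on the card needs upgrading.
 */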
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		if (fw != NULL)
			release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}

	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files warn user and exit,
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter.  Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;
	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region.  Divide the available filter
		 * region into two parts.  Regular filters get 1/3rd and
		 * server filters get 2/3rd part.  This is only enabled if
		 * the workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 * to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
				adap->tids.ftid_base;
		}
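		/* Worked example of the filter region split above
		 * (illustrative): with nftids = 496, DIV_ROUND_UP(496, 3) =
		 * 166, so regular filters keep 166 IDs and the remaining 330
		 * become server filter IDs starting at sftid_base.
		 */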
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
				"device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
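/* In the init_rspq() calls that follow, the third and fourth arguments are
 * the interrupt holdoff time in microseconds and the packet count threshold
 * passed to set_rspq_intr_params(), and the last two are the queue size in
 * entries and the ingress queue entry size in bytes.
 */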
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
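/* Worked example for the non-DCB provisioning above (illustrative): a 2-port
 * 10G adapter gives q10g = MAX_ETH_QSETS / 2 queue sets per port before the
 * cap, and with the common default of 8 RSS queues from
 * netif_get_num_default_rss_queues() each port ends up with 8 queue sets, so
 * qidx (and hence s->max_ethqsets) is 16.
 */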
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;  /* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}

#undef EXTRA_VECS
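/* Worked example of the vector accounting above (illustrative, non-DCB): a
 * 2-port offload-capable adapter wants max_ethqsets + EXTRA_VECS + rdmaqs +
 * rdmaciqs + ofldqsets vectors but will settle for need = nports +
 * EXTRA_VECS + 3 * nports = 10; anything granted beyond the fixed overhead
 * goes to the Ethernet queues first.
 */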
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
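/* ethtool_rxfh_indir_default(j, n) is simply j % n, so the loop above seeds
 * each port's RSS indirection table with a round-robin spread across that
 * port's own queue sets.
 */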
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		iounmap(regs);
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page size.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}
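	/* Worked example of the write-coalescing check above: with 4KB pages,
	 * num_seg = 4096 / SEGMENT_SIZE = 32, so a QUEUESPERPAGEPF value that
	 * packs more than 32 egress queues into a page would leave less than
	 * one 128-byte coalescing segment per queue and is rejected.
	 */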
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);

#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		synchronize_rcu();
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);