2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
44 #include <linux/bitfield.h>
45 #include <linux/bpf.h>
46 #include <linux/bpf_trace.h>
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/init.h>
51 #include <linux/netdevice.h>
52 #include <linux/etherdevice.h>
53 #include <linux/interrupt.h>
55 #include <linux/ipv6.h>
56 #include <linux/page_ref.h>
57 #include <linux/pci.h>
58 #include <linux/pci_regs.h>
59 #include <linux/msi.h>
60 #include <linux/ethtool.h>
61 #include <linux/log2.h>
62 #include <linux/if_vlan.h>
63 #include <linux/random.h>
65 #include <linux/ktime.h>
67 #include <net/pkt_cls.h>
68 #include <net/vxlan.h>
70 #include "nfpcore/nfp_nsp.h"
71 #include "nfp_net_ctrl.h"
75 * nfp_net_get_fw_version() - Read and parse the FW version
76 * @fw_ver: Output fw_version structure to fill in
77 * @ctrl_bar: Mapped address of the control BAR
79 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
80 void __iomem *ctrl_bar)
84 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
85 put_unaligned_le32(reg, fw_ver);
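/* Illustrative sketch (not part of the driver): assuming struct
 * nfp_net_fw_version uses the packed { u8 minor; u8 major; u8 class; u8 resv; }
 * layout from nfp_net.h, put_unaligned_le32() above distributes the register
 * bytes as shown below.
 */
static inline void example_show_fw_version(u32 reg)
{
	u8 minor = reg & 0xff;		/* lowest byte of the LE value */
	u8 major = (reg >> 8) & 0xff;
	u8 class = (reg >> 16) & 0xff;

	pr_info("fw version %u.%u class %u\n", major, minor, class);
}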
88 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
90 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
91 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
92 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
96 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
98 dma_sync_single_for_device(dp->dev, dma_addr,
99 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
103 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
105 dma_unmap_single_attrs(dp->dev, dma_addr,
106 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
107 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
110 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
113 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
114 len, dp->rx_dma_dir);
119 * Firmware reconfig may take a while so we have two versions of it -
120 * synchronous and asynchronous (posted). All synchronous callers are holding
121 * RTNL so we don't have to worry about serializing them.
123 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
125 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
126 /* ensure update is written before pinging HW */
128 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
131 /* Pass 0 as update to run posted reconfigs. */
132 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
134 update |= nn->reconfig_posted;
135 nn->reconfig_posted = 0;
137 nfp_net_reconfig_start(nn, update);
139 nn->reconfig_timer_active = true;
140 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
143 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
147 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
150 if (reg & NFP_NET_CFG_UPDATE_ERR) {
151 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
153 } else if (last_check) {
154 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
161 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
163 bool timed_out = false;
165 /* Poll update field, waiting for NFP to ack the config */
166 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
168 timed_out = time_is_before_eq_jiffies(deadline);
171 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
174 return timed_out ? -EIO : 0;
177 static void nfp_net_reconfig_timer(unsigned long data)
179 struct nfp_net *nn = (void *)data;
181 spin_lock_bh(&nn->reconfig_lock);
183 nn->reconfig_timer_active = false;
185 /* If sync caller is present it will take over from us */
186 if (nn->reconfig_sync_present)
189 /* Read reconfig status and report errors */
190 nfp_net_reconfig_check_done(nn, true);
192 if (nn->reconfig_posted)
193 nfp_net_reconfig_start_async(nn, 0);
195 spin_unlock_bh(&nn->reconfig_lock);
199 * nfp_net_reconfig_post() - Post async reconfig request
200 * @nn: NFP Net device to reconfigure
201 * @update: The value for the update field in the BAR config
203 * Record FW reconfiguration request. Reconfiguration will be kicked off
204 * whenever the reconfiguration machinery is idle. Multiple requests can be merged.
207 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
209 spin_lock_bh(&nn->reconfig_lock);
211 /* Sync caller will kick off async reconf when it's done, just post */
212 if (nn->reconfig_sync_present) {
213 nn->reconfig_posted |= update;
217 /* Opportunistically check if the previous command is done */
218 if (!nn->reconfig_timer_active ||
219 nfp_net_reconfig_check_done(nn, false))
220 nfp_net_reconfig_start_async(nn, update);
222 nn->reconfig_posted |= update;
224 spin_unlock_bh(&nn->reconfig_lock);
228 * nfp_net_reconfig() - Reconfigure the firmware
229 * @nn: NFP Net device to reconfigure
230 * @update: The value for the update field in the BAR config
232 * Write the update word to the BAR and ping the reconfig queue. Then poll
233 * until the firmware has acknowledged the update by zeroing the update word.
236 * Return: Negative errno on error, 0 on success
238 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
240 bool cancelled_timer = false;
241 u32 pre_posted_requests;
244 spin_lock_bh(&nn->reconfig_lock);
246 nn->reconfig_sync_present = true;
248 if (nn->reconfig_timer_active) {
249 del_timer(&nn->reconfig_timer);
250 nn->reconfig_timer_active = false;
251 cancelled_timer = true;
253 pre_posted_requests = nn->reconfig_posted;
254 nn->reconfig_posted = 0;
256 spin_unlock_bh(&nn->reconfig_lock);
259 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
261 /* Run the posted reconfigs which were issued before we started */
262 if (pre_posted_requests) {
263 nfp_net_reconfig_start(nn, pre_posted_requests);
264 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
267 nfp_net_reconfig_start(nn, update);
268 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
270 spin_lock_bh(&nn->reconfig_lock);
272 if (nn->reconfig_posted)
273 nfp_net_reconfig_start_async(nn, 0);
275 nn->reconfig_sync_present = false;
277 spin_unlock_bh(&nn->reconfig_lock);
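/* Usage sketch (illustrative, hypothetical helper name): the pattern used by
 * callers in this file is to update the relevant control BAR fields and then
 * ask the firmware to pick them up with a synchronous reconfig.  All such
 * callers hold RTNL.
 */
static int example_set_mtu(struct nfp_net *nn, unsigned int mtu)
{
	nn_writel(nn, NFP_NET_CFG_MTU, mtu);

	/* 0 on success, negative errno on firmware error or timeout */
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}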
282 /* Interrupt configuration and handling
286 * nfp_net_irq_unmask() - Unmask automasked interrupt
287 * @nn: NFP Network structure
288 * @entry_nr: MSI-X table entry
290 * Clear the ICR for the IRQ entry.
292 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
294 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
299 * nfp_net_irqs_alloc() - Allocate MSI-X IRQs
300 * @pdev: PCI device structure
301 * @irq_entries: Array to be initialized and used to hold the irq entries
302 * @min_irqs: Minimal acceptable number of interrupts
303 * @wanted_irqs: Target number of interrupts to allocate
305 * Return: Number of irqs obtained or 0 on error.
308 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
309 unsigned int min_irqs, unsigned int wanted_irqs)
314 for (i = 0; i < wanted_irqs; i++)
315 irq_entries[i].entry = i;
317 got_irqs = pci_enable_msix_range(pdev, irq_entries,
318 min_irqs, wanted_irqs);
320 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
321 min_irqs, wanted_irqs, got_irqs);
325 if (got_irqs < wanted_irqs)
326 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
327 wanted_irqs, got_irqs);
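/* Caller sketch (illustrative, hypothetical function name): the probe code
 * allocates MSI-X vectors once per device and only then hands them to a
 * netdev with nfp_net_irqs_assign().
 */
static int example_alloc_vectors(struct pci_dev *pdev, struct nfp_net *nn,
				 struct msix_entry *entries,
				 unsigned int wanted)
{
	unsigned int n;

	/* Accept anything down to the non-queue vectors plus one ring vector */
	n = nfp_net_irqs_alloc(pdev, entries, NFP_NET_NON_Q_VECTORS + 1,
			       wanted);
	if (!n)
		return -ENOMEM;

	nfp_net_irqs_assign(nn, entries, n);
	return 0;
}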
333 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
334 * @nn: NFP Network structure
335 * @irq_entries: Table of allocated interrupts
336 * @n: Size of @irq_entries (number of entries to grab)
338 * After interrupts are allocated with nfp_net_irqs_alloc() this function
339 * should be called to assign them to a specific netdev (port).
342 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
345 struct nfp_net_dp *dp = &nn->dp;
347 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
348 dp->num_r_vecs = nn->max_r_vecs;
350 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
352 if (dp->num_rx_rings > dp->num_r_vecs ||
353 dp->num_tx_rings > dp->num_r_vecs)
354 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
355 dp->num_rx_rings, dp->num_tx_rings,
358 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
359 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
360 dp->num_stack_tx_rings = dp->num_tx_rings;
364 * nfp_net_irqs_disable() - Disable interrupts
365 * @pdev: PCI device structure
367 * Undoes what @nfp_net_irqs_alloc() does.
369 void nfp_net_irqs_disable(struct pci_dev *pdev)
371 pci_disable_msix(pdev);
375 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
377 * @data: Opaque data structure
379 * Return: Indicate if the interrupt has been handled.
381 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
383 struct nfp_net_r_vector *r_vec = data;
385 napi_schedule_irqoff(&r_vec->napi);
387 /* The FW auto-masks any interrupt, either via the MASK bit in
388 * the MSI-X table or via the per entry ICR field. So there
389 * is no need to disable interrupts here.
394 bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
399 spin_lock_irqsave(&nn->link_status_lock, flags);
400 ret = nn->link_changed;
401 nn->link_changed = false;
402 spin_unlock_irqrestore(&nn->link_status_lock, flags);
408 * nfp_net_read_link_status() - Reread link status from control BAR
409 * @nn: NFP Network structure
411 static void nfp_net_read_link_status(struct nfp_net *nn)
417 spin_lock_irqsave(&nn->link_status_lock, flags);
419 sts = nn_readl(nn, NFP_NET_CFG_STS);
420 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
422 if (nn->link_up == link_up)
425 nn->link_up = link_up;
426 nn->link_changed = true;
429 netif_carrier_on(nn->dp.netdev);
430 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
432 netif_carrier_off(nn->dp.netdev);
433 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
436 spin_unlock_irqrestore(&nn->link_status_lock, flags);
440 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
442 * @data: Opaque data structure
444 * Return: Indicate if the interrupt has been handled.
446 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
448 struct nfp_net *nn = data;
449 struct msix_entry *entry;
451 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
453 nfp_net_read_link_status(nn);
455 nfp_net_irq_unmask(nn, entry->entry);
461 * nfp_net_irq_exn() - Interrupt service routine for exceptions
463 * @data: Opaque data structure
465 * Return: Indicate if the interrupt has been handled.
467 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
469 struct nfp_net *nn = data;
471 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
472 /* XXX TO BE IMPLEMENTED */
477 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
478 * @tx_ring: TX ring structure
479 * @r_vec: IRQ vector servicing this ring
481 * @is_xdp: Is this an XDP TX ring?
484 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
485 struct nfp_net_r_vector *r_vec, unsigned int idx,
488 struct nfp_net *nn = r_vec->nfp_net;
491 tx_ring->r_vec = r_vec;
492 tx_ring->is_xdp = is_xdp;
494 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
495 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
499 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
500 * @rx_ring: RX ring structure
501 * @r_vec: IRQ vector servicing this ring
505 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
506 struct nfp_net_r_vector *r_vec, unsigned int idx)
508 struct nfp_net *nn = r_vec->nfp_net;
511 rx_ring->r_vec = r_vec;
513 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
514 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
518 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
519 * @netdev: netdev structure
521 static void nfp_net_vecs_init(struct net_device *netdev)
523 struct nfp_net *nn = netdev_priv(netdev);
524 struct nfp_net_r_vector *r_vec;
527 nn->lsc_handler = nfp_net_irq_lsc;
528 nn->exn_handler = nfp_net_irq_exn;
530 for (r = 0; r < nn->max_r_vecs; r++) {
531 struct msix_entry *entry;
533 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
535 r_vec = &nn->r_vecs[r];
537 r_vec->handler = nfp_net_irq_rxtx;
538 r_vec->irq_entry = entry->entry;
539 r_vec->irq_vector = entry->vector;
541 cpumask_set_cpu(r, &r_vec->affinity_mask);
546 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
547 * @nn: NFP Network structure
548 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
549 * @format: printf-style format to construct the interrupt name
550 * @name: Pointer to allocated space for interrupt name
551 * @name_sz: Size of space for interrupt name
552 * @vector_idx: Index of MSI-X vector used for this interrupt
553 * @handler: IRQ handler to register for this interrupt
556 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
557 const char *format, char *name, size_t name_sz,
558 unsigned int vector_idx, irq_handler_t handler)
560 struct msix_entry *entry;
563 entry = &nn->irq_entries[vector_idx];
565 snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
566 err = request_irq(entry->vector, handler, 0, name, nn);
568 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
572 nn_writeb(nn, ctrl_offset, entry->entry);
578 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
579 * @nn: NFP Network structure
580 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
581 * @vector_idx: Index of MSI-X vector used for this interrupt
583 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
584 unsigned int vector_idx)
586 nn_writeb(nn, ctrl_offset, 0xff);
587 free_irq(nn->irq_entries[vector_idx].vector, nn);
592 * One queue controller peripheral queue is used for transmit. The
593 * driver en-queues packets for transmit by advancing the write
594 * pointer. The device indicates that packets have been transmitted by
595 * advancing the read pointer. The driver maintains a local copy of
596 * the read and write pointer in @struct nfp_net_tx_ring. The driver
597 * keeps @wr_p in sync with the queue controller write pointer and can
598 * determine how many packets have been transmitted by comparing its
599 * copy of the read pointer @rd_p with the read pointer maintained by
600 * the queue controller peripheral.
604 * nfp_net_tx_full() - Check if the TX ring is full
605 * @tx_ring: TX ring to check
606 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
608 * This function checks, based on the *host copy* of the read/write
609 * pointers, whether a given TX ring is full. The real TX queue may have
610 * some newly made available slots.
612 * Return: True if the ring is full.
614 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
616 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
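/* Worked example (illustrative): with cnt = 4096, wr_p = 5000 and rd_p = 950
 * there are 4050 descriptors in flight, so a request for dcnt = 48 sees
 * 4050 >= 4096 - 48 = 4048 and reports the ring as full.  The unsigned
 * subtraction stays correct even after the u32 counters wrap around.
 */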
619 /* Wrappers for deciding when to stop and restart TX queues */
620 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
622 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
625 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
627 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
631 * nfp_net_tx_ring_stop() - stop tx ring
632 * @nd_q: netdev queue
633 * @tx_ring: driver tx queue structure
635 * Safely stop TX ring. Remember that while we are running .start_xmit()
636 * someone else may be cleaning the TX ring completions so we need to be
637 * extra careful here.
639 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
640 struct nfp_net_tx_ring *tx_ring)
642 netif_tx_stop_queue(nd_q);
644 /* We can race with the TX completion out of NAPI so recheck */
646 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
647 netif_tx_start_queue(nd_q);
651 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
652 * @r_vec: per-ring structure
653 * @txbuf: Pointer to driver soft TX descriptor
654 * @txd: Pointer to HW TX descriptor
655 * @skb: Pointer to SKB
657 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
658 * Return an error if the packet header is larger than the maximum supported LSO header size.
660 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
661 struct nfp_net_tx_buf *txbuf,
662 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
667 if (!skb_is_gso(skb))
670 if (!skb->encapsulation) {
671 txd->l3_offset = skb_network_offset(skb);
672 txd->l4_offset = skb_transport_offset(skb);
673 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
675 txd->l3_offset = skb_inner_network_offset(skb);
676 txd->l4_offset = skb_inner_transport_offset(skb);
677 hdrlen = skb_inner_transport_header(skb) - skb->data +
678 inner_tcp_hdrlen(skb);
681 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
682 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
684 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
685 txd->lso_hdrlen = hdrlen;
686 txd->mss = cpu_to_le16(mss);
687 txd->flags |= PCIE_DESC_TX_LSO;
689 u64_stats_update_begin(&r_vec->tx_sync);
691 u64_stats_update_end(&r_vec->tx_sync);
695 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
696 * @dp: NFP Net data path struct
697 * @r_vec: per-ring structure
698 * @txbuf: Pointer to driver soft TX descriptor
699 * @txd: Pointer to TX descriptor
700 * @skb: Pointer to SKB
702 * This function sets the TX checksum flags in the TX descriptor based
703 * on the configuration and the protocol of the packet to be transmitted.
705 static void nfp_net_tx_csum(struct nfp_net_dp *dp,
706 struct nfp_net_r_vector *r_vec,
707 struct nfp_net_tx_buf *txbuf,
708 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
710 struct ipv6hdr *ipv6h;
714 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
717 if (skb->ip_summed != CHECKSUM_PARTIAL)
720 txd->flags |= PCIE_DESC_TX_CSUM;
721 if (skb->encapsulation)
722 txd->flags |= PCIE_DESC_TX_ENCAP;
724 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
725 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
727 if (iph->version == 4) {
728 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
729 l4_hdr = iph->protocol;
730 } else if (ipv6h->version == 6) {
731 l4_hdr = ipv6h->nexthdr;
733 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
739 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
742 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
745 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
749 u64_stats_update_begin(&r_vec->tx_sync);
750 if (skb->encapsulation)
751 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
753 r_vec->hw_csum_tx += txbuf->pkt_cnt;
754 u64_stats_update_end(&r_vec->tx_sync);
757 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
760 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
761 tx_ring->wr_ptr_add = 0;
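/* Doorbell batching sketch (illustrative, hypothetical helper name): the TX
 * path accumulates wr_ptr_add across an xmit_more burst and only rings the
 * QCP doorbell once the stack signals the end of the burst or the queue
 * stops.
 */
static void example_kick_tx(struct nfp_net_tx_ring *tx_ring,
			    struct sk_buff *skb, struct netdev_queue *nd_q)
{
	tx_ring->wr_ptr_add++;	/* descriptors queued for this skb */
	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
		nfp_net_tx_xmit_more_flush(tx_ring);
}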
765 * nfp_net_tx() - Main transmit entry point
766 * @skb: SKB to transmit
767 * @netdev: netdev structure
769 * Return: NETDEV_TX_OK on success.
771 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
773 struct nfp_net *nn = netdev_priv(netdev);
774 const struct skb_frag_struct *frag;
775 struct nfp_net_tx_desc *txd, txdg;
776 struct nfp_net_tx_ring *tx_ring;
777 struct nfp_net_r_vector *r_vec;
778 struct nfp_net_tx_buf *txbuf;
779 struct netdev_queue *nd_q;
780 struct nfp_net_dp *dp;
788 qidx = skb_get_queue_mapping(skb);
789 tx_ring = &dp->tx_rings[qidx];
790 r_vec = tx_ring->r_vec;
791 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
793 nr_frags = skb_shinfo(skb)->nr_frags;
795 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
796 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
797 qidx, tx_ring->wr_p, tx_ring->rd_p);
798 netif_tx_stop_queue(nd_q);
799 nfp_net_tx_xmit_more_flush(tx_ring);
800 u64_stats_update_begin(&r_vec->tx_sync);
802 u64_stats_update_end(&r_vec->tx_sync);
803 return NETDEV_TX_BUSY;
806 /* Start with the head skbuf */
807 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
809 if (dma_mapping_error(dp->dev, dma_addr))
812 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
814 /* Stash the soft descriptor of the head then initialize it */
815 txbuf = &tx_ring->txbufs[wr_idx];
817 txbuf->dma_addr = dma_addr;
820 txbuf->real_len = skb->len;
822 /* Build TX descriptor */
823 txd = &tx_ring->txds[wr_idx];
824 txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
825 txd->dma_len = cpu_to_le16(skb_headlen(skb));
826 nfp_desc_set_dma_addr(txd, dma_addr);
827 txd->data_len = cpu_to_le16(skb->len);
833 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
834 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
835 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
836 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
837 txd->flags |= PCIE_DESC_TX_VLAN;
838 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
843 /* all descs must match except for in addr, length and eop */
846 for (f = 0; f < nr_frags; f++) {
847 frag = &skb_shinfo(skb)->frags[f];
848 fsize = skb_frag_size(frag);
850 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
851 fsize, DMA_TO_DEVICE);
852 if (dma_mapping_error(dp->dev, dma_addr))
855 wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
856 tx_ring->txbufs[wr_idx].skb = skb;
857 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
858 tx_ring->txbufs[wr_idx].fidx = f;
860 txd = &tx_ring->txds[wr_idx];
862 txd->dma_len = cpu_to_le16(fsize);
863 nfp_desc_set_dma_addr(txd, dma_addr);
865 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
868 u64_stats_update_begin(&r_vec->tx_sync);
870 u64_stats_update_end(&r_vec->tx_sync);
873 netdev_tx_sent_queue(nd_q, txbuf->real_len);
875 tx_ring->wr_p += nr_frags + 1;
876 if (nfp_net_tx_ring_should_stop(tx_ring))
877 nfp_net_tx_ring_stop(nd_q, tx_ring);
879 tx_ring->wr_ptr_add += nr_frags + 1;
880 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
881 nfp_net_tx_xmit_more_flush(tx_ring);
883 skb_tx_timestamp(skb);
890 frag = &skb_shinfo(skb)->frags[f];
891 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
892 skb_frag_size(frag), DMA_TO_DEVICE);
893 tx_ring->txbufs[wr_idx].skb = NULL;
894 tx_ring->txbufs[wr_idx].dma_addr = 0;
895 tx_ring->txbufs[wr_idx].fidx = -2;
898 wr_idx += tx_ring->cnt;
900 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
901 skb_headlen(skb), DMA_TO_DEVICE);
902 tx_ring->txbufs[wr_idx].skb = NULL;
903 tx_ring->txbufs[wr_idx].dma_addr = 0;
904 tx_ring->txbufs[wr_idx].fidx = -2;
906 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
907 nfp_net_tx_xmit_more_flush(tx_ring);
908 u64_stats_update_begin(&r_vec->tx_sync);
910 u64_stats_update_end(&r_vec->tx_sync);
911 dev_kfree_skb_any(skb);
916 * nfp_net_tx_complete() - Handle completed TX packets
917 * @tx_ring: TX ring structure
919 * Return: Number of completed TX descriptors
921 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
923 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
924 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
925 const struct skb_frag_struct *frag;
926 struct netdev_queue *nd_q;
927 u32 done_pkts = 0, done_bytes = 0;
934 if (tx_ring->wr_p == tx_ring->rd_p)
937 /* Work out how many descriptors have been transmitted */
938 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
940 if (qcp_rd_p == tx_ring->qcp_rd_p)
943 if (qcp_rd_p > tx_ring->qcp_rd_p)
944 todo = qcp_rd_p - tx_ring->qcp_rd_p;
946 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
949 idx = tx_ring->rd_p & (tx_ring->cnt - 1);
952 skb = tx_ring->txbufs[idx].skb;
956 nr_frags = skb_shinfo(skb)->nr_frags;
957 fidx = tx_ring->txbufs[idx].fidx;
961 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
962 skb_headlen(skb), DMA_TO_DEVICE);
964 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
965 done_bytes += tx_ring->txbufs[idx].real_len;
968 frag = &skb_shinfo(skb)->frags[fidx];
969 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
970 skb_frag_size(frag), DMA_TO_DEVICE);
973 /* check for last gather fragment */
974 if (fidx == nr_frags - 1)
975 dev_kfree_skb_any(skb);
977 tx_ring->txbufs[idx].dma_addr = 0;
978 tx_ring->txbufs[idx].skb = NULL;
979 tx_ring->txbufs[idx].fidx = -2;
982 tx_ring->qcp_rd_p = qcp_rd_p;
984 u64_stats_update_begin(&r_vec->tx_sync);
985 r_vec->tx_bytes += done_bytes;
986 r_vec->tx_pkts += done_pkts;
987 u64_stats_update_end(&r_vec->tx_sync);
989 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
990 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
991 if (nfp_net_tx_ring_should_wake(tx_ring)) {
992 /* Make sure TX thread will see updated tx_ring->rd_p */
995 if (unlikely(netif_tx_queue_stopped(nd_q)))
996 netif_tx_wake_queue(nd_q);
999 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1000 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1001 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
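/* Worked example (illustrative): the QCP read pointer wraps modulo the ring
 * size, so with cnt = 1024, a cached qcp_rd_p of 1020 and a fresh read of 4,
 * the number of newly completed descriptors is 4 + 1024 - 1020 = 8 rather
 * than a negative difference.
 */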
1004 static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1006 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1007 u32 done_pkts = 0, done_bytes = 0;
1011 if (tx_ring->wr_p == tx_ring->rd_p)
1014 /* Work out how many descriptors have been transmitted */
1015 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1017 if (qcp_rd_p == tx_ring->qcp_rd_p)
1020 if (qcp_rd_p > tx_ring->qcp_rd_p)
1021 todo = qcp_rd_p - tx_ring->qcp_rd_p;
1023 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
1027 idx = tx_ring->rd_p & (tx_ring->cnt - 1);
1030 done_bytes += tx_ring->txbufs[idx].real_len;
1033 tx_ring->qcp_rd_p = qcp_rd_p;
1035 u64_stats_update_begin(&r_vec->tx_sync);
1036 r_vec->tx_bytes += done_bytes;
1037 r_vec->tx_pkts += done_pkts;
1038 u64_stats_update_end(&r_vec->tx_sync);
1040 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1041 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1042 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1046 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
1047 * @dp: NFP Net data path struct
1048 * @tx_ring: TX ring structure
1050 * Assumes that the device is stopped
1053 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1055 const struct skb_frag_struct *frag;
1056 struct netdev_queue *nd_q;
1058 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1059 struct nfp_net_tx_buf *tx_buf;
1060 struct sk_buff *skb;
1063 idx = tx_ring->rd_p & (tx_ring->cnt - 1);
1064 tx_buf = &tx_ring->txbufs[idx];
1066 skb = tx_ring->txbufs[idx].skb;
1067 nr_frags = skb_shinfo(skb)->nr_frags;
1069 if (tx_buf->fidx == -1) {
1071 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1072 skb_headlen(skb), DMA_TO_DEVICE);
1074 /* unmap fragment */
1075 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1076 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1077 skb_frag_size(frag), DMA_TO_DEVICE);
1080 /* check for last gather fragment */
1081 if (tx_buf->fidx == nr_frags - 1)
1082 dev_kfree_skb_any(skb);
1084 tx_buf->dma_addr = 0;
1088 tx_ring->qcp_rd_p++;
1092 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1095 tx_ring->qcp_rd_p = 0;
1096 tx_ring->wr_ptr_add = 0;
1098 if (tx_ring->is_xdp)
1101 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1102 netdev_tx_reset_queue(nd_q);
1105 static void nfp_net_tx_timeout(struct net_device *netdev)
1107 struct nfp_net *nn = netdev_priv(netdev);
1110 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1111 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1113 nn_warn(nn, "TX timeout on ring: %d\n", i);
1115 nn_warn(nn, "TX watchdog timeout\n");
1118 /* Receive processing
1121 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1123 unsigned int fl_bufsz;
1125 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1126 fl_bufsz += dp->rx_dma_off;
1127 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1128 fl_bufsz += NFP_NET_MAX_PREPEND;
1130 fl_bufsz += dp->rx_offset;
1131 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1133 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1134 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
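/* Worked example (illustrative): for a 1500 byte MTU with a fixed 32 byte
 * rx_offset and no extra rx_dma_off, the data portion of the fragment is
 * NFP_NET_RX_BUF_HEADROOM + 32 + ETH_HLEN + 2 * VLAN_HLEN + 1500 bytes; this
 * is rounded up with SKB_DATA_ALIGN() and grown by the aligned
 * sizeof(struct skb_shared_info) so build_skb() can park its shared info at
 * the end of the buffer.
 */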
1140 nfp_net_free_frag(void *frag, bool xdp)
1143 skb_free_frag(frag);
1145 __free_page(virt_to_page(frag));
1149 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1150 * @dp: NFP Net data path struct
1151 * @dma_addr: Pointer to storage for DMA address (output param)
1153 * This function will allocate a new page frag and map it for DMA.
1155 * Return: allocated page frag or NULL on failure.
1157 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1162 frag = netdev_alloc_frag(dp->fl_bufsz);
1164 frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
1166 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1170 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1171 if (dma_mapping_error(dp->dev, *dma_addr)) {
1172 nfp_net_free_frag(frag, dp->xdp_prog);
1173 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1180 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1185 frag = napi_alloc_frag(dp->fl_bufsz);
1187 frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
1189 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1193 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1194 if (dma_mapping_error(dp->dev, *dma_addr)) {
1195 nfp_net_free_frag(frag, dp->xdp_prog);
1196 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1204 * nfp_net_rx_give_one() - Put a mapped buffer on the software and hardware rings
1205 * @dp: NFP Net data path struct
1206 * @rx_ring: RX ring structure
1207 * @frag: page fragment buffer
1208 * @dma_addr: DMA address of the buffer mapping
1210 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1211 struct nfp_net_rx_ring *rx_ring,
1212 void *frag, dma_addr_t dma_addr)
1214 unsigned int wr_idx;
1216 wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
1218 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1220 /* Stash SKB and DMA address away */
1221 rx_ring->rxbufs[wr_idx].frag = frag;
1222 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1224 /* Fill freelist descriptor */
1225 rx_ring->rxds[wr_idx].fld.reserved = 0;
1226 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1227 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1228 dma_addr + dp->rx_dma_off);
1231 rx_ring->wr_ptr_add++;
1232 if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1233 /* Update write pointer of the freelist queue. Make
1234 * sure all writes are flushed before telling the hardware.
1237 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1238 rx_ring->wr_ptr_add = 0;
1243 * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
1244 * @rx_ring: RX ring structure
1246 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1247 * (i.e. device was not enabled)!
1249 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1251 unsigned int wr_idx, last_idx;
1253 /* Move the empty entry to the end of the list */
1254 wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
1255 last_idx = rx_ring->cnt - 1;
1256 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1257 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1258 rx_ring->rxbufs[last_idx].dma_addr = 0;
1259 rx_ring->rxbufs[last_idx].frag = NULL;
1261 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1264 rx_ring->wr_ptr_add = 0;
1268 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1269 * @dp: NFP Net data path struct
1270 * @rx_ring: RX ring to remove buffers from
1272 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1273 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1274 * to restore required ring geometry.
1277 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1278 struct nfp_net_rx_ring *rx_ring)
1282 for (i = 0; i < rx_ring->cnt - 1; i++) {
1283 /* NULL skb can only happen when initial filling of the ring
1284 * fails to allocate enough buffers and calls here to free
1285 * already allocated ones.
1287 if (!rx_ring->rxbufs[i].frag)
1290 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1291 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1292 rx_ring->rxbufs[i].dma_addr = 0;
1293 rx_ring->rxbufs[i].frag = NULL;
1298 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1299 * @dp: NFP Net data path struct
1300 * @rx_ring: RX ring to allocate buffers for
1303 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1304 struct nfp_net_rx_ring *rx_ring)
1306 struct nfp_net_rx_buf *rxbufs;
1309 rxbufs = rx_ring->rxbufs;
1311 for (i = 0; i < rx_ring->cnt - 1; i++) {
1312 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1313 if (!rxbufs[i].frag) {
1314 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1323 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1324 * @dp: NFP Net data path struct
1325 * @rx_ring: RX ring to fill
1328 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1329 struct nfp_net_rx_ring *rx_ring)
1333 for (i = 0; i < rx_ring->cnt - 1; i++)
1334 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1335 rx_ring->rxbufs[i].dma_addr);
1339 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1340 * @flags: RX descriptor flags field in CPU byte order
1342 static int nfp_net_rx_csum_has_errors(u16 flags)
1344 u16 csum_all_checked, csum_all_ok;
1346 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1347 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1349 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
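/* Worked example (illustrative): the "csum OK" flags sit
 * PCIE_DESC_RX_CSUM_OK_SHIFT bit positions below their "csum checked"
 * counterparts, so shifting the OK group up aligns the two sets.  The
 * comparison therefore returns zero only when every checksum the NIC
 * verified was also reported good; any checked-but-not-OK bit yields a
 * non-zero (error) result.
 */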
1353 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1354 * @dp: NFP Net data path struct
1355 * @r_vec: per-ring structure
1356 * @rxd: Pointer to RX descriptor
1357 * @meta: Parsed metadata prepend
1358 * @skb: Pointer to SKB
1360 static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1361 struct nfp_net_r_vector *r_vec,
1362 struct nfp_net_rx_desc *rxd,
1363 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1365 skb_checksum_none_assert(skb);
1367 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1370 if (meta->csum_type) {
1371 skb->ip_summed = meta->csum_type;
1372 skb->csum = meta->csum;
1373 u64_stats_update_begin(&r_vec->rx_sync);
1374 r_vec->hw_csum_rx_ok++;
1375 u64_stats_update_end(&r_vec->rx_sync);
1379 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1380 u64_stats_update_begin(&r_vec->rx_sync);
1381 r_vec->hw_csum_rx_error++;
1382 u64_stats_update_end(&r_vec->rx_sync);
1386 /* Assume that the firmware will never report inner CSUM_OK unless outer
1387 * L4 headers were successfully parsed. FW will always report zero UDP
1388 * checksum as CSUM_OK.
1390 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1391 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1392 __skb_incr_checksum_unnecessary(skb);
1393 u64_stats_update_begin(&r_vec->rx_sync);
1394 r_vec->hw_csum_rx_ok++;
1395 u64_stats_update_end(&r_vec->rx_sync);
1398 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1399 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1400 __skb_incr_checksum_unnecessary(skb);
1401 u64_stats_update_begin(&r_vec->rx_sync);
1402 r_vec->hw_csum_rx_inner_ok++;
1403 u64_stats_update_end(&r_vec->rx_sync);
1408 nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1409 unsigned int type, __be32 *hash)
1411 if (!(netdev->features & NETIF_F_RXHASH))
1415 case NFP_NET_RSS_IPV4:
1416 case NFP_NET_RSS_IPV6:
1417 case NFP_NET_RSS_IPV6_EX:
1418 meta->hash_type = PKT_HASH_TYPE_L3;
1421 meta->hash_type = PKT_HASH_TYPE_L4;
1425 meta->hash = get_unaligned_be32(hash);
1429 nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1430 void *data, struct nfp_net_rx_desc *rxd)
1432 struct nfp_net_rx_hash *rx_hash = data;
1434 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1437 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1442 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1443 void *data, int meta_len)
1447 meta_info = get_unaligned_be32(data);
1451 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1452 case NFP_NET_META_HASH:
1453 meta_info >>= NFP_NET_META_FIELD_SIZE;
1454 nfp_net_set_hash(netdev, meta,
1455 meta_info & NFP_NET_META_FIELD_MASK,
1459 case NFP_NET_META_MARK:
1460 meta->mark = get_unaligned_be32(data);
1463 case NFP_NET_META_CSUM:
1464 meta->csum_type = CHECKSUM_COMPLETE;
1466 (__force __wsum)__get_unaligned_cpu32(data);
1473 meta_info >>= NFP_NET_META_FIELD_SIZE;
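/* Worked example (illustrative): in the chained metadata format the first
 * 32-bit big-endian word is a sequence of 4-bit field types consumed from the
 * least significant nibble upwards.  A prepend carrying an RSS hash and a
 * mark starts with NFP_NET_META_HASH in the lowest nibble, the RSS hash type
 * in the next nibble and NFP_NET_META_MARK after that, while the bytes that
 * follow the word are the 4-byte hash value and then the 4-byte mark.
 */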
1480 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1481 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1482 struct sk_buff *skb)
1484 u64_stats_update_begin(&r_vec->rx_sync);
1486 u64_stats_update_end(&r_vec->rx_sync);
1488 /* The skb is built around the frag, so freeing the skb would free the frag
1489 * as well; to be able to reuse the frag we need an extra reference. */
1491 if (skb && rxbuf && skb->head == rxbuf->frag)
1492 page_ref_inc(virt_to_head_page(rxbuf->frag));
1494 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1496 dev_kfree_skb_any(skb);
1500 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1501 struct nfp_net_tx_ring *tx_ring,
1502 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1503 unsigned int pkt_len)
1505 struct nfp_net_tx_buf *txbuf;
1506 struct nfp_net_tx_desc *txd;
1509 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1510 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
1514 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
1516 /* Stash the soft descriptor of the head then initialize it */
1517 txbuf = &tx_ring->txbufs[wr_idx];
1519 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1521 txbuf->frag = rxbuf->frag;
1522 txbuf->dma_addr = rxbuf->dma_addr;
1525 txbuf->real_len = pkt_len;
1527 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1528 pkt_len, DMA_BIDIRECTIONAL);
1530 /* Build TX descriptor */
1531 txd = &tx_ring->txds[wr_idx];
1532 txd->offset_eop = PCIE_DESC_TX_EOP;
1533 txd->dma_len = cpu_to_le16(pkt_len);
1534 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1535 txd->data_len = cpu_to_le16(pkt_len);
1539 txd->lso_hdrlen = 0;
1542 tx_ring->wr_ptr_add++;
1546 static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
1547 unsigned int *off, unsigned int *len)
1549 struct xdp_buff xdp;
1553 xdp.data_hard_start = hard_start;
1554 xdp.data = data + *off;
1555 xdp.data_end = data + *off + *len;
1557 orig_data = xdp.data;
1558 ret = bpf_prog_run_xdp(prog, &xdp);
1560 *len -= xdp.data - orig_data;
1561 *off += xdp.data - orig_data;
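/* Illustrative note: if the program calls bpf_xdp_adjust_head() and pulls,
 * say, 14 bytes off the front (xdp.data moves forward by 14), then
 * xdp.data - orig_data is +14, so *len shrinks by 14 and *off grows by 14;
 * a program that pushes headers moves xdp.data backwards and the signs flip.
 */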
1567 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1568 * @rx_ring: RX ring to receive from
1569 * @budget: NAPI budget
1571 * Note, this function is separated out from the napi poll function to
1572 * more cleanly separate packet receive code from other bookkeeping
1573 * functions performed in the napi poll function.
1575 * Return: Number of packets received.
1577 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1579 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1580 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1581 struct nfp_net_tx_ring *tx_ring;
1582 struct bpf_prog *xdp_prog;
1583 unsigned int true_bufsz;
1584 struct sk_buff *skb;
1585 int pkts_polled = 0;
1589 xdp_prog = READ_ONCE(dp->xdp_prog);
1590 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1591 tx_ring = r_vec->xdp_ring;
1593 while (pkts_polled < budget) {
1594 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1595 struct nfp_net_rx_buf *rxbuf;
1596 struct nfp_net_rx_desc *rxd;
1597 struct nfp_meta_parsed meta;
1598 dma_addr_t new_dma_addr;
1601 idx = rx_ring->rd_p & (rx_ring->cnt - 1);
1603 rxd = &rx_ring->rxds[idx];
1604 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1607 /* Memory barrier to ensure that we won't do other reads
1608 * before the DD bit.
1612 memset(&meta, 0, sizeof(meta));
1617 rxbuf = &rx_ring->rxbufs[idx];
1619 * <-- [rx_offset] -->
1620 * ---------------------------------------------------------
1621 * | [XX] | metadata | packet | XXXX |
1622 * ---------------------------------------------------------
1623 * <---------------- data_len --------------->
1625 * The rx_offset is fixed for all packets, the meta_len can vary
1626 * on a packet by packet basis. If rx_offset is set to zero
1627 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1628 * buffer and is immediately followed by the packet (no [XX]).
1630 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1631 data_len = le16_to_cpu(rxd->rxd.data_len);
1632 pkt_len = data_len - meta_len;
1634 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1635 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1636 pkt_off += meta_len;
1638 pkt_off += dp->rx_offset;
1639 meta_off = pkt_off - meta_len;
1642 u64_stats_update_begin(&r_vec->rx_sync);
1644 r_vec->rx_bytes += pkt_len;
1645 u64_stats_update_end(&r_vec->rx_sync);
1647 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1648 (dp->rx_offset && meta_len > dp->rx_offset))) {
1649 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1651 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1655 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1658 if (!dp->chained_metadata_format) {
1659 nfp_net_set_hash_desc(dp->netdev, &meta,
1660 rxbuf->frag + meta_off, rxd);
1661 } else if (meta_len) {
1664 end = nfp_net_parse_meta(dp->netdev, &meta,
1665 rxbuf->frag + meta_off,
1667 if (unlikely(end != rxbuf->frag + pkt_off)) {
1668 nn_dp_warn(dp, "invalid RX packet metadata\n");
1669 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1675 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
1676 dp->bpf_offload_xdp)) {
1677 unsigned int dma_off;
1681 hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1683 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
1684 &pkt_off, &pkt_len);
1689 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1690 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1694 trace_xdp_exception(dp->netdev,
1698 bpf_warn_invalid_xdp_action(act);
1700 trace_xdp_exception(dp->netdev, xdp_prog, act);
1702 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1708 skb = build_skb(rxbuf->frag, true_bufsz);
1709 if (unlikely(!skb)) {
1710 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1713 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1714 if (unlikely(!new_frag)) {
1715 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1719 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1721 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1723 skb_reserve(skb, pkt_off);
1724 skb_put(skb, pkt_len);
1726 skb->mark = meta.mark;
1727 skb_set_hash(skb, meta.hash, meta.hash_type);
1729 skb_record_rx_queue(skb, rx_ring->idx);
1730 skb->protocol = eth_type_trans(skb, dp->netdev);
1732 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1734 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1735 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1736 le16_to_cpu(rxd->rxd.vlan));
1738 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1741 if (xdp_prog && tx_ring->wr_ptr_add)
1742 nfp_net_tx_xmit_more_flush(tx_ring);
1749 * nfp_net_poll() - napi poll function
1750 * @napi: NAPI structure
1751 * @budget: NAPI budget
1753 * Return: number of packets polled.
1755 static int nfp_net_poll(struct napi_struct *napi, int budget)
1757 struct nfp_net_r_vector *r_vec =
1758 container_of(napi, struct nfp_net_r_vector, napi);
1759 unsigned int pkts_polled = 0;
1762 nfp_net_tx_complete(r_vec->tx_ring);
1763 if (r_vec->rx_ring) {
1764 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1765 if (r_vec->xdp_ring)
1766 nfp_net_xdp_complete(r_vec->xdp_ring);
1769 if (pkts_polled < budget)
1770 if (napi_complete_done(napi, pkts_polled))
1771 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1776 /* Setup and Configuration
1780 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1781 * @tx_ring: TX ring to free
1783 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1785 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1786 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1788 kfree(tx_ring->txbufs);
1791 dma_free_coherent(dp->dev, tx_ring->size,
1792 tx_ring->txds, tx_ring->dma);
1795 tx_ring->txbufs = NULL;
1796 tx_ring->txds = NULL;
1802 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1803 * @dp: NFP Net data path struct
1804 * @tx_ring: TX Ring structure to allocate
1806 * Return: 0 on success, negative errno otherwise.
1809 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1811 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1814 tx_ring->cnt = dp->txd_cnt;
1816 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1817 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
1818 &tx_ring->dma, GFP_KERNEL);
1822 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1823 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1824 if (!tx_ring->txbufs)
1827 if (!tx_ring->is_xdp)
1828 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
1834 nfp_net_tx_ring_free(tx_ring);
1839 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
1840 struct nfp_net_tx_ring *tx_ring)
1844 if (!tx_ring->is_xdp)
1847 for (i = 0; i < tx_ring->cnt; i++) {
1848 if (!tx_ring->txbufs[i].frag)
1851 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
1852 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
1857 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
1858 struct nfp_net_tx_ring *tx_ring)
1860 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
1863 if (!tx_ring->is_xdp)
1866 for (i = 0; i < tx_ring->cnt; i++) {
1867 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
1868 if (!txbufs[i].frag) {
1869 nfp_net_tx_ring_bufs_free(dp, tx_ring);
1877 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
1881 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
1886 for (r = 0; r < dp->num_tx_rings; r++) {
1889 if (r >= dp->num_stack_tx_rings)
1890 bias = dp->num_stack_tx_rings;
1892 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
1895 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
1898 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
1906 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
1908 nfp_net_tx_ring_free(&dp->tx_rings[r]);
1910 kfree(dp->tx_rings);
1914 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
1918 for (r = 0; r < dp->num_tx_rings; r++) {
1919 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
1920 nfp_net_tx_ring_free(&dp->tx_rings[r]);
1923 kfree(dp->tx_rings);
1927 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1928 * @rx_ring: RX ring to free
1930 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1932 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1933 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1935 kfree(rx_ring->rxbufs);
1938 dma_free_coherent(dp->dev, rx_ring->size,
1939 rx_ring->rxds, rx_ring->dma);
1942 rx_ring->rxbufs = NULL;
1943 rx_ring->rxds = NULL;
1949 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1950 * @dp: NFP Net data path struct
1951 * @rx_ring: RX ring to allocate
1953 * Return: 0 on success, negative errno otherwise.
1956 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
1960 rx_ring->cnt = dp->rxd_cnt;
1961 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1962 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
1963 &rx_ring->dma, GFP_KERNEL);
1967 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1968 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1969 if (!rx_ring->rxbufs)
1975 nfp_net_rx_ring_free(rx_ring);
1979 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
1983 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
1988 for (r = 0; r < dp->num_rx_rings; r++) {
1989 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
1991 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
1994 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2002 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2004 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2006 kfree(dp->rx_rings);
2010 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2014 for (r = 0; r < dp->num_rx_rings; r++) {
2015 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2016 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2019 kfree(dp->rx_rings);
2023 nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2024 struct nfp_net_r_vector *r_vec, int idx)
2026 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2028 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2030 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2031 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
2035 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2041 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2042 nfp_net_poll, NAPI_POLL_WEIGHT);
2044 snprintf(r_vec->name, sizeof(r_vec->name),
2045 "%s-rxtx-%d", nn->dp.netdev->name, idx);
2046 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2049 netif_napi_del(&r_vec->napi);
2050 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2053 disable_irq(r_vec->irq_vector);
2055 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2057 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2064 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2066 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2067 netif_napi_del(&r_vec->napi);
2068 free_irq(r_vec->irq_vector, r_vec);
2072 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
2073 * @nn: NFP Net device to reconfigure
2075 void nfp_net_rss_write_itbl(struct nfp_net *nn)
2079 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2080 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2081 get_unaligned_le32(nn->rss_itbl + i));
2085 * nfp_net_rss_write_key() - Write RSS hash key to device
2086 * @nn: NFP Net device to reconfigure
2088 void nfp_net_rss_write_key(struct nfp_net *nn)
2092 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2093 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2094 get_unaligned_le32(nn->rss_key + i));
2098 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
2099 * @nn: NFP Net device to reconfigure
2101 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2107 /* Compute factor used to convert coalesce '_usecs' parameters to
2108 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
2111 factor = nn->me_freq_mhz / 16;
2113 /* copy RX interrupt coalesce parameters */
2114 value = (nn->rx_coalesce_max_frames << 16) |
2115 (factor * nn->rx_coalesce_usecs);
2116 for (i = 0; i < nn->dp.num_rx_rings; i++)
2117 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2119 /* copy TX interrupt coalesce parameters */
2120 value = (nn->tx_coalesce_max_frames << 16) |
2121 (factor * nn->tx_coalesce_usecs);
2122 for (i = 0; i < nn->dp.num_tx_rings; i++)
2123 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
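/* Worked example (illustrative, assuming me_freq_mhz = 1200): factor is
 * 1200 / 16 = 75 ticks per microsecond, so rx_coalesce_usecs = 50 and
 * rx_coalesce_max_frames = 64 get written as (64 << 16) | (75 * 50), i.e.
 * the frame count in the upper half-word and the ME timestamp ticks in the
 * lower half-word of each per-ring IRQ_MOD register.
 */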
2127 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
2128 * @nn: NFP Net device to reconfigure
2130 * Writes the MAC address from the netdev to the device control BAR. Does not
2131 * perform the required reconfig. We do a byte swapping dance because the firmware is little-endian.
2134 static void nfp_net_write_mac_addr(struct nfp_net *nn)
2136 nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
2137 get_unaligned_be32(nn->dp.netdev->dev_addr));
2138 nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
2139 get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
2142 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2144 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2145 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2146 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2148 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2149 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2150 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2154 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
2155 * @nn: NFP Net device to reconfigure
2157 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2159 u32 new_ctrl, update;
2163 new_ctrl = nn->dp.ctrl;
2164 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2165 update = NFP_NET_CFG_UPDATE_GEN;
2166 update |= NFP_NET_CFG_UPDATE_MSIX;
2167 update |= NFP_NET_CFG_UPDATE_RING;
2169 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2170 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2172 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2173 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2175 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2176 err = nfp_net_reconfig(nn, update);
2178 nn_err(nn, "Could not disable device: %d\n", err);
2180 for (r = 0; r < nn->dp.num_rx_rings; r++)
2181 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2182 for (r = 0; r < nn->dp.num_tx_rings; r++)
2183 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2184 for (r = 0; r < nn->dp.num_r_vecs; r++)
2185 nfp_net_vec_clear_ring_data(nn, r);
2187 nn->dp.ctrl = new_ctrl;
2191 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2192 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2194 /* Write the DMA address, size and MSI-X info to the device */
2195 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2196 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2197 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2201 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2202 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2204 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2205 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2206 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
2210 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2211 * @nn: NFP Net device to reconfigure
2213 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2215 u32 bufsz, new_ctrl, update = 0;
2219 new_ctrl = nn->dp.ctrl;
2221 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2222 nfp_net_rss_write_key(nn);
2223 nfp_net_rss_write_itbl(nn);
2224 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2225 update |= NFP_NET_CFG_UPDATE_RSS;
2228 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2229 nfp_net_coalesce_write_cfg(nn);
2230 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2233 for (r = 0; r < nn->dp.num_tx_rings; r++)
2234 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2235 for (r = 0; r < nn->dp.num_rx_rings; r++)
2236 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2238 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2239 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2241 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2242 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
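/* Worked example (illustrative): with 4 TX rings the enable mask is
 * ((u64)1 << 4) - 1 = 0xf, turning on rings 0-3; the 64-ring case is
 * special-cased because shifting a 64-bit value by 64 is undefined in C.
 */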
2244 nfp_net_write_mac_addr(nn);
2246 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
2248 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2249 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2252 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2253 update |= NFP_NET_CFG_UPDATE_GEN;
2254 update |= NFP_NET_CFG_UPDATE_MSIX;
2255 update |= NFP_NET_CFG_UPDATE_RING;
2256 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2257 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2259 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2260 err = nfp_net_reconfig(nn, update);
2262 nfp_net_clear_config_and_disable(nn);
2266 nn->dp.ctrl = new_ctrl;
2268 for (r = 0; r < nn->dp.num_rx_rings; r++)
2269 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2271 /* Since reconfiguration requests while NFP is down are ignored we
2272 * have to wipe the entire VXLAN configuration and reinitialize it.
2274 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2275 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2276 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2277 udp_tunnel_get_rx_info(nn->dp.netdev);
2284 * nfp_net_open_stack() - Start the device from stack's perspective
2285 * @nn: NFP Net device to reconfigure
2287 static void nfp_net_open_stack(struct nfp_net *nn)
2291 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2292 napi_enable(&nn->r_vecs[r].napi);
2293 enable_irq(nn->r_vecs[r].irq_vector);
2296 netif_tx_wake_all_queues(nn->dp.netdev);
2298 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2299 nfp_net_read_link_status(nn);
2302 static int nfp_net_netdev_open(struct net_device *netdev)
2304 struct nfp_net *nn = netdev_priv(netdev);
2307 /* Step 1: Allocate resources for rings and the like
2308 * - Request interrupts
2309 * - Allocate RX and TX ring resources
2310 * - Setup initial RSS table
2312 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2313 nn->exn_name, sizeof(nn->exn_name),
2314 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2317 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2318 nn->lsc_name, sizeof(nn->lsc_name),
2319 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2322 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2324 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2325 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2327 goto err_cleanup_vec_p;
2330 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2332 goto err_cleanup_vec;
2334 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2336 goto err_free_rx_rings;
2338 for (r = 0; r < nn->max_r_vecs; r++)
2339 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2341 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2343 goto err_free_rings;
2345 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2347 goto err_free_rings;
2349 /* Step 2: Configure the NFP
2350 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2351 * - Write MAC address (in case it changed)
2353 * - Set the Freelist buffer size
2356 err = nfp_net_set_config_and_enable(nn);
2358 goto err_free_rings;
2360 /* Step 3: Enable for kernel
2361 * - put some freelist descriptors on each RX ring
2362 * - enable NAPI on each ring
2363 * - enable all TX queues
2366 nfp_net_open_stack(nn);
2371 nfp_net_tx_rings_free(&nn->dp);
2373 nfp_net_rx_rings_free(&nn->dp);
2375 r = nn->dp.num_r_vecs;
2378 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2379 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2381 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2386 * nfp_net_close_stack() - Quiesce the stack (part of close)
2387 * @nn: NFP Net device to reconfigure
2389 static void nfp_net_close_stack(struct nfp_net *nn)
2393 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2394 netif_carrier_off(nn->dp.netdev);
2395 nn->link_up = false;
2397 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2398 disable_irq(nn->r_vecs[r].irq_vector);
2399 napi_disable(&nn->r_vecs[r].napi);
2402 netif_tx_disable(nn->dp.netdev);
2406 * nfp_net_close_free_all() - Free all runtime resources
2407 * @nn: NFP Net device to reconfigure
2409 static void nfp_net_close_free_all(struct nfp_net *nn)
2413 for (r = 0; r < nn->dp.num_rx_rings; r++) {
2414 nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
2415 nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
2417 for (r = 0; r < nn->dp.num_tx_rings; r++) {
2418 nfp_net_tx_ring_bufs_free(&nn->dp, &nn->dp.tx_rings[r]);
2419 nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
2421 for (r = 0; r < nn->dp.num_r_vecs; r++)
2422 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2424 kfree(nn->dp.rx_rings);
2425 kfree(nn->dp.tx_rings);
2427 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2428 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2432 * nfp_net_netdev_close() - Called when the device is downed
2433 * @netdev: netdev structure
2435 static int nfp_net_netdev_close(struct net_device *netdev)
2437 struct nfp_net *nn = netdev_priv(netdev);
2439 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2441 nfp_net_close_stack(nn);
2445 nfp_net_clear_config_and_disable(nn);
2447 /* Step 3: Free resources
2449 nfp_net_close_free_all(nn);
2451 nn_dbg(nn, "%s down", netdev->name);
2455 static void nfp_net_set_rx_mode(struct net_device *netdev)
2457 struct nfp_net *nn = netdev_priv(netdev);
2460 new_ctrl = nn->dp.ctrl;
2462 if (netdev->flags & IFF_PROMISC) {
2463 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2464 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2466 nn_warn(nn, "FW does not support promiscuous mode\n");
2468 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2471 if (new_ctrl == nn->dp.ctrl)
2474 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2475 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2477 nn->dp.ctrl = new_ctrl;
2480 static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2484 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2485 nn->rss_itbl[i] =
2486 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
2489 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2491 struct nfp_net_dp new_dp = *dp;
2496 nn->dp.netdev->mtu = new_dp.mtu;
2498 if (!netif_is_rxfh_configured(nn->dp.netdev))
2499 nfp_net_rss_init_itbl(nn);
2502 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2507 nfp_net_dp_swap(nn, dp);
2509 for (r = 0; r < nn->max_r_vecs; r++)
2510 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2512 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2516 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2517 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2518 nn->dp.num_stack_tx_rings);
2523 return nfp_net_set_config_and_enable(nn);
2526 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2528 struct nfp_net_dp *new;
2530 new = kmalloc(sizeof(*new), GFP_KERNEL);
2536 /* Clear things which need to be recomputed */
2538 new->tx_rings = NULL;
2539 new->rx_rings = NULL;
2540 new->num_r_vecs = 0;
2541 new->num_stack_tx_rings = 0;
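/*
 * Illustrative usage sketch, not part of the driver: callers clone the live
 * datapath, edit only the copy, and pass it to nfp_net_ring_reconfig(),
 * exactly as nfp_net_change_mtu() below does for the MTU.  A hypothetical
 * ring-resize helper following the same pattern:
 */
static int nfp_net_example_set_rxd_cnt(struct nfp_net *nn, u32 rxd_cnt)
{
        struct nfp_net_dp *dp;

        dp = nfp_net_clone_dp(nn);
        if (!dp)
                return -ENOMEM;

        dp->rxd_cnt = rxd_cnt;          /* the live nn->dp is untouched */

        return nfp_net_ring_reconfig(nn, dp, NULL);
}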
2547 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
2548 struct netlink_ext_ack *extack)
2550 /* XDP-enabled tests */
2553 if (dp->fl_bufsz > PAGE_SIZE) {
2554 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
2557 if (dp->num_tx_rings > nn->max_tx_rings) {
2558 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
2565 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
2566 struct netlink_ext_ack *extack)
2570 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
2572 dp->num_stack_tx_rings = dp->num_tx_rings;
2574 dp->num_stack_tx_rings -= dp->num_rx_rings;
2576 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
2578 err = nfp_net_check_config(nn, dp, extack);
2582 if (!netif_running(dp->netdev)) {
2583 nfp_net_dp_swap(nn, dp);
2588 /* Prepare new rings */
2589 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
2590 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2593 goto err_cleanup_vecs;
2597 err = nfp_net_rx_rings_prepare(nn, dp);
2599 goto err_cleanup_vecs;
2601 err = nfp_net_tx_rings_prepare(nn, dp);
2605 /* Stop device, swap in new rings, try to start the firmware */
2606 nfp_net_close_stack(nn);
2607 nfp_net_clear_config_and_disable(nn);
2609 err = nfp_net_dp_swap_enable(nn, dp);
2613 nfp_net_clear_config_and_disable(nn);
2615 /* Try with old configuration and old rings */
2616 err2 = nfp_net_dp_swap_enable(nn, dp);
2618 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
2621 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
2622 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2624 nfp_net_rx_rings_free(dp);
2625 nfp_net_tx_rings_free(dp);
2627 nfp_net_open_stack(nn);
2634 nfp_net_rx_rings_free(dp);
2636 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
2637 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
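/*
 * Illustrative sketch, not part of the driver: in nfp_net_ring_reconfig()
 * above, enabling XDP dedicates one TX ring per RX ring to XDP_TX, so only
 * the remaining TX rings stay visible to the stack, and one ring vector is
 * needed per RX ring or per stack TX ring, whichever is larger.  With the
 * hypothetical numbers 8 RX / 16 TX rings and XDP on, that gives 8 stack TX
 * rings and 8 vectors:
 */
static inline unsigned int
nfp_net_example_num_r_vecs(unsigned int num_rx, unsigned int num_tx,
                           bool xdp_enabled)
{
        unsigned int stack_tx = num_tx - (xdp_enabled ? num_rx : 0);

        return max(num_rx, stack_tx);
}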
2642 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2644 struct nfp_net *nn = netdev_priv(netdev);
2645 struct nfp_net_dp *dp;
2647 dp = nfp_net_clone_dp(nn);
2653 return nfp_net_ring_reconfig(nn, dp, NULL);
2656 static void nfp_net_stat64(struct net_device *netdev,
2657 struct rtnl_link_stats64 *stats)
2659 struct nfp_net *nn = netdev_priv(netdev);
2662 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2663 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
2668 start = u64_stats_fetch_begin(&r_vec->rx_sync);
2669 data[0] = r_vec->rx_pkts;
2670 data[1] = r_vec->rx_bytes;
2671 data[2] = r_vec->rx_drops;
2672 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
2673 stats->rx_packets += data[0];
2674 stats->rx_bytes += data[1];
2675 stats->rx_dropped += data[2];
2678 start = u64_stats_fetch_begin(&r_vec->tx_sync);
2679 data[0] = r_vec->tx_pkts;
2680 data[1] = r_vec->tx_bytes;
2681 data[2] = r_vec->tx_errors;
2682 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
2683 stats->tx_packets += data[0];
2684 stats->tx_bytes += data[1];
2685 stats->tx_errors += data[2];
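/*
 * Illustrative sketch, not part of the driver: the per-ring counters above
 * are read under the u64_stats_fetch_begin()/u64_stats_fetch_retry()
 * seqcount pattern so that a 64-bit counter being updated concurrently on a
 * 32-bit machine is re-read instead of reported torn.  A hypothetical
 * single-counter reader using the same pattern:
 */
static u64 nfp_net_example_read_rx_pkts(struct nfp_net_r_vector *r_vec)
{
        unsigned int start;
        u64 pkts;

        do {
                start = u64_stats_fetch_begin(&r_vec->rx_sync);
                pkts = r_vec->rx_pkts;
        } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));

        return pkts;
}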
2689 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
2691 if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
2692 nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
2698 nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
2699 struct tc_to_netdev *tc)
2701 struct nfp_net *nn = netdev_priv(netdev);
2703 if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
2705 if (proto != htons(ETH_P_ALL))
2708 if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
2709 if (!nn->dp.bpf_offload_xdp)
2710 return nfp_net_bpf_offload(nn, tc->cls_bpf);
2718 static int nfp_net_set_features(struct net_device *netdev,
2719 netdev_features_t features)
2721 netdev_features_t changed = netdev->features ^ features;
2722 struct nfp_net *nn = netdev_priv(netdev);
2726 /* Assume this is not called with features we have not advertised */
2728 new_ctrl = nn->dp.ctrl;
2730 if (changed & NETIF_F_RXCSUM) {
2731 if (features & NETIF_F_RXCSUM)
2732 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
2734 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
2737 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2738 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
2739 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2741 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
2744 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
2745 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
2746 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
2747 NFP_NET_CFG_CTRL_LSO;
2749 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
2752 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2753 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2754 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2756 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
2759 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
2760 if (features & NETIF_F_HW_VLAN_CTAG_TX)
2761 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2763 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
2766 if (changed & NETIF_F_SG) {
2767 if (features & NETIF_F_SG)
2768 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
2770 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
2773 if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
2774 nn_err(nn, "Cannot disable HW TC offload while in use\n");
2778 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
2779 netdev->features, features, changed);
2781 if (new_ctrl == nn->dp.ctrl)
2784 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
2785 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2786 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2790 nn->dp.ctrl = new_ctrl;
2795 static netdev_features_t
2796 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
2797 netdev_features_t features)
2801 /* We can't do TSO over double-tagged packets (802.1ad) */
2802 features &= vlan_features_check(skb, features);
2804 if (!skb->encapsulation)
2807 /* Ensure that inner L4 header offset fits into TX descriptor field */
2808 if (skb_is_gso(skb)) {
2811 hdrlen = skb_inner_transport_header(skb) - skb->data +
2812 inner_tcp_hdrlen(skb);
2814 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
2815 features &= ~NETIF_F_GSO_MASK;
2818 /* VXLAN/GRE check */
2819 switch (vlan_get_protocol(skb)) {
2820 case htons(ETH_P_IP):
2821 l4_hdr = ip_hdr(skb)->protocol;
2823 case htons(ETH_P_IPV6):
2824 l4_hdr = ipv6_hdr(skb)->nexthdr;
2827 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2830 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
2831 skb->inner_protocol != htons(ETH_P_TEB) ||
2832 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
2833 (l4_hdr == IPPROTO_UDP &&
2834 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
2835 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
2836 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
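/*
 * Illustrative sketch, not part of the driver: as the comment above notes,
 * the TX descriptor has a small field for the inner L4 header offset, so
 * GSO is only left enabled on encapsulated packets whose combined outer and
 * inner headers fit in NFP_NET_LSO_MAX_HDR_SZ.  A hypothetical predicate
 * mirroring that check:
 */
static inline bool nfp_net_example_gso_hdr_fits(const struct sk_buff *skb)
{
        unsigned int hdrlen;

        hdrlen = skb_inner_transport_header(skb) - skb->data +
                 inner_tcp_hdrlen(skb);

        return hdrlen <= NFP_NET_LSO_MAX_HDR_SZ;
}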
2842 nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
2844 struct nfp_net *nn = netdev_priv(netdev);
2850 if (!nn->eth_port->is_split)
2851 err = snprintf(name, len, "p%d", nn->eth_port->label_port);
2853 err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
2854 nn->eth_port->label_subport);
2862 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
2863 * @nn: NFP Net device to reconfigure
2864 * @idx: Index into the port table where new port should be written
2865 * @port: UDP port to configure (pass zero to remove VXLAN port)
2867 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
2871 nn->vxlan_ports[idx] = port;
2873 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
2876 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2877 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
2878 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2879 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
2880 be16_to_cpu(nn->vxlan_ports[i]));
2882 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
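/*
 * Illustrative sketch, not part of the driver: each 32-bit VXLAN port
 * register holds two UDP ports, the even-index table entry in the low half
 * and the odd-index entry in the high half, which is why the loop above
 * walks the table two entries at a time.  A hypothetical packing helper:
 */
static inline u32 nfp_net_example_vxlan_reg(__be16 even_port, __be16 odd_port)
{
        return be16_to_cpu(odd_port) << 16 | be16_to_cpu(even_port);
}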
2886 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
2887 * @nn: NFP Network structure
2888 * @port: UDP port to look for
2890 * Return: if the port is already in the table -- its position;
2891 * if the port is not in the table -- a free position to use;
2892 * if the table is full -- -ENOSPC.
2894 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
2896 int i, free_idx = -ENOSPC;
2898 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
2899 if (nn->vxlan_ports[i] == port)
2901 if (!nn->vxlan_usecnt[i])
2908 static void nfp_net_add_vxlan_port(struct net_device *netdev,
2909 struct udp_tunnel_info *ti)
2911 struct nfp_net *nn = netdev_priv(netdev);
2914 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2917 idx = nfp_net_find_vxlan_idx(nn, ti->port);
2921 if (!nn->vxlan_usecnt[idx]++)
2922 nfp_net_set_vxlan_port(nn, idx, ti->port);
2925 static void nfp_net_del_vxlan_port(struct net_device *netdev,
2926 struct udp_tunnel_info *ti)
2928 struct nfp_net *nn = netdev_priv(netdev);
2931 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2934 idx = nfp_net_find_vxlan_idx(nn, ti->port);
2935 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
2938 if (!--nn->vxlan_usecnt[idx])
2939 nfp_net_set_vxlan_port(nn, idx, 0);
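/*
 * Illustrative sketch, not part of the driver: vxlan_usecnt[] reference
 * counts how many stack users share a table slot; only the 0 -> 1 transition
 * in the add path and the 1 -> 0 transition in the delete path reprogram the
 * hardware, so repeated offloads of the same port are cheap.  Hypothetical
 * helpers showing the two transitions:
 */
static inline bool nfp_net_example_port_get(u8 *usecnt)
{
        return (*usecnt)++ == 0;        /* true: first user, program HW */
}

static inline bool nfp_net_example_port_put(u8 *usecnt)
{
        return --(*usecnt) == 0;        /* true: last user, clear HW */
}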
2942 static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
2944 struct tc_cls_bpf_offload cmd = {
2949 if (!nfp_net_ebpf_capable(nn))
2952 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
2953 if (!nn->dp.bpf_offload_xdp)
2954 return prog ? -EBUSY : 0;
2955 cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
2959 cmd.command = TC_CLSBPF_ADD;
2962 ret = nfp_net_bpf_offload(nn, &cmd);
2963 /* Stop offload if replace not possible */
2964 if (ret && cmd.command == TC_CLSBPF_REPLACE)
2965 nfp_net_xdp_offload(nn, NULL);
2966 nn->dp.bpf_offload_xdp = prog && !ret;
2970 static int nfp_net_xdp_setup(struct nfp_net *nn, struct netdev_xdp *xdp)
2972 struct bpf_prog *old_prog = nn->dp.xdp_prog;
2973 struct bpf_prog *prog = xdp->prog;
2974 struct nfp_net_dp *dp;
2977 if (!prog && !nn->dp.xdp_prog)
2979 if (prog && nn->dp.xdp_prog) {
2980 prog = xchg(&nn->dp.xdp_prog, prog);
2982 nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
2986 dp = nfp_net_clone_dp(nn);
2990 dp->xdp_prog = prog;
2991 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
2992 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2993 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
2995 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
2996 err = nfp_net_ring_reconfig(nn, dp, xdp->extack);
3001 bpf_prog_put(old_prog);
3003 nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
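/*
 * Illustrative note, not part of the driver: attaching an XDP program above
 * doubles the TX ring demand (one XDP_TX ring per RX ring), switches RX
 * buffers to bidirectional DMA so XDP_TX can send straight from them, and
 * shifts the packet start to leave XDP_PACKET_HEADROOM in front of it.  A
 * hypothetical headroom calculation, assuming the firmware RX prepend offset
 * never exceeds XDP_PACKET_HEADROOM:
 */
static inline unsigned int
nfp_net_example_rx_dma_off(bool xdp, unsigned int rx_offset)
{
        return xdp ? XDP_PACKET_HEADROOM - rx_offset : 0;
}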
3008 static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
3010 struct nfp_net *nn = netdev_priv(netdev);
3012 switch (xdp->command) {
3013 case XDP_SETUP_PROG:
3014 return nfp_net_xdp_setup(nn, xdp);
3015 case XDP_QUERY_PROG:
3016 xdp->prog_attached = !!nn->dp.xdp_prog;
3023 static const struct net_device_ops nfp_net_netdev_ops = {
3024 .ndo_open = nfp_net_netdev_open,
3025 .ndo_stop = nfp_net_netdev_close,
3026 .ndo_start_xmit = nfp_net_tx,
3027 .ndo_get_stats64 = nfp_net_stat64,
3028 .ndo_setup_tc = nfp_net_setup_tc,
3029 .ndo_tx_timeout = nfp_net_tx_timeout,
3030 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3031 .ndo_change_mtu = nfp_net_change_mtu,
3032 .ndo_set_mac_address = eth_mac_addr,
3033 .ndo_set_features = nfp_net_set_features,
3034 .ndo_features_check = nfp_net_features_check,
3035 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
3036 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3037 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3038 .ndo_xdp = nfp_net_xdp,
3042 * nfp_net_info() - Print general info about the NIC
3043 * @nn: NFP Net device to print info about
3045 void nfp_net_info(struct nfp_net *nn)
3047 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3048 nn->dp.is_vf ? "VF " : "",
3049 nn->dp.num_tx_rings, nn->max_tx_rings,
3050 nn->dp.num_rx_rings, nn->max_rx_rings);
3051 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3052 nn->fw_ver.resv, nn->fw_ver.class,
3053 nn->fw_ver.major, nn->fw_ver.minor,
3055 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3057 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3058 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3059 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3060 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3061 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3062 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3063 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3064 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3065 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3066 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3067 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3068 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3069 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3070 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
3071 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3072 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3073 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3074 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3075 nfp_net_ebpf_capable(nn) ? "BPF " : "",
3076 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3077 "RXCSUM_COMPLETE " : "");
3081 * nfp_net_netdev_alloc() - Allocate netdev and related structure
3083 * @max_tx_rings: Maximum number of TX rings supported by device
3084 * @max_rx_rings: Maximum number of RX rings supported by device
3086 * This function allocates a netdev device and fills in the initial
3087 * part of the @struct nfp_net structure.
3089 * Return: NFP Net device structure, or ERR_PTR on error.
3091 struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
3092 unsigned int max_tx_rings,
3093 unsigned int max_rx_rings)
3095 struct net_device *netdev;
3098 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3099 max_tx_rings, max_rx_rings);
3101 return ERR_PTR(-ENOMEM);
3103 SET_NETDEV_DEV(netdev, &pdev->dev);
3104 nn = netdev_priv(netdev);
3106 nn->dp.netdev = netdev;
3107 nn->dp.dev = &pdev->dev;
3110 nn->max_tx_rings = max_tx_rings;
3111 nn->max_rx_rings = max_rx_rings;
3113 nn->dp.num_tx_rings = min_t(unsigned int,
3114 max_tx_rings, num_online_cpus());
3115 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3116 netif_get_num_default_rss_queues());
3118 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3119 nn->dp.num_r_vecs = min_t(unsigned int,
3120 nn->dp.num_r_vecs, num_online_cpus());
3122 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3123 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3125 spin_lock_init(&nn->reconfig_lock);
3126 spin_lock_init(&nn->rx_filter_lock);
3127 spin_lock_init(&nn->link_status_lock);
3129 setup_timer(&nn->reconfig_timer,
3130 nfp_net_reconfig_timer, (unsigned long)nn);
3131 setup_timer(&nn->rx_filter_stats_timer,
3132 nfp_net_filter_stats_timer, (unsigned long)nn);
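/*
 * Illustrative sketch, not part of the driver: the defaults chosen above cap
 * TX rings at the number of online CPUs, RX rings at the stack's default RSS
 * queue count, and ring vectors at whichever of the two is larger, bounded
 * again by the CPU count.  A hypothetical helper with the same derivation:
 */
static unsigned int
nfp_net_example_default_r_vecs(unsigned int max_tx, unsigned int max_rx,
                               unsigned int cpus, unsigned int rss_queues)
{
        unsigned int tx = min(max_tx, cpus);
        unsigned int rx = min(max_rx, rss_queues);

        return min(max(tx, rx), cpus);
}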
3138 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
3139 * @nn: NFP Net device to free
3141 void nfp_net_netdev_free(struct nfp_net *nn)
3143 free_netdev(nn->dp.netdev);
3147 * nfp_net_rss_key_sz() - Get current size of the RSS key
3148 * @nn: NFP Net device instance
3150 * Return: size of the RSS key for currently selected hash function.
3152 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3154 switch (nn->rss_hfunc) {
3155 case ETH_RSS_HASH_TOP:
3156 return NFP_NET_CFG_RSS_KEY_SZ;
3157 case ETH_RSS_HASH_XOR:
3159 case ETH_RSS_HASH_CRC32:
3163 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3168 * nfp_net_rss_init() - Set the initial RSS parameters
3169 * @nn: NFP Net device to reconfigure
3171 static void nfp_net_rss_init(struct nfp_net *nn)
3173 unsigned long func_bit, rss_cap_hfunc;
3176 /* Read the RSS function capability and select first supported func */
3177 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3178 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3180 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3181 NFP_NET_CFG_RSS_TOEPLITZ);
3183 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3184 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3185 dev_warn(nn->dp.dev,
3186 "Bad RSS config, defaulting to Toeplitz hash\n");
3187 func_bit = ETH_RSS_HASH_TOP_BIT;
3189 nn->rss_hfunc = 1 << func_bit;
3191 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3193 nfp_net_rss_init_itbl(nn);
3195 /* Enable IPv4/IPv6 TCP by default */
3196 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3197 NFP_NET_CFG_RSS_IPV6_TCP |
3198 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3199 NFP_NET_CFG_RSS_MASK;
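/*
 * Illustrative sketch, not part of the driver: NFP_NET_CFG_RSS_CAP carries a
 * bitmask of hash functions the firmware supports; nfp_net_rss_init() above
 * picks the lowest set bit and falls back to Toeplitz when nothing usable is
 * advertised.  A hypothetical decoder for that selection:
 */
static u8 nfp_net_example_pick_hfunc(unsigned long hfunc_mask)
{
        unsigned long bit;

        bit = find_first_bit(&hfunc_mask, NFP_NET_CFG_RSS_HFUNCS);
        if (bit == NFP_NET_CFG_RSS_HFUNCS)      /* nothing advertised */
                bit = ETH_RSS_HASH_TOP_BIT;     /* default to Toeplitz */

        return 1 << bit;
}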
3203 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
3204 * @nn: NFP Net device to reconfigure
3206 static void nfp_net_irqmod_init(struct nfp_net *nn)
3208 nn->rx_coalesce_usecs = 50;
3209 nn->rx_coalesce_max_frames = 64;
3210 nn->tx_coalesce_usecs = 50;
3211 nn->tx_coalesce_max_frames = 64;
3215 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
3216 * @netdev: netdev structure
3218 * Return: 0 on success or negative errno on error.
3220 int nfp_net_netdev_init(struct net_device *netdev)
3222 struct nfp_net *nn = netdev_priv(netdev);
3225 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3227 /* Get some of the read-only fields from the BAR */
3228 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3229 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
3231 /* Chained metadata is signalled by capabilities except in version 4 */
3232 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
3233 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
3234 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
3235 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
3237 nfp_net_write_mac_addr(nn);
3239 /* Determine RX packet/metadata boundary offset */
3240 if (nn->fw_ver.major >= 2) {
3243 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3244 if (reg > NFP_NET_MAX_PREPEND) {
3245 nn_err(nn, "Invalid rx offset: %d\n", reg);
3248 nn->dp.rx_offset = reg;
3250 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3253 /* Set default MTU and Freelist buffer size */
3254 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
3255 netdev->mtu = nn->max_mtu;
3257 netdev->mtu = NFP_NET_DEFAULT_MTU;
3258 nn->dp.mtu = netdev->mtu;
3259 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3261 /* Advertise/enable offloads based on capabilities
3263 * Note: netdev->features shows the currently enabled features
3264 * and netdev->hw_features advertises which features are
3265 * supported. By default we enable most features.
3267 netdev->hw_features = NETIF_F_HIGHDMA;
3268 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3269 netdev->hw_features |= NETIF_F_RXCSUM;
3270 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3272 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3273 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3274 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3276 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3277 netdev->hw_features |= NETIF_F_SG;
3278 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3280 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3281 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3282 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3283 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3284 NFP_NET_CFG_CTRL_LSO;
3286 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
3287 netdev->hw_features |= NETIF_F_RXHASH;
3288 nfp_net_rss_init(nn);
3289 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
3290 NFP_NET_CFG_CTRL_RSS;
3292 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3293 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3294 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3295 netdev->hw_features |= NETIF_F_GSO_GRE |
3296 NETIF_F_GSO_UDP_TUNNEL;
3297 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3299 netdev->hw_enc_features = netdev->hw_features;
3302 netdev->vlan_features = netdev->hw_features;
3304 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3305 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3306 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3308 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3309 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3310 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3312 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3313 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3317 netdev->features = netdev->hw_features;
3319 if (nfp_net_ebpf_capable(nn))
3320 netdev->hw_features |= NETIF_F_HW_TC;
3322 /* Advertise but disable TSO by default. */
3323 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3324 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3326 /* Allow L2 Broadcast and Multicast through by default, if supported */
3327 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3328 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3329 if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
3330 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
3332 /* Allow IRQ moderation, if supported */
3333 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3334 nfp_net_irqmod_init(nn);
3335 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3338 /* Stash the re-configuration queue away. First odd queue in TX Bar */
3339 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
3341 /* Make sure the FW knows the netdev is supposed to be disabled here */
3342 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
3343 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
3344 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
3345 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
3346 NFP_NET_CFG_UPDATE_GEN);
3350 /* Finalise the netdev setup */
3351 netdev->netdev_ops = &nfp_net_netdev_ops;
3352 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3354 /* MTU range: 68 - hw-specific max */
3355 netdev->min_mtu = ETH_MIN_MTU;
3356 netdev->max_mtu = nn->max_mtu;
3358 netif_carrier_off(netdev);
3360 nfp_net_set_ethtool_ops(netdev);
3361 nfp_net_vecs_init(netdev);
3363 return register_netdev(netdev);
3367 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
3368 * @netdev: netdev structure
3370 void nfp_net_netdev_clean(struct net_device *netdev)
3372 struct nfp_net *nn = netdev_priv(netdev);
3374 unregister_netdev(nn->dp.netdev);
3376 if (nn->dp.xdp_prog)
3377 bpf_prog_put(nn->dp.xdp_prog);
3378 if (nn->dp.bpf_offload_xdp)
3379 nfp_net_xdp_offload(nn, NULL);