1 /* Intel Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 #include <linux/types.h>
22 #include <linux/module.h>
26 #include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"
31 #define DRV_VERSION "0.12.2-k"
32 const char fm10k_driver_version[] = DRV_VERSION;
33 char fm10k_driver_name[] = "fm10k";
34 static const char fm10k_driver_string[] =
35 "Intel(R) Ethernet Switch Host Interface Driver";
36 static const char fm10k_copyright[] =
37 "Copyright (c) 2013 Intel Corporation.";
39 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
40 MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
41 MODULE_LICENSE("GPL");
42 MODULE_VERSION(DRV_VERSION);
/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);
/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();
}
module_exit(fm10k_exit_module);
static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}
/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
120 union fm10k_rx_desc *rx_desc;
121 struct fm10k_rx_buffer *bi;
122 u16 i = rx_ring->next_to_use;
128 rx_desc = FM10K_RX_DESC(rx_ring, i);
129 bi = &rx_ring->rx_buffer[i];
133 if (!fm10k_alloc_mapped_page(rx_ring, bi))
		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
145 rx_desc = FM10K_RX_DESC(rx_ring, 0);
146 bi = rx_ring->rx_buffer;
150 /* clear the hdr_addr for the next_to_use descriptor */
151 rx_desc->q.hdr_addr = 0;
154 } while (cleaned_count);
158 if (rx_ring->next_to_use != i) {
159 /* record the next descriptor to use */
160 rx_ring->next_to_use = i;
162 /* update next to alloc since we have filled the ring */
163 rx_ring->next_to_alloc = i;
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}
/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;
190 new_buff = &rx_ring->rx_buffer[nta];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
196 /* transfer page from old buffer to new buffer */
197 memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));
199 /* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ, DMA_FROM_DEVICE);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(page_to_nid(page) != numa_mem_id()))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;

	/* since we are the only owner of the page and we need to
	 * increment it, just set the value to 2 in order to avoid
	 * an unnecessary locked operation
	 */
	atomic_set(&page->_count, 2);
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;

	/* bump ref count on page before it is given to the stack */
	get_page(page);
#endif

	return true;
}
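/* Illustration (not part of the original source): with a 4 KiB PAGE_SIZE and
 * a half-page receive buffer, page_offset above simply alternates between 0
 * and FM10K_RX_BUFSZ on each reuse, so the two halves of one page service
 * consecutive receives.  On larger pages the offset instead walks forward by
 * truesize until there is no longer room for another full buffer.
 */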
242 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
243 * @rx_ring: rx descriptor ring to transact packets on
244 * @rx_buffer: buffer containing page to add
245 * @rx_desc: descriptor containing length of buffer written by hardware
246 * @skb: sk_buff to place the data into
248 * This function will add the data contained in rx_buffer->page to the skb.
249 * This is done either through a direct copy if the data in the buffer is
250 * less than the skb header size, otherwise it will just attach the page as
253 * The function will then update the page offset if necessary and return
254 * true if the buffer can be reused by the interface.
256 static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
257 struct fm10k_rx_buffer *rx_buffer,
258 union fm10k_rx_desc *rx_desc,
261 struct page *page = rx_buffer->page;
262 unsigned int size = le16_to_cpu(rx_desc->w.length);
263 #if (PAGE_SIZE < 8192)
264 unsigned int truesize = FM10K_RX_BUFSZ;
266 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
269 if ((size <= FM10K_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
270 unsigned char *va = page_address(page) + rx_buffer->page_offset;
272 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
274 /* we can reuse buffer as-is, just make sure it is local */
275 if (likely(page_to_nid(page) == numa_mem_id()))
278 /* this page cannot be reused so discard it */
283 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
284 rx_buffer->page_offset, size, truesize);
286 return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
289 static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
290 union fm10k_rx_desc *rx_desc,
293 struct fm10k_rx_buffer *rx_buffer;
296 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
298 page = rx_buffer->page;
302 void *page_addr = page_address(page) +
303 rx_buffer->page_offset;
305 /* prefetch first cache line of first page */
307 #if L1_CACHE_BYTES < 128
308 prefetch(page_addr + L1_CACHE_BYTES);
311 /* allocate a skb to store the frags */
312 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
314 if (unlikely(!skb)) {
315 rx_ring->rx_stats.alloc_failed++;
319 /* we will be copying header into skb->data in
320 * pskb_may_pull so it is in our interest to prefetch
321 * it now to avoid a possible cache miss
323 prefetchw(skb->data);
326 /* we are reusing so sync this buffer for CPU use */
327 dma_sync_single_range_for_cpu(rx_ring->dev,
329 rx_buffer->page_offset,
333 /* pull page into skb */
334 if (fm10k_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
335 /* hand second half of page back to the ring */
336 fm10k_reuse_rx_page(rx_ring, rx_buffer);
338 /* we are not reusing the buffer so unmap it */
339 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
340 PAGE_SIZE, DMA_FROM_DEVICE);
343 /* clear contents of rx_buffer */
344 rx_buffer->page = NULL;
349 static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
350 union fm10k_rx_desc *rx_desc,
353 skb_checksum_none_assert(skb);
355 /* Rx checksum disabled via ethtool */
356 if (!(ring->netdev->features & NETIF_F_RXCSUM))
359 /* TCP/UDP checksum error bit is set */
360 if (fm10k_test_staterr(rx_desc,
361 FM10K_RXD_STATUS_L4E |
362 FM10K_RXD_STATUS_L4E2 |
363 FM10K_RXD_STATUS_IPE |
364 FM10K_RXD_STATUS_IPE2)) {
365 ring->rx_stats.csum_err++;
369 /* It must be a TCP or UDP packet with a valid checksum */
370 if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
371 skb->encapsulation = true;
372 else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
375 skb->ip_summed = CHECKSUM_UNNECESSARY;
378 #define FM10K_RSS_L4_TYPES_MASK \
379 ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
380 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
381 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
382 (1ul << FM10K_RSSTYPE_IPV6_UDP))
384 static inline void fm10k_rx_hash(struct fm10k_ring *ring,
385 union fm10k_rx_desc *rx_desc,
390 if (!(ring->netdev->features & NETIF_F_RXHASH))
393 rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
397 skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
398 (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
399 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
402 static void fm10k_type_trans(struct fm10k_ring *rx_ring,
403 union fm10k_rx_desc *rx_desc,
406 struct net_device *dev = rx_ring->netdev;
407 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);
409 /* check to see if DGLORT belongs to a MACVLAN */
411 u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;
413 idx -= l2_accel->dglort;
414 if (idx < l2_accel->size && l2_accel->macvlan[idx])
415 dev = l2_accel->macvlan[idx];
420 skb->protocol = eth_type_trans(skb, dev);
425 /* update MACVLAN statistics */
426 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
427 !!(rx_desc->w.hdr_info &
428 cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
432 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
433 * @rx_ring: rx descriptor ring packet is being transacted on
434 * @rx_desc: pointer to the EOP Rx descriptor
435 * @skb: pointer to current skb being populated
437 * This function checks the ring, descriptor, and packet information in
438 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
439 * other fields within the skb.
441 static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
442 union fm10k_rx_desc *rx_desc,
445 unsigned int len = skb->len;
447 fm10k_rx_hash(rx_ring, rx_desc, skb);
449 fm10k_rx_checksum(rx_ring, rx_desc, skb);
451 FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
453 skb_record_rx_queue(skb, rx_ring->queue_index);
455 FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
457 if (rx_desc->w.vlan) {
458 u16 vid = le16_to_cpu(rx_desc->w.vlan);
460 if (vid != rx_ring->vid)
461 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
464 fm10k_type_trans(rx_ring, rx_desc, skb);
470 * fm10k_is_non_eop - process handling of non-EOP buffers
471 * @rx_ring: Rx ring being processed
472 * @rx_desc: Rx descriptor for current buffer
474 * This function updates next to clean. If the buffer is an EOP buffer
475 * this function exits returning false, otherwise it will place the
476 * sk_buff in the next buffer to be chained and return true indicating
477 * that this is in fact a non-EOP buffer.
479 static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
480 union fm10k_rx_desc *rx_desc)
482 u32 ntc = rx_ring->next_to_clean + 1;
484 /* fetch, update, and store next to clean */
485 ntc = (ntc < rx_ring->count) ? ntc : 0;
486 rx_ring->next_to_clean = ntc;
488 prefetch(FM10K_RX_DESC(rx_ring, ntc));
	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}
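/* Example (illustrative): a 9 KiB jumbo frame received into half-page
 * buffers spans several descriptors; fm10k_is_non_eop() returns true for
 * each of them until the descriptor with FM10K_RXD_STATUS_EOP set is
 * reached, at which point the accumulated skb is handed up the stack.
 */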
497 * fm10k_pull_tail - fm10k specific version of skb_pull_tail
498 * @rx_ring: rx descriptor ring packet is being transacted on
499 * @rx_desc: pointer to the EOP Rx descriptor
500 * @skb: pointer to current skb being adjusted
502 * This function is an fm10k specific version of __pskb_pull_tail. The
503 * main difference between this version and the original function is that
504 * this function can make several assumptions about the state of things
505 * that allow for significant optimizations versus the standard function.
506 * As a result we can do things like drop a frag and maintain an accurate
507 * truesize for the skb.
509 static void fm10k_pull_tail(struct fm10k_ring *rx_ring,
			    union fm10k_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned char *va;
	unsigned int pull_len;

	/* it is valid to use page_address instead of kmap since we are
	 * working with pages allocated out of the lowmem pool per
	 * alloc_page(GFP_ATOMIC)
	 */
	va = skb_frag_address(frag);
	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
528 /* align pull length to size of long to optimize memcpy performance */
529 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
531 /* update all of the pointers */
532 skb_frag_size_sub(frag, pull_len);
533 frag->page_offset += pull_len;
534 skb->data_len -= pull_len;
	skb->tail += pull_len;
}
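/* Worked example (hypothetical frame): for a TCP/IPv4 packet with no options
 * eth_get_headlen() typically reports 14 + 20 + 20 = 54 bytes of headers, so
 * pull_len = 54; the memcpy itself is rounded up to a sizeof(long) multiple,
 * while the frag and length accounting move by exactly 54 bytes, keeping
 * skb->truesize accurate.
 */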
539 * fm10k_cleanup_headers - Correct corrupted or empty headers
540 * @rx_ring: rx descriptor ring packet is being transacted on
541 * @rx_desc: pointer to the EOP Rx descriptor
542 * @skb: pointer to current skb being fixed
544 * Address the case where we are pulling data in on pages only
545 * and as such no data is present in the skb header.
547 * In addition if skb is not at least 60 bytes we need to pad it so that
548 * it is large enough to qualify as a valid Ethernet frame.
550 * Returns true if an error was encountered and skb was freed.
552 static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
553 union fm10k_rx_desc *rx_desc,
556 if (unlikely((fm10k_test_staterr(rx_desc,
557 FM10K_RXD_STATUS_RXE)))) {
558 dev_kfree_skb_any(skb);
559 rx_ring->rx_stats.errors++;
563 /* place header in linear portion of buffer */
564 if (skb_is_nonlinear(skb))
565 fm10k_pull_tail(rx_ring, rx_desc, skb);
	/* if skb_pad returns an error the skb was freed */
	if (unlikely(skb->len < 60)) {
		int pad_len = 60 - skb->len;

		if (skb_pad(skb, pad_len))
			return true;
		__skb_put(skb, pad_len);
	}

	return false;
}
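/* Example (illustrative): a 42-byte ARP request is extended with 18 zero
 * bytes here so the frame meets the 60-byte minimum Ethernet frame size
 * excluding FCS (64 bytes on the wire once the hardware appends the FCS).
 */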
580 * fm10k_receive_skb - helper function to handle rx indications
581 * @q_vector: structure containing interrupt and ring information
582 * @skb: packet to send up
584 static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
587 napi_gro_receive(&q_vector->napi, skb);
590 static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
591 struct fm10k_ring *rx_ring,
594 struct sk_buff *skb = rx_ring->skb;
595 unsigned int total_bytes = 0, total_packets = 0;
596 u16 cleaned_count = fm10k_desc_unused(rx_ring);
599 union fm10k_rx_desc *rx_desc;
601 /* return some buffers to hardware, one at a time is too slow */
602 if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
603 fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
607 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
609 if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * RXD_STATUS_DD bit is set
		 */
		rmb();
618 /* retrieve a buffer from the ring */
619 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
621 /* exit if we failed to retrieve a buffer */
627 /* fetch next buffer in frame if non-eop */
628 if (fm10k_is_non_eop(rx_ring, rx_desc))
631 /* verify the packet layout is correct */
632 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
637 /* populate checksum, timestamp, VLAN, and protocol */
638 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
640 fm10k_receive_skb(q_vector, skb);
642 /* reset skb pointer */
645 /* update budget accounting */
647 } while (likely(total_packets < budget));
649 /* place incomplete frames back on ring for completion */
652 u64_stats_update_begin(&rx_ring->syncp);
653 rx_ring->stats.packets += total_packets;
654 rx_ring->stats.bytes += total_bytes;
655 u64_stats_update_end(&rx_ring->syncp);
656 q_vector->rx.total_packets += total_packets;
657 q_vector->rx.total_bytes += total_bytes;
659 return total_packets < budget;
662 #define VXLAN_HLEN (sizeof(struct udphdr) + 8)
663 static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
665 struct fm10k_intfc *interface = netdev_priv(skb->dev);
666 struct fm10k_vxlan_port *vxlan_port;
668 /* we can only offload a vxlan if we recognize it as such */
669 vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
670 struct fm10k_vxlan_port, list);
674 if (vxlan_port->port != udp_hdr(skb)->dest)
677 /* return offset of udp_hdr plus 8 bytes for VXLAN header */
678 return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
681 #define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
682 #define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
691 struct fm10k_nvgre_hdr *nvgre_hdr;
692 int hlen = ip_hdrlen(skb);
694 /* currently only IPv4 is supported due to hlen above */
695 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
698 /* our transport header should be NVGRE */
699 nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);
701 /* verify all reserved flags are 0 */
702 if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
705 /* verify protocol is transparent Ethernet bridging */
706 if (nvgre_hdr->proto != htons(ETH_P_TEB))
709 /* report start of ethernet header */
710 if (nvgre_hdr->flags & NVGRE_TNI)
711 return (struct ethhdr *)(nvgre_hdr + 1);
	return (struct ethhdr *)(&nvgre_hdr->tni);
}
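/* Layout note (summarized from the NVGRE spec, not from the driver itself):
 * the base GRE header is 4 bytes of flags plus protocol.  When the key bit
 * (NVGRE_TNI above) is set, a 4-byte TNI/FlowID word follows, so the inner
 * Ethernet header starts 8 bytes past the GRE header; otherwise it starts
 * right after the first 4 bytes, which is what the two return paths above
 * express.
 */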
716 static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
718 struct ethhdr *eth_hdr;
721 switch (vlan_get_protocol(skb)) {
722 case htons(ETH_P_IP):
723 l4_hdr = ip_hdr(skb)->protocol;
725 case htons(ETH_P_IPV6):
726 l4_hdr = ipv6_hdr(skb)->nexthdr;
734 eth_hdr = fm10k_port_is_vxlan(skb);
737 eth_hdr = fm10k_gre_is_nvgre(skb);
746 switch (eth_hdr->h_proto) {
747 case htons(ETH_P_IP):
748 case htons(ETH_P_IPV6):
754 return eth_hdr->h_proto;
757 static int fm10k_tso(struct fm10k_ring *tx_ring,
758 struct fm10k_tx_buffer *first)
760 struct sk_buff *skb = first->skb;
761 struct fm10k_tx_desc *tx_desc;
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;
771 /* compute header lengths */
772 if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}
780 /* compute offset from SOF to transport header and add header len */
781 hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
783 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
785 /* update gso size and bytecount with header size */
786 first->gso_segs = skb_shinfo(skb)->gso_segs;
787 first->bytecount += (first->gso_segs - 1) * hdrlen;
789 /* populate Tx descriptor header size and mss */
790 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
791 tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
797 if (!net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}
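/* Worked example for fm10k_tso() (illustrative numbers): an untagged
 * TCP/IPv4 frame with no options has hdrlen = 14 + 20 + 20 = 54 bytes.
 * Since those 54 bytes are repeated in every segment the hardware emits,
 * bytecount is increased by (gso_segs - 1) * 54 so BQL and the transmit
 * statistics match what actually goes on the wire.
 */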
803 static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
804 struct fm10k_tx_buffer *first)
806 struct sk_buff *skb = first->skb;
807 struct fm10k_tx_desc *tx_desc;
810 struct ipv6hdr *ipv6;
816 if (skb->ip_summed != CHECKSUM_PARTIAL)
819 if (skb->encapsulation) {
820 protocol = fm10k_tx_encap_offload(skb);
822 if (skb_checksum_help(skb)) {
823 dev_warn(tx_ring->dev,
824 "failed to offload encap csum!\n");
825 tx_ring->tx_stats.csum_err++;
829 network_hdr.raw = skb_inner_network_header(skb);
831 protocol = vlan_get_protocol(skb);
832 network_hdr.raw = skb_network_header(skb);
836 case htons(ETH_P_IP):
837 l4_hdr = network_hdr.ipv4->protocol;
839 case htons(ETH_P_IPV6):
840 l4_hdr = network_hdr.ipv6->nexthdr;
843 if (unlikely(net_ratelimit())) {
844 dev_warn(tx_ring->dev,
845 "partial checksum but ip version=%x!\n",
848 tx_ring->tx_stats.csum_err++;
857 if (skb->encapsulation)
860 if (unlikely(net_ratelimit())) {
861 dev_warn(tx_ring->dev,
862 "partial checksum but l4 proto=%x!\n",
865 tx_ring->tx_stats.csum_err++;
869 /* update TX checksum flag */
870 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
873 /* populate Tx descriptor header size and mss */
874 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
879 #define FM10K_SET_FLAG(_input, _flag, _result) \
880 ((_flag <= _result) ? \
881 ((u32)(_input & _flag) * (_result / _flag)) : \
882 ((u32)(_input & _flag) / (_flag / _result)))
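/* How FM10K_SET_FLAG works (illustrative bit values): it relocates a single
 * flag bit from the software tx_flags word to its descriptor position using
 * only a multiply or divide, e.g. with _flag = 0x0001 and _result = 0x20 an
 * input of 0x0001 yields 0x0001 * (0x20 / 0x0001) = 0x20, and 0 yields 0.
 * Both operands are compile-time constants, so the compiler reduces the
 * whole expression to a shift.
 */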
884 static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
886 /* set type for advanced descriptor with frame checksum insertion */
889 /* set checksum offload bits */
890 desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
891 FM10K_TXD_FLAG_CSUM);
896 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
897 struct fm10k_tx_desc *tx_desc, u16 i,
898 dma_addr_t dma, unsigned int size, u8 desc_flags)
900 /* set RS and INT for last frame in a cache line */
901 if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
902 desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;
904 /* record values to descriptor */
905 tx_desc->buffer_addr = cpu_to_le64(dma);
906 tx_desc->flags = desc_flags;
907 tx_desc->buflen = cpu_to_le16(size);
909 /* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
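/* Design note (assumption based on the code above): requesting RS/INT only
 * on the last descriptor of each FM10K_TXD_WB_FIFO_SIZE-aligned group means
 * the hardware writes back completion status (and may raise an interrupt)
 * once per group rather than once per descriptor, reducing PCIe write-back
 * traffic.
 */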
913 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
914 struct fm10k_tx_buffer *first)
916 struct sk_buff *skb = first->skb;
917 struct fm10k_tx_buffer *tx_buffer;
918 struct fm10k_tx_desc *tx_desc;
919 struct skb_frag_struct *frag;
922 unsigned int data_len, size;
923 u32 tx_flags = first->tx_flags;
924 u16 i = tx_ring->next_to_use;
925 u8 flags = fm10k_tx_desc_flags(skb, tx_flags);
927 tx_desc = FM10K_TX_DESC(tx_ring, i);
929 /* add HW VLAN tag */
	if (vlan_tx_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
	else
		tx_desc->vlan = 0;
935 size = skb_headlen(skb);
938 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
940 data_len = skb->data_len;
943 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
944 if (dma_mapping_error(tx_ring->dev, dma))
947 /* record length, and DMA address */
948 dma_unmap_len_set(tx_buffer, len, size);
949 dma_unmap_addr_set(tx_buffer, dma, dma);
951 while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
952 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
953 FM10K_MAX_DATA_PER_TXD, flags)) {
954 tx_desc = FM10K_TX_DESC(tx_ring, 0);
958 dma += FM10K_MAX_DATA_PER_TXD;
959 size -= FM10K_MAX_DATA_PER_TXD;
962 if (likely(!data_len))
965 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
967 tx_desc = FM10K_TX_DESC(tx_ring, 0);
971 size = skb_frag_size(frag);
974 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
977 tx_buffer = &tx_ring->tx_buffer[i];
980 /* write last descriptor with LAST bit set */
981 flags |= FM10K_TXD_FLAG_LAST;
983 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
986 /* record bytecount for BQL */
987 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
989 /* record SW timestamp if HW timestamp is not available */
990 skb_tx_timestamp(first->skb);
992 /* Force memory writes to complete before letting h/w know there
993 * are new descriptors to fetch. (Only applicable for weak-ordered
994 * memory model archs, such as IA-64).
996 * We also need this memory barrier to make certain all of the
997 * status bits have been updated before next_to_watch is written.
1001 /* set next_to_watch value indicating a packet is present */
1002 first->next_to_watch = tx_desc;
1004 tx_ring->next_to_use = i;
1006 /* notify HW of packet */
1007 writel(i, tx_ring->tail);
1009 /* we need this if more than one processor can write to our tail
1010 * at a time, it synchronizes IO on IA64/Altix systems
1016 dev_err(tx_ring->dev, "TX DMA map failed\n");
1018 /* clear dma mappings for failed tx_buffer map */
1020 tx_buffer = &tx_ring->tx_buffer[i];
1021 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1022 if (tx_buffer == first)
1029 tx_ring->next_to_use = i;
1032 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
1034 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
1038 /* We need to check again in a case another CPU has just
1039 * made room available. */
1040 if (likely(fm10k_desc_unused(tx_ring) < size))
1043 /* A reprieve! - use start_queue because it doesn't call schedule */
1044 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
1045 ++tx_ring->tx_stats.restart_queue;
1049 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;

	return __fm10k_maybe_stop_tx(tx_ring, size);
}
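/* Usage note: the transmit path calls fm10k_maybe_stop_tx(tx_ring,
 * DESC_NEEDED) after queueing each frame, so the subqueue is stopped while
 * there is still room for one worst-case packet; fm10k_clean_tx_irq() wakes
 * it again once enough descriptors have been reclaimed.
 */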
1056 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1057 struct fm10k_ring *tx_ring)
1059 struct fm10k_tx_buffer *first;
1062 #if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
1065 u16 count = TXD_USE_COUNT(skb_headlen(skb));
	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
1072 #if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
1073 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1074 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1076 count += skb_shinfo(skb)->nr_frags;
1078 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1079 tx_ring->tx_stats.tx_busy++;
1080 return NETDEV_TX_BUSY;
1083 /* record the location of the first descriptor for this packet */
1084 first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1086 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1087 first->gso_segs = 1;
1089 /* record initial flags and protocol */
1090 first->tx_flags = tx_flags;
1092 tso = fm10k_tso(tx_ring, first);
1096 fm10k_tx_csum(tx_ring, first);
1098 fm10k_tx_map(tx_ring, first);
1100 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1102 return NETDEV_TX_OK;
1105 dev_kfree_skb_any(first->skb);
1108 return NETDEV_TX_OK;
1111 static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
1113 return ring->stats.packets;
1116 static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
1118 /* use SW head and tail until we have real hardware */
1119 u32 head = ring->next_to_clean;
1120 u32 tail = ring->next_to_use;
	return ((head <= tail) ? tail : tail + ring->count) - head;
}
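/* Worked example (illustrative): with a 256-entry ring, next_to_clean = 250
 * and next_to_use = 5, head > tail, so the result is (5 + 256) - 250 = 11
 * descriptors still outstanding.
 */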
1125 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1127 u32 tx_done = fm10k_get_tx_completed(tx_ring);
1128 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1129 u32 tx_pending = fm10k_get_tx_pending(tx_ring);
1131 clear_check_for_tx_hang(tx_ring);
1133 /* Check for a hung queue, but be thorough. This verifies
1134 * that a transmit has been completed since the previous
1135 * check AND there is at least one packet pending. By
1136 * requiring this to fail twice we avoid races with
1137 * clearing the ARMED bit and conditions where we
1138 * run the check_tx_hang logic with a transmit completion
1139 * pending but without time to complete it yet.
1141 if (!tx_pending || (tx_done_old != tx_done)) {
1142 /* update completed stats and continue */
1143 tx_ring->tx_stats.tx_done_old = tx_done;
1144 /* reset the countdown */
1145 clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1150 /* make sure it is true for two checks in a row */
1151 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1155 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
1156 * @interface: driver private struct
1158 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
1160 /* Do the reset outside of interrupt context */
1161 if (!test_bit(__FM10K_DOWN, &interface->state)) {
1162 netdev_err(interface->netdev, "Reset interface\n");
1163 interface->tx_timeout_count++;
1164 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1165 fm10k_service_event_schedule(interface);
1170 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
1171 * @q_vector: structure containing interrupt and ring information
1172 * @tx_ring: tx ring to clean
1174 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
1175 struct fm10k_ring *tx_ring)
1177 struct fm10k_intfc *interface = q_vector->interface;
1178 struct fm10k_tx_buffer *tx_buffer;
1179 struct fm10k_tx_desc *tx_desc;
1180 unsigned int total_bytes = 0, total_packets = 0;
1181 unsigned int budget = q_vector->tx.work_limit;
1182 unsigned int i = tx_ring->next_to_clean;
1184 if (test_bit(__FM10K_DOWN, &interface->state))
1187 tx_buffer = &tx_ring->tx_buffer[i];
1188 tx_desc = FM10K_TX_DESC(tx_ring, i);
1189 i -= tx_ring->count;
1192 struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
1194 /* if next_to_watch is not set then there is no work pending */
1198 /* prevent any other reads prior to eop_desc */
1199 read_barrier_depends();
1201 /* if DD is not set pending work has not been completed */
1202 if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
1205 /* clear next_to_watch to prevent false hangs */
1206 tx_buffer->next_to_watch = NULL;
1208 /* update the statistics for this packet */
1209 total_bytes += tx_buffer->bytecount;
1210 total_packets += tx_buffer->gso_segs;
1213 dev_consume_skb_any(tx_buffer->skb);
1215 /* unmap skb header data */
1216 dma_unmap_single(tx_ring->dev,
1217 dma_unmap_addr(tx_buffer, dma),
1218 dma_unmap_len(tx_buffer, len),
1221 /* clear tx_buffer data */
1222 tx_buffer->skb = NULL;
1223 dma_unmap_len_set(tx_buffer, len, 0);
1225 /* unmap remaining buffers */
1226 while (tx_desc != eop_desc) {
1231 i -= tx_ring->count;
1232 tx_buffer = tx_ring->tx_buffer;
1233 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1236 /* unmap any remaining paged data */
1237 if (dma_unmap_len(tx_buffer, len)) {
1238 dma_unmap_page(tx_ring->dev,
1239 dma_unmap_addr(tx_buffer, dma),
1240 dma_unmap_len(tx_buffer, len),
1242 dma_unmap_len_set(tx_buffer, len, 0);
1246 /* move us one more past the eop_desc for start of next pkt */
1251 i -= tx_ring->count;
1252 tx_buffer = tx_ring->tx_buffer;
1253 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1256 /* issue prefetch for next Tx descriptor */
1259 /* update budget accounting */
1261 } while (likely(budget));
1263 i += tx_ring->count;
1264 tx_ring->next_to_clean = i;
1265 u64_stats_update_begin(&tx_ring->syncp);
1266 tx_ring->stats.bytes += total_bytes;
1267 tx_ring->stats.packets += total_packets;
1268 u64_stats_update_end(&tx_ring->syncp);
1269 q_vector->tx.total_bytes += total_bytes;
1270 q_vector->tx.total_packets += total_packets;
1272 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1273 /* schedule immediate reset if we believe we hung */
1274 struct fm10k_hw *hw = &interface->hw;
1276 netif_err(interface, drv, tx_ring->netdev,
1277 "Detected Tx Unit Hang\n"
1279 " TDH, TDT <%x>, <%x>\n"
1280 " next_to_use <%x>\n"
1281 " next_to_clean <%x>\n",
1282 tx_ring->queue_index,
1283 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1284 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1285 tx_ring->next_to_use, i);
1287 netif_stop_subqueue(tx_ring->netdev,
1288 tx_ring->queue_index);
1290 netif_info(interface, probe, tx_ring->netdev,
1291 "tx hang %d detected on queue %d, resetting interface\n",
1292 interface->tx_timeout_count + 1,
1293 tx_ring->queue_index);
1295 fm10k_tx_timeout_reset(interface);
1297 /* the netdev is about to reset, no point in enabling stuff */
1301 /* notify netdev of completed buffers */
1302 netdev_tx_completed_queue(txring_txq(tx_ring),
1303 total_packets, total_bytes);
1305 #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
1306 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1307 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1308 /* Make sure that anybody stopping the queue after this
1309 * sees the new next_to_clean.
1312 if (__netif_subqueue_stopped(tx_ring->netdev,
1313 tx_ring->queue_index) &&
1314 !test_bit(__FM10K_DOWN, &interface->state)) {
1315 netif_wake_subqueue(tx_ring->netdev,
1316 tx_ring->queue_index);
1317 ++tx_ring->tx_stats.restart_queue;
/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
1334 static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
1336 unsigned int avg_wire_size, packets;
1338 /* Only update ITR if we are using adaptive setting */
1339 if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
1342 packets = ring_container->total_packets;
1346 avg_wire_size = ring_container->total_bytes / packets;
1348 /* Add 24 bytes to size to account for CRC, preamble, and gap */
1349 avg_wire_size += 24;
1351 /* Don't starve jumbo frames */
1352 if (avg_wire_size > 3000)
1353 avg_wire_size = 3000;
1355 /* Give a little boost to mid-size frames */
1356 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
1361 /* write back value and retain adaptive flag */
1362 ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
1365 ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
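/* Worked example (illustrative): 1,000,000 bytes spread over 661 full-size
 * frames gives roughly 1512 bytes/packet; adding 24 for preamble/CRC/IFG
 * yields ~1536, which is then scaled and written back with
 * FM10K_ITR_ADAPTIVE still set so the next pass keeps adapting.  Larger
 * average frames therefore program a longer interrupt interval than
 * small-packet workloads.
 */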
1369 static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
1371 /* Enable auto-mask and clear the current mask */
1372 u32 itr = FM10K_ITR_ENABLE;
1375 fm10k_update_itr(&q_vector->tx);
1378 fm10k_update_itr(&q_vector->rx);
1380 /* Store Tx itr in timer slot 0 */
1381 itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
1383 /* Shift Rx itr to timer slot 1 */
1384 itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
1386 /* Write the final value to the ITR register */
1387 writel(itr, q_vector->itr);
1390 static int fm10k_poll(struct napi_struct *napi, int budget)
1392 struct fm10k_q_vector *q_vector =
1393 container_of(napi, struct fm10k_q_vector, napi);
1394 struct fm10k_ring *ring;
1395 int per_ring_budget;
1396 bool clean_complete = true;
1398 fm10k_for_each_ring(ring, q_vector->tx)
1399 clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
1401 /* attempt to distribute budget to each queue fairly, but don't
1402 * allow the budget to go below 1 because we'll exit polling
1404 if (q_vector->rx.count > 1)
1405 per_ring_budget = max(budget/q_vector->rx.count, 1);
1407 per_ring_budget = budget;
1409 fm10k_for_each_ring(ring, q_vector->rx)
1410 clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
1413 /* If all work not completed, return budget and keep polling */
1414 if (!clean_complete)
1417 /* all work done, exit the polling mode */
1418 napi_complete(napi);
1420 /* re-enable the q_vector */
1421 fm10k_qv_enable(q_vector);
1427 * fm10k_set_qos_queues: Allocate queues for a QOS-enabled device
1428 * @interface: board private structure to initialize
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * mode.
 *
 * This function handles all combinations of QoS and RSS.
1437 static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1439 struct net_device *dev = interface->netdev;
1440 struct fm10k_ring_feature *f;
1444 /* Map queue offset and counts onto allocated tx queues */
1445 pcs = netdev_get_num_tc(dev);
1450 /* set QoS mask and indices */
1451 f = &interface->ring_feature[RING_F_QOS];
1453 f->mask = (1 << fls(pcs - 1)) - 1;
1455 /* determine the upper limit for our current DCB mode */
1456 rss_i = interface->hw.mac.max_queues / pcs;
1457 rss_i = 1 << (fls(rss_i) - 1);
1459 /* set RSS mask and indices */
1460 f = &interface->ring_feature[RING_F_RSS];
1461 rss_i = min_t(u16, rss_i, f->limit);
1463 f->mask = (1 << fls(rss_i - 1)) - 1;
1465 /* configure pause class to queue mapping */
1466 for (i = 0; i < pcs; i++)
1467 netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
1469 interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}
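/* Worked example (hypothetical hardware limits): with 4 traffic classes and
 * mac.max_queues = 128, rss_i becomes 128 / 4 = 32 (already a power of two,
 * assuming the RSS limit allows it), each class i is given the queue range
 * [32 * i, 32 * i + 31] via netdev_set_tc_queue(), and the interface ends up
 * with 128 Tx and 128 Rx queues.
 */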
1476 * fm10k_set_rss_queues: Allocate queues for RSS
1477 * @interface: board private structure to initialize
1479 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
1480 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1483 static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1485 struct fm10k_ring_feature *f;
1488 f = &interface->ring_feature[RING_F_RSS];
1489 rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
1491 /* record indices and power of 2 mask for RSS */
1493 f->mask = (1 << fls(rss_i - 1)) - 1;
1495 interface->num_rx_queues = rss_i;
1496 interface->num_tx_queues = rss_i;
1502 * fm10k_set_num_queues: Allocate queues for device, feature dependent
1503 * @interface: board private structure to initialize
1505 * This is the top level queue allocation routine. The order here is very
1506 * important, starting with the "most" number of features turned on at once,
1507 * and ending with the smallest set of features. This way large combinations
1508 * can be allocated if they're turned on, and smaller combinations are the
1509 * fallthrough conditions.
1512 static void fm10k_set_num_queues(struct fm10k_intfc *interface)
1514 /* Start with base case */
1515 interface->num_rx_queues = 1;
1516 interface->num_tx_queues = 1;
1518 if (fm10k_set_qos_queues(interface))
1521 fm10k_set_rss_queues(interface);
1525 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
1526 * @interface: board private structure to initialize
1527 * @v_count: q_vectors allocated on interface, used for ring interleaving
1528 * @v_idx: index of vector in interface struct
1529 * @txr_count: total number of Tx rings to allocate
1530 * @txr_idx: index of first Tx ring to allocate
1531 * @rxr_count: total number of Rx rings to allocate
1532 * @rxr_idx: index of first Rx ring to allocate
1534 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1536 static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
1537 unsigned int v_count, unsigned int v_idx,
1538 unsigned int txr_count, unsigned int txr_idx,
1539 unsigned int rxr_count, unsigned int rxr_idx)
1541 struct fm10k_q_vector *q_vector;
1542 struct fm10k_ring *ring;
1543 int ring_count, size;
1545 ring_count = txr_count + rxr_count;
1546 size = sizeof(struct fm10k_q_vector) +
1547 (sizeof(struct fm10k_ring) * ring_count);
1549 /* allocate q_vector and rings */
1550 q_vector = kzalloc(size, GFP_KERNEL);
1554 /* initialize NAPI */
1555 netif_napi_add(interface->netdev, &q_vector->napi,
1556 fm10k_poll, NAPI_POLL_WEIGHT);
1558 /* tie q_vector and interface together */
1559 interface->q_vector[v_idx] = q_vector;
1560 q_vector->interface = interface;
1561 q_vector->v_idx = v_idx;
1563 /* initialize pointer to rings */
1564 ring = q_vector->ring;
1566 /* save Tx ring container info */
1567 q_vector->tx.ring = ring;
1568 q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
1569 q_vector->tx.itr = interface->tx_itr;
1570 q_vector->tx.count = txr_count;
1573 /* assign generic ring traits */
1574 ring->dev = &interface->pdev->dev;
1575 ring->netdev = interface->netdev;
1577 /* configure backlink on ring */
1578 ring->q_vector = q_vector;
1580 /* apply Tx specific ring traits */
1581 ring->count = interface->tx_ring_count;
1582 ring->queue_index = txr_idx;
1584 /* assign ring to interface */
1585 interface->tx_ring[txr_idx] = ring;
1587 /* update count and index */
1591 /* push pointer to next ring */
1595 /* save Rx ring container info */
1596 q_vector->rx.ring = ring;
1597 q_vector->rx.itr = interface->rx_itr;
1598 q_vector->rx.count = rxr_count;
1601 /* assign generic ring traits */
1602 ring->dev = &interface->pdev->dev;
1603 ring->netdev = interface->netdev;
1604 rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
1606 /* configure backlink on ring */
1607 ring->q_vector = q_vector;
1609 /* apply Rx specific ring traits */
1610 ring->count = interface->rx_ring_count;
1611 ring->queue_index = rxr_idx;
1613 /* assign ring to interface */
1614 interface->rx_ring[rxr_idx] = ring;
1616 /* update count and index */
1620 /* push pointer to next ring */
1624 fm10k_dbg_q_vector_init(q_vector);
1630 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
1631 * @interface: board private structure to initialize
1632 * @v_idx: Index of vector to be freed
1634 * This function frees the memory allocated to the q_vector. In addition if
1635 * NAPI is enabled it will delete any references to the NAPI struct prior
1636 * to freeing the q_vector.
1638 static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
1640 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
1641 struct fm10k_ring *ring;
1643 fm10k_dbg_q_vector_exit(q_vector);
1645 fm10k_for_each_ring(ring, q_vector->tx)
1646 interface->tx_ring[ring->queue_index] = NULL;
1648 fm10k_for_each_ring(ring, q_vector->rx)
1649 interface->rx_ring[ring->queue_index] = NULL;
1651 interface->q_vector[v_idx] = NULL;
1652 netif_napi_del(&q_vector->napi);
1653 kfree_rcu(q_vector, rcu);
1657 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
1658 * @interface: board private structure to initialize
1660 * We allocate one q_vector per queue interrupt. If allocation fails we
1663 static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
1665 unsigned int q_vectors = interface->num_q_vectors;
1666 unsigned int rxr_remaining = interface->num_rx_queues;
1667 unsigned int txr_remaining = interface->num_tx_queues;
1668 unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1671 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1672 for (; rxr_remaining; v_idx++) {
1673 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1678 /* update counts and index */
1684 for (; v_idx < q_vectors; v_idx++) {
1685 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1686 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1688 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1695 /* update counts and index */
1696 rxr_remaining -= rqpv;
1697 txr_remaining -= tqpv;
1705 interface->num_tx_queues = 0;
1706 interface->num_rx_queues = 0;
1707 interface->num_q_vectors = 0;
1710 fm10k_free_q_vector(interface, v_idx);
1716 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
1717 * @interface: board private structure to initialize
1719 * This function frees the memory allocated to the q_vectors. In addition if
1720 * NAPI is enabled it will delete any references to the NAPI struct prior
1721 * to freeing the q_vector.
1723 static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
1725 int v_idx = interface->num_q_vectors;
1727 interface->num_tx_queues = 0;
1728 interface->num_rx_queues = 0;
1729 interface->num_q_vectors = 0;
1732 fm10k_free_q_vector(interface, v_idx);
/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
1741 static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
1743 pci_disable_msix(interface->pdev);
1744 kfree(interface->msix_entries);
1745 interface->msix_entries = NULL;
/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
1755 static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
1757 struct fm10k_hw *hw = &interface->hw;
1758 int v_budget, vector;
1760 /* It's easy to be greedy for MSI-X vectors, but it really
1761 * doesn't do us much good if we have a lot more vectors
1762 * than CPU's. So let's be conservative and only ask for
1763 * (roughly) the same number of vectors as there are CPU's.
1764 * the default is to use pairs of vectors
1766 v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
1767 v_budget = min_t(u16, v_budget, num_online_cpus());
1769 /* account for vectors not related to queues */
1770 v_budget += NON_Q_VECTORS(hw);
1772 /* At the same time, hardware can only support a maximum of
 * hw->mac.max_msix_vectors vectors.  With features
1774 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
1775 * descriptor queues supported by our device. Thus, we cap it off in
1776 * those rare cases where the cpu count also exceeds our vector limit.
1778 v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
1780 /* A failure in MSI-X entry allocation is fatal. */
1781 interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
1783 if (!interface->msix_entries)
1786 /* populate entry values */
1787 for (vector = 0; vector < v_budget; vector++)
1788 interface->msix_entries[vector].entry = vector;
1790 /* Attempt to enable MSI-X with requested value */
1791 v_budget = pci_enable_msix_range(interface->pdev,
1792 interface->msix_entries,
1796 kfree(interface->msix_entries);
1797 interface->msix_entries = NULL;
1801 /* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);

	return 0;
}
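/* Worked example (hypothetical system): with 16 Rx and 16 Tx queues on an
 * 8-CPU machine, v_budget starts at max(16, 16) = 16, is trimmed to the 8
 * online CPUs, then NON_Q_VECTORS(hw) is added for the non-queue (e.g.
 * mailbox) interrupts.  Whatever pci_enable_msix_range() actually grants,
 * minus those non-queue vectors, becomes num_q_vectors.
 */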
/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
1813 static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
1815 struct net_device *dev = interface->netdev;
1816 int pc, offset, rss_i, i, q_idx;
1817 u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
1818 u8 num_pcs = netdev_get_num_tc(dev);
1823 rss_i = interface->ring_feature[RING_F_RSS].indices;
1825 for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
1827 for (i = 0; i < rss_i; i++) {
1828 interface->tx_ring[offset + i]->reg_idx = q_idx;
1829 interface->tx_ring[offset + i]->qos_pc = pc;
1830 interface->rx_ring[offset + i]->reg_idx = q_idx;
1831 interface->rx_ring[offset + i]->qos_pc = pc;
/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
1845 static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
1849 for (i = 0; i < interface->num_rx_queues; i++)
1850 interface->rx_ring[i]->reg_idx = i;
1852 for (i = 0; i < interface->num_tx_queues; i++)
1853 interface->tx_ring[i]->reg_idx = i;
1857 * fm10k_assign_rings - Map rings to network devices
1858 * @interface: Interface structure containing rings and devices
1860 * This function is meant to go though and configure both the network
1861 * devices so that they contain rings, and configure the rings so that
1862 * they function with their network devices.
1864 static void fm10k_assign_rings(struct fm10k_intfc *interface)
1866 if (fm10k_cache_ring_qos(interface))
1869 fm10k_cache_ring_rss(interface);
1872 static void fm10k_init_reta(struct fm10k_intfc *interface)
1874 u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
1877 /* If the netdev is initialized we have to maintain table if possible */
1878 if (interface->netdev->reg_state) {
1879 for (i = FM10K_RETA_SIZE; i--;) {
1880 reta = interface->reta[i];
1881 if ((((reta << 24) >> 24) < rss_i) &&
1882 (((reta << 16) >> 24) < rss_i) &&
1883 (((reta << 8) >> 24) < rss_i) &&
1884 (((reta) >> 24) < rss_i))
1886 goto repopulate_reta;
1889 /* do nothing if all of the elements are in bounds */
1894 /* Populate the redirection table 4 entries at a time. To do this
1895 * we are generating the results for n and n+2 and then interleaving
1896 * those with the results with n+1 and n+3.
1898 for (i = FM10K_RETA_SIZE; i--;) {
1899 /* first pass generates n and n+2 */
1900 base = ((i * 0x00040004) + 0x00020000) * rss_i;
1901 reta = (base & 0x3F803F80) >> 7;
1903 /* second pass generates n+1 and n+3 */
1904 base += 0x00010001 * rss_i;
1905 reta |= (base & 0x3F803F80) << 1;
		interface->reta[i] = reta;
	}
}
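/* What the arithmetic above produces (illustrative): the 128-entry table is
 * generated four entries per 32-bit word so that entry j maps to Rx queue
 * (j * rss_i) / 128, e.g. with rss_i = 4 entries 0-31 hit queue 0, entries
 * 32-63 hit queue 1, and so on, spreading flows evenly across the enabled
 * queues.
 */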
1912 * fm10k_init_queueing_scheme - Determine proper queueing scheme
1913 * @interface: board private structure to initialize
1915 * We determine which queueing scheme to use based on...
1916 * - Hardware queue count (num_*_queues)
1917 * - defined by miscellaneous hardware support/features (RSS, etc.)
1919 int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
1923 /* Number of supported queues */
1924 fm10k_set_num_queues(interface);
1926 /* Configure MSI-X capability */
1927 err = fm10k_init_msix_capability(interface);
1929 dev_err(&interface->pdev->dev,
1930 "Unable to initialize MSI-X capability\n");
1934 /* Allocate memory for queues */
1935 err = fm10k_alloc_q_vectors(interface);
1939 /* Map rings to devices, and map devices to physical queues */
1940 fm10k_assign_rings(interface);
1942 /* Initialize RSS redirection table */
1943 fm10k_init_reta(interface);
1949 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
1950 * @interface: board private structure to clear queueing scheme on
1952 * We go through and clear queueing specific resources and reset the structure
1953 * to pre-load conditions
1955 void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
1957 fm10k_free_q_vectors(interface);
1958 fm10k_reset_msix_capability(interface);