/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include <linux/types.h>
#include <linux/module.h>
#include <net/ipv6.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <linux/if_macvlan.h>
#include <linux/prefetch.h>

#include "fm10k.h"

#define DRV_VERSION	"0.15.2-k"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] =
	"Intel(R) Ethernet Switch Host Interface Driver";
static const char fm10k_copyright[] =
	"Copyright (c) 2013 Intel Corporation.";

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* single workqueue for entire fm10k driver */
struct workqueue_struct *fm10k_workqueue = NULL;

/**
 * fm10k_init_module - Driver Registration Routine
 *
 * fm10k_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init fm10k_init_module(void)
{
	pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
	pr_info("%s\n", fm10k_copyright);

	/* create driver workqueue */
	fm10k_workqueue = create_workqueue("fm10k");

	return fm10k_register_pci_driver();
}
module_init(fm10k_init_module);

/**
 * fm10k_exit_module - Driver Exit Cleanup Routine
 *
 * fm10k_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit fm10k_exit_module(void)
{
	fm10k_unregister_pci_driver();

	/* destroy driver workqueue */
	flush_workqueue(fm10k_workqueue);
	destroy_workqueue(fm10k_workqueue);
	fm10k_workqueue = NULL;
}
module_exit(fm10k_exit_module);

static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
				    struct fm10k_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* Only page will be NULL if buffer was consumed */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;

	return true;
}

/**
 * fm10k_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
{
	union fm10k_rx_desc *rx_desc;
	struct fm10k_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = FM10K_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer[i];
	i -= rx_ring->count;

	do {
		if (!fm10k_alloc_mapped_page(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = FM10K_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer;
			i -= rx_ring->count;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->d.staterr = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();

		/* notify hardware of new descriptors */
		writel(i, rx_ring->tail);
	}
}

/**
 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the interface
 **/
static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
				struct fm10k_rx_buffer *old_buff)
{
	struct fm10k_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	*new_buff = *old_buff;

	/* sync the buffer for use by the device */
	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
					 old_buff->page_offset,
					 FM10K_RX_BUFSZ,
					 DMA_FROM_DEVICE);
}

static inline bool fm10k_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
				    struct page *page,
				    unsigned int __maybe_unused truesize)
{
	/* avoid re-using remote pages */
	if (unlikely(fm10k_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely(page_count(page) != 1))
		return false;

	/* flip page offset to other buffer */
	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
#else
	/* move offset up to the next cache line */
	rx_buffer->page_offset += truesize;

	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
		return false;
#endif

	/* Even if we own the page, we are not allowed to use atomic_set()
	 * This would break get_page_unless_zero() users.
	 */
	atomic_inc(&page->_count);

	return true;
}
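
/* Worked example (editorial note, not part of the original source):
 * assuming 4 KiB pages and the usual 2 KiB FM10K_RX_BUFSZ, the XOR above
 * simply flips between the two half-page buffers on every reuse:
 *
 *	page_offset = 0x000;		  first half owned by hardware
 *	page_offset ^= FM10K_RX_BUFSZ;	  now 0x800, second half
 *	page_offset ^= FM10K_RX_BUFSZ;	  back to 0x000
 *
 * The atomic_inc() above is what keeps the half still held by the stack
 * alive while the other half is handed back to the ring.
 */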

/**
 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_buffer: buffer containing page to add
 * @rx_desc: descriptor containing length of buffer written by hardware
 * @skb: sk_buff to place the data into
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * This is done either through a direct copy if the data in the buffer is
 * less than the skb header size, otherwise it will just attach the page as
 * a frag to the skb.
 *
 * The function will then update the page offset if necessary and return
 * true if the buffer can be reused by the interface.
 **/
static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct page *page = rx_buffer->page;
	unsigned char *va = page_address(page) + rx_buffer->page_offset;
	unsigned int size = le16_to_cpu(rx_desc->w.length);
#if (PAGE_SIZE < 8192)
	unsigned int truesize = FM10K_RX_BUFSZ;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int pull_len;

	if (unlikely(skb_is_nonlinear(skb)))
		goto add_tail_frag;

	if (likely(size <= FM10K_RX_HDR_LEN)) {
		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));

		/* page is not reserved, we can reuse buffer as-is */
		if (likely(!fm10k_page_is_reserved(page)))
			return true;

		/* this page cannot be reused so discard it */
		__free_page(page);
		return false;
	}

	/* we need the header to contain the greater of either ETH_HLEN or
	 * 60 bytes if the skb->len is less than 60 for skb_pad.
	 */
	pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));

	/* update all of the pointers */
	va += pull_len;
	size -= pull_len;

add_tail_frag:
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			(unsigned long)va & ~PAGE_MASK, size, truesize);

	return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	struct fm10k_rx_buffer *rx_buffer;
	struct page *page;

	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);

	if (likely(!skb)) {
		void *page_addr = page_address(page) +
				  rx_buffer->page_offset;

		/* prefetch first cache line of first page */
		prefetch(page_addr);
#if L1_CACHE_BYTES < 128
		prefetch(page_addr + L1_CACHE_BYTES);
#endif

		/* allocate a skb to store the frags */
		skb = napi_alloc_skb(&rx_ring->q_vector->napi,
				     FM10K_RX_HDR_LEN);
		if (unlikely(!skb)) {
			rx_ring->rx_stats.alloc_failed++;
			return NULL;
		}

		/* we will be copying header into skb->data in
		 * pskb_may_pull so it is in our interest to prefetch
		 * it now to avoid a possible cache miss
		 */
		prefetchw(skb->data);
	}

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      FM10K_RX_BUFSZ,
				      DMA_FROM_DEVICE);

	/* pull page into skb */
	if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
		/* hand second half of page back to the ring */
		fm10k_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;

	return skb;
}

static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
				     union fm10k_rx_desc *rx_desc,
				     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (fm10k_test_staterr(rx_desc,
			       FM10K_RXD_STATUS_L4E |
			       FM10K_RXD_STATUS_L4E2 |
			       FM10K_RXD_STATUS_IPE |
			       FM10K_RXD_STATUS_IPE2)) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
		skb->encapsulation = true;
	else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
		return;

	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

#define FM10K_RSS_L4_TYPES_MASK \
	((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
	 (1ul << FM10K_RSSTYPE_IPV6_UDP))

static inline void fm10k_rx_hash(struct fm10k_ring *ring,
				 union fm10k_rx_desc *rx_desc,
				 struct sk_buff *skb)
{
	u16 rss_type;

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
	if (!rss_type)
		return;

	skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
		     (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
			      union fm10k_rx_desc *rx_desc,
			      struct sk_buff *skb)
{
	struct fm10k_intfc *interface = rx_ring->q_vector->interface;

	FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;

	if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
		fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
					  le64_to_cpu(rx_desc->q.timestamp));
}

static void fm10k_type_trans(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc __maybe_unused *rx_desc,
			     struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;
	struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);

	/* check to see if DGLORT belongs to a MACVLAN */
	if (l2_accel) {
		u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;

		idx -= l2_accel->dglort;
		if (idx < l2_accel->size && l2_accel->macvlan[idx])
			dev = l2_accel->macvlan[idx];
		else
			l2_accel = NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (!l2_accel)
		return;

	/* update MACVLAN statistics */
	macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
			 !!(rx_desc->w.hdr_info &
			    cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
}

/**
 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
					     union fm10k_rx_desc *rx_desc,
					     struct sk_buff *skb)
{
	unsigned int len = skb->len;

	fm10k_rx_hash(rx_ring, rx_desc, skb);

	fm10k_rx_checksum(rx_ring, rx_desc, skb);

	fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);

	FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;

	skb_record_rx_queue(skb, rx_ring->queue_index);

	FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;

	if (rx_desc->w.vlan) {
		u16 vid = le16_to_cpu(rx_desc->w.vlan);

		if ((vid & VLAN_VID_MASK) != rx_ring->vid)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		else if (vid & VLAN_PRIO_MASK)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       vid & VLAN_PRIO_MASK);
	}

	fm10k_type_trans(rx_ring, rx_desc, skb);

	return len;
}

/**
 * fm10k_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
			     union fm10k_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(FM10K_RX_DESC(rx_ring, ntc));

	if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
		return false;

	return true;
}

/**
 * fm10k_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
				  union fm10k_rx_desc *rx_desc,
				  struct sk_buff *skb)
{
	if (unlikely((fm10k_test_staterr(rx_desc,
					 FM10K_RXD_STATUS_RXE)))) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_stats.errors++;
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * fm10k_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
			      struct sk_buff *skb)
{
	napi_gro_receive(&q_vector->napi, skb);
}

static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *rx_ring,
			       int budget)
{
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = fm10k_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union fm10k_rx_desc *rx_desc;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
			fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

		if (!rx_desc->d.staterr)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		/* retrieve a buffer from the ring */
		skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);

		/* exit if we failed to retrieve a buffer */
		if (!skb)
			break;

		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (fm10k_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* populate checksum, timestamp, VLAN, and protocol */
		total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);

		fm10k_receive_skb(q_vector, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_packets;
	rx_ring->stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	return total_packets < budget;
}

#define VXLAN_HLEN (sizeof(struct udphdr) + 8)
static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
{
	struct fm10k_intfc *interface = netdev_priv(skb->dev);
	struct fm10k_vxlan_port *vxlan_port;

	/* we can only offload a vxlan if we recognize it as such */
	vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
					      struct fm10k_vxlan_port, list);

	if (!vxlan_port)
		return NULL;
	if (vxlan_port->port != udp_hdr(skb)->dest)
		return NULL;

	/* return offset of udp_hdr plus 8 bytes for VXLAN header */
	return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
}

#define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
#define NVGRE_TNI htons(0x2000)
struct fm10k_nvgre_hdr {
	__be16 flags;
	__be16 proto;
	__be32 tni;
};

static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
{
	struct fm10k_nvgre_hdr *nvgre_hdr;
	int hlen = ip_hdrlen(skb);

	/* currently only IPv4 is supported due to hlen above */
	if (vlan_get_protocol(skb) != htons(ETH_P_IP))
		return NULL;

	/* our transport header should be NVGRE */
	nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);

	/* verify all reserved flags are 0 */
	if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
		return NULL;

	/* report start of ethernet header */
	if (nvgre_hdr->flags & NVGRE_TNI)
		return (struct ethhdr *)(nvgre_hdr + 1);

	return (struct ethhdr *)(&nvgre_hdr->tni);
}
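
/* Editorial sketch (an assumption based on the common NVGRE layout, not
 * taken from the original source): the first 16 bits of a GRE header are
 * flag bits.  NVGRE_TNI (0x2000) is the key-present bit, meaning a
 * 32-bit TNI/FlowID word follows the proto field, while
 * FM10K_NVGRE_RESERVED0_FLAGS (0x9FFF) covers the checksum and sequence
 * bits plus the reserved and version fields, which must all be zero.
 * Hence the two returns above: with a key present the inner Ethernet
 * header starts after the full 8-byte fm10k_nvgre_hdr; without one it
 * starts 4 bytes earlier, where the tni field would sit.
 */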

__be16 fm10k_tx_encap_offload(struct sk_buff *skb)
{
	u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
	struct ethhdr *eth_hdr;

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB))
		return 0;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (l4_hdr) {
	case IPPROTO_UDP:
		eth_hdr = fm10k_port_is_vxlan(skb);
		break;
	case IPPROTO_GRE:
		eth_hdr = fm10k_gre_is_nvgre(skb);
		break;
	default:
		return 0;
	}

	if (!eth_hdr)
		return 0;

	switch (eth_hdr->h_proto) {
	case htons(ETH_P_IP):
		inner_l4_hdr = inner_ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return 0;
	}

	switch (inner_l4_hdr) {
	case IPPROTO_TCP:
		inner_l4_hlen = inner_tcp_hdrlen(skb);
		break;
	case IPPROTO_UDP:
		inner_l4_hlen = 8;
		break;
	default:
		return 0;
	}

	/* The hardware allows tunnel offloads only if the combined inner and
	 * outer header is 184 bytes or less
	 */
	if (skb_inner_transport_header(skb) + inner_l4_hlen -
	    skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
		return 0;

	return eth_hdr->h_proto;
}

static int fm10k_tso(struct fm10k_ring *tx_ring,
		     struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	unsigned char *th;
	u8 hdrlen;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	/* compute header lengths */
	if (skb->encapsulation) {
		if (!fm10k_tx_encap_offload(skb))
			goto err_vxlan;
		th = skb_inner_transport_header(skb);
	} else {
		th = skb_transport_header(skb);
	}

	/* compute offset from SOF to transport header and add header len */
	hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);

	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * hdrlen;

	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = hdrlen;
	tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);

	return 1;

err_vxlan:
	tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
	if (net_ratelimit())
		netdev_err(tx_ring->netdev,
			   "TSO requested for unsupported tunnel, disabling offload\n");
	return -1;
}

static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
			  struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_desc *tx_desc;
	union {
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
		u8 *raw;
	} network_hdr;
	u8 l4_hdr = 0;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto no_csum;

	if (skb->encapsulation) {
		protocol = fm10k_tx_encap_offload(skb);
		if (!protocol) {
			if (skb_checksum_help(skb)) {
				dev_warn(tx_ring->dev,
					 "failed to offload encap csum!\n");
				tx_ring->tx_stats.csum_err++;
			}
			goto no_csum;
		}
		network_hdr.raw = skb_inner_network_header(skb);
	} else {
		protocol = vlan_get_protocol(skb);
		network_hdr.raw = skb_network_header(skb);
	}

	switch (protocol) {
	case htons(ETH_P_IP):
		l4_hdr = network_hdr.ipv4->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = network_hdr.ipv6->nexthdr;
		break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but ip version=%x!\n",
				 protocol);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	case IPPROTO_GRE:
		if (skb->encapsulation)
			break;
	default:
		if (unlikely(net_ratelimit())) {
			dev_warn(tx_ring->dev,
				 "partial checksum but l4 proto=%x!\n",
				 l4_hdr);
		}
		tx_ring->tx_stats.csum_err++;
		goto no_csum;
	}

	/* update TX checksum flag */
	first->tx_flags |= FM10K_TX_FLAGS_CSUM;

no_csum:
	/* populate Tx descriptor header size and mss */
	tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
	tx_desc->hdrlen = 0;
	tx_desc->mss = 0;
}

#define FM10K_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
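
/* Editorial note: FM10K_SET_FLAG() translates a bit in _input into a bit
 * in _result using a multiply or divide instead of a branch.  For
 * hypothetical single-bit values: _flag = 0x01, _result = 0x08 gives
 * (_input & 0x01) * 8, i.e. 0x08 when the flag is set; _flag = 0x10,
 * _result = 0x02 gives (_input & 0x10) / 8, i.e. 0x02 when set.  Both
 * _flag and _result must be powers of two for this to hold.
 */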

static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 desc_flags = 0;

	/* set timestamping bits */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		desc_flags |= FM10K_TXD_FLAG_TIME;

	/* set checksum offload bits */
	desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
				     FM10K_TXD_FLAG_CSUM);

	return desc_flags;
}

static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
			       struct fm10k_tx_desc *tx_desc, u16 i,
			       dma_addr_t dma, unsigned int size, u8 desc_flags)
{
	/* set RS and INT for last frame in a cache line */
	if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
		desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;

	/* record values to descriptor */
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->flags = desc_flags;
	tx_desc->buflen = cpu_to_le16(size);

	/* return true if we just wrapped the ring */
	return i == tx_ring->count;
}
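
/* Editorial note: assuming FM10K_TXD_WB_FIFO_SIZE is a power of two, the
 * (++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0 test above fires once per
 * writeback group (for a size of 16: descriptor indexes 15, 31, 47, ...),
 * so RS and INT are requested per group rather than per descriptor,
 * batching descriptor writebacks and interrupts.
 */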

static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available */
	if (likely(fm10k_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
{
	if (likely(fm10k_desc_unused(tx_ring) >= size))
		return 0;
	return __fm10k_maybe_stop_tx(tx_ring, size);
}

static void fm10k_tx_map(struct fm10k_ring *tx_ring,
			 struct fm10k_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	unsigned char *data;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u16 i = tx_ring->next_to_use;
	u8 flags = fm10k_tx_desc_flags(skb, tx_flags);

	tx_desc = FM10K_TX_DESC(tx_ring, i);

	/* add HW VLAN tag */
	if (skb_vlan_tag_present(skb))
		tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	else
		tx_desc->vlan = 0;

	size = skb_headlen(skb);
	data = skb->data;

	dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);

	data_len = skb->data_len;
	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
			if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
					       FM10K_MAX_DATA_PER_TXD, flags)) {
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += FM10K_MAX_DATA_PER_TXD;
			size -= FM10K_MAX_DATA_PER_TXD;
		}

		if (likely(!data_len))
			break;

		if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
				       dma, size, flags)) {
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer[i];
	}

	/* write last descriptor with LAST bit set */
	flags |= FM10K_TXD_FLAG_LAST;

	if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
		i = 0;

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer[i];
		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring)
{
	struct fm10k_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));

	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;

	tso = fm10k_tso(tx_ring, first);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		fm10k_tx_csum(tx_ring, first);

	fm10k_tx_map(tx_ring, first);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
{
	return ring->stats.packets;
}

static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
	/* use SW head and tail until we have real hardware */
	u32 head = ring->next_to_clean;
	u32 tail = ring->next_to_use;

	return ((head <= tail) ? tail : tail + ring->count) - head;
}
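
/* Editorial example: on a 256-entry ring with next_to_clean = 250 and
 * next_to_use = 5 the tail has wrapped, so the pending count is
 * (5 + 256) - 250 = 11 descriptors; without a wrap (head 10, tail 15)
 * it is simply 15 - 10 = 5.
 */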

bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
{
	u32 tx_done = fm10k_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = fm10k_get_tx_pending(tx_ring);

	clear_check_for_tx_hang(tx_ring);

	/* Check for a hung queue, but be thorough.  This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending.  By
	 * requiring this to fail twice we avoid races with
	 * clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if (!tx_pending || (tx_done_old != tx_done)) {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);

		return false;
	}

	/* make sure it is true for two checks in a row */
	return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
}

/**
 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
 * @interface: driver private struct
 **/
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(__FM10K_DOWN, &interface->state)) {
		interface->tx_timeout_count++;
		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
		fm10k_service_event_schedule(interface);
	}
}

/**
 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
			       struct fm10k_ring *tx_ring)
{
	struct fm10k_intfc *interface = q_vector->interface;
	struct fm10k_tx_buffer *tx_buffer;
	struct fm10k_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__FM10K_DOWN, &interface->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer[i];
	tx_desc = FM10K_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
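
	/* Editorial note: i now runs from -count up to -1 rather than
	 * 0 to count - 1.  That turns the wrap check inside the loop
	 * into a cheap (!i) test, and a single "i += tx_ring->count"
	 * after the loop restores the real ring index.
	 */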

	do {
		struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_consume_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer;
				tx_desc = FM10K_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer;
			tx_desc = FM10K_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct fm10k_hw *hw = &interface->hw;

		netif_err(interface, drv, tx_ring->netdev,
			  "Detected Tx Unit Hang\n"
			  "  Tx Queue             <%d>\n"
			  "  TDH, TDT             <%x>, <%x>\n"
			  "  next_to_use          <%x>\n"
			  "  next_to_clean        <%x>\n",
			  tx_ring->queue_index,
			  fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
			  fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
			  tx_ring->next_to_use, i);

		netif_stop_subqueue(tx_ring->netdev,
				    tx_ring->queue_index);

		netif_info(interface, probe, tx_ring->netdev,
			   "tx hang %d detected on queue %d, resetting interface\n",
			   interface->tx_timeout_count + 1,
			   tx_ring->queue_index);

		fm10k_tx_timeout_reset(interface);

		/* the netdev is about to reset, no point in enabling stuff */
		return true;
	}

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__FM10K_DOWN, &interface->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * fm10k_update_itr - update the dynamic ITR value based on packet size
 *
 * Stores a new ITR value based strictly on packet size.  The
 * divisors and thresholds used by this function were determined based
 * on theoretical maximum wire speed and testing data, in order to
 * minimize response time while increasing bulk throughput.
 *
 * @ring_container: Container for rings to have ITR updated
 **/
static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
{
	unsigned int avg_wire_size, packets;

	/* Only update ITR if we are using adaptive setting */
	if (!(ring_container->itr & FM10K_ITR_ADAPTIVE))
		goto clear_counts;

	packets = ring_container->total_packets;
	if (!packets)
		goto clear_counts;

	avg_wire_size = ring_container->total_bytes / packets;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		avg_wire_size /= 3;
	else
		avg_wire_size /= 2;

	/* write back value and retain adaptive flag */
	ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;

clear_counts:
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;
}
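
/* Worked example (editorial): full-size 1514-byte frames give
 * avg_wire_size = 1514 + 24 = 1538, outside the 300-1200 boost window,
 * so the stored interval is 1538 / 2 = 769 (plus the adaptive flag);
 * 64-byte frames give (64 + 24) / 2 = 44, a much shorter interval and
 * therefore more frequent interrupts for latency-sensitive traffic.
 */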

static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
{
	/* Enable auto-mask and clear the current mask */
	u32 itr = FM10K_ITR_ENABLE;

	/* Update Tx ITR */
	fm10k_update_itr(&q_vector->tx);

	/* Update Rx ITR */
	fm10k_update_itr(&q_vector->rx);

	/* Store Tx itr in timer slot 0 */
	itr |= (q_vector->tx.itr & FM10K_ITR_MAX);

	/* Shift Rx itr to timer slot 1 */
	itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;

	/* Write the final value to the ITR register */
	writel(itr, q_vector->itr);
}

static int fm10k_poll(struct napi_struct *napi, int budget)
{
	struct fm10k_q_vector *q_vector =
			       container_of(napi, struct fm10k_q_vector, napi);
	struct fm10k_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	fm10k_for_each_ring(ring, q_vector->tx)
		clean_complete &= fm10k_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't
	 * allow the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	fm10k_for_each_ring(ring, q_vector->rx)
		clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
						     per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* all work done, exit the polling mode */
	napi_complete(napi);

	/* re-enable the q_vector */
	fm10k_qv_enable(q_vector);

	return 0;
}

/**
 * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
 * @interface: board private structure to initialize
 *
 * When QoS (Quality of Service) is enabled, allocate queues for
 * each traffic class.  If multiqueue isn't available, then abort QoS
 * and RSS.
 *
 * This function handles all combinations of QoS and RSS.
 *
 **/
static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	struct fm10k_ring_feature *f;
	int rss_i, i;
	int pcs;

	/* Map queue offset and counts onto allocated tx queues */
	pcs = netdev_get_num_tc(dev);

	if (pcs <= 1)
		return false;

	/* set QoS mask and indices */
	f = &interface->ring_feature[RING_F_QOS];
	f->indices = pcs;
	f->mask = (1 << fls(pcs - 1)) - 1;

	/* determine the upper limit for our current DCB mode */
	rss_i = interface->hw.mac.max_queues / pcs;
	rss_i = 1 << (fls(rss_i) - 1);

	/* set RSS mask and indices */
	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	/* configure pause class to queue mapping */
	for (i = 0; i < pcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	interface->num_rx_queues = rss_i * pcs;
	interface->num_tx_queues = rss_i * pcs;

	return true;
}
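
/* Worked example (editorial): with pcs = 4 traffic classes and 64
 * hardware queues, rss_i starts as 64 / 4 = 16, already a power of two,
 * so the masks become 0x3 (QoS) and 0xf (RSS) and the interface ends up
 * with 16 * 4 = 64 Tx and 64 Rx queues, 16 per traffic class.
 */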

/**
 * fm10k_set_rss_queues: Allocate queues for RSS
 * @interface: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
{
	struct fm10k_ring_feature *f;
	u16 rss_i;

	f = &interface->ring_feature[RING_F_RSS];
	rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);

	/* record indices and power of 2 mask for RSS */
	f->indices = rss_i;
	f->mask = (1 << fls(rss_i - 1)) - 1;

	interface->num_rx_queues = rss_i;
	interface->num_tx_queues = rss_i;

	return true;
}

/**
 * fm10k_set_num_queues: Allocate queues for device, feature dependent
 * @interface: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
	/* Start with base case */
	interface->num_rx_queues = 1;
	interface->num_tx_queues = 1;

	if (fm10k_set_qos_queues(interface))
		return;

	fm10k_set_rss_queues(interface);
}

/**
 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
 * @interface: board private structure to initialize
 * @v_count: q_vectors allocated on interface, used for ring interleaving
 * @v_idx: index of vector in interface struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
				unsigned int v_count, unsigned int v_idx,
				unsigned int txr_count, unsigned int txr_idx,
				unsigned int rxr_count, unsigned int rxr_idx)
{
	struct fm10k_q_vector *q_vector;
	struct fm10k_ring *ring;
	int ring_count, size;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct fm10k_q_vector) +
	       (sizeof(struct fm10k_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(interface->netdev, &q_vector->napi,
		       fm10k_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and interface together */
	interface->q_vector[v_idx] = q_vector;
	q_vector->interface = interface;
	q_vector->v_idx = v_idx;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* save Tx ring container info */
	q_vector->tx.ring = ring;
	q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
	q_vector->tx.itr = interface->tx_itr;
	q_vector->tx.count = txr_count;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Tx specific ring traits */
		ring->count = interface->tx_ring_count;
		ring->queue_index = txr_idx;

		/* assign ring to interface */
		interface->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	/* save Rx ring container info */
	q_vector->rx.ring = ring;
	q_vector->rx.itr = interface->rx_itr;
	q_vector->rx.count = rxr_count;

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &interface->pdev->dev;
		ring->netdev = interface->netdev;
		rcu_assign_pointer(ring->l2_accel, interface->l2_accel);

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* apply Rx specific ring traits */
		ring->count = interface->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to interface */
		interface->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	fm10k_dbg_q_vector_init(q_vector);

	return 0;
}

/**
 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
 * @interface: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
{
	struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
	struct fm10k_ring *ring;

	fm10k_dbg_q_vector_exit(q_vector);

	fm10k_for_each_ring(ring, q_vector->tx)
		interface->tx_ring[ring->queue_index] = NULL;

	fm10k_for_each_ring(ring, q_vector->rx)
		interface->rx_ring[ring->queue_index] = NULL;

	interface->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}

/**
 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
 * @interface: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
{
	unsigned int q_vectors = interface->num_q_vectors;
	unsigned int rxr_remaining = interface->num_rx_queues;
	unsigned int txr_remaining = interface->num_tx_queues;
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
						   0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
					   tqpv, txr_idx,
					   rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);

	return -ENOMEM;
}

/**
 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
 * @interface: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
	int v_idx = interface->num_q_vectors;

	interface->num_tx_queues = 0;
	interface->num_rx_queues = 0;
	interface->num_q_vectors = 0;

	while (v_idx--)
		fm10k_free_q_vector(interface, v_idx);
}

/**
 * fm10k_reset_msix_capability - reset MSI-X capability
 * @interface: board private structure to initialize
 *
 * Reset the MSI-X capability back to its starting state
 **/
static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
{
	pci_disable_msix(interface->pdev);
	kfree(interface->msix_entries);
	interface->msix_entries = NULL;
}

/**
 * fm10k_init_msix_capability - configure MSI-X capability
 * @interface: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int v_budget, vector;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPUs.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPUs.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
	v_budget = min_t(u16, v_budget, num_online_cpus());

	/* account for vectors not related to queues */
	v_budget += NON_Q_VECTORS(hw);

	/* At the same time, hardware can only support a maximum of
	 * hw.mac->max_msix_vectors vectors.  With features
	 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
	 * descriptor queues supported by our device.  Thus, we cap it off in
	 * those rare cases where the CPU count also exceeds our vector limit.
	 */
	v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);

	/* A failure in MSI-X entry allocation is fatal. */
	interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!interface->msix_entries)
		return -ENOMEM;

	/* populate entry values */
	for (vector = 0; vector < v_budget; vector++)
		interface->msix_entries[vector].entry = vector;

	/* Attempt to enable MSI-X with requested value */
	v_budget = pci_enable_msix_range(interface->pdev,
					 interface->msix_entries,
					 MIN_MSIX_COUNT(hw),
					 v_budget);
	if (v_budget < 0) {
		kfree(interface->msix_entries);
		interface->msix_entries = NULL;
		return -ENOMEM;
	}

	/* record the number of queues available for q_vectors */
	interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);

	return 0;
}
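
/* Worked example (editorial, assuming a single non-queue mailbox vector
 * in NON_Q_VECTORS): 8 Rx and 8 Tx queues on a 4-CPU system request
 * min(max(8, 8), 4) + 1 = 5 vectors; if pci_enable_msix_range() grants
 * all 5, num_q_vectors becomes 4 and each q_vector services one Tx/Rx
 * ring pair.
 */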

/**
 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for QoS
 **/
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
	struct net_device *dev = interface->netdev;
	int pc, offset, rss_i, i, q_idx;
	u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
	u8 num_pcs = netdev_get_num_tc(dev);

	if (num_pcs <= 1)
		return false;

	rss_i = interface->ring_feature[RING_F_RSS].indices;

	for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
		q_idx = pc;
		for (i = 0; i < rss_i; i++) {
			interface->tx_ring[offset + i]->reg_idx = q_idx;
			interface->tx_ring[offset + i]->qos_pc = pc;
			interface->rx_ring[offset + i]->reg_idx = q_idx;
			interface->rx_ring[offset + i]->qos_pc = pc;
			q_idx += pc_stride;
		}
	}

	return true;
}

/**
 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @interface: Interface structure containing rings and devices
 *
 * Cache the descriptor ring offsets for RSS
 **/
static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		interface->rx_ring[i]->reg_idx = i;

	for (i = 0; i < interface->num_tx_queues; i++)
		interface->tx_ring[i]->reg_idx = i;
}

/**
 * fm10k_assign_rings - Map rings to network devices
 * @interface: Interface structure containing rings and devices
 *
 * This function is meant to go through and configure both the network
 * devices so that they contain rings, and configure the rings so that
 * they function with their network devices.
 **/
static void fm10k_assign_rings(struct fm10k_intfc *interface)
{
	if (fm10k_cache_ring_qos(interface))
		return;

	fm10k_cache_ring_rss(interface);
}

static void fm10k_init_reta(struct fm10k_intfc *interface)
{
	u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
	u32 reta, base;

	/* If the netdev is initialized we have to maintain table if possible */
	if (interface->netdev->reg_state) {
		for (i = FM10K_RETA_SIZE; i--;) {
			reta = interface->reta[i];
			if ((((reta << 24) >> 24) < rss_i) &&
			    (((reta << 16) >> 24) < rss_i) &&
			    (((reta <<  8) >> 24) < rss_i) &&
			    (((reta)       >> 24) < rss_i))
				continue;
			goto repopulate_reta;
		}

		/* do nothing if all of the elements are in bounds */
		return;
	}

repopulate_reta:
	/* Populate the redirection table 4 entries at a time.  To do this
	 * we are generating the results for n and n+2 and then interleaving
	 * those with the results for n+1 and n+3.
	 */
	for (i = FM10K_RETA_SIZE; i--;) {
		/* first pass generates n and n+2 */
		base = ((i * 0x00040004) + 0x00020000) * rss_i;
		reta = (base & 0x3F803F80) >> 7;

		/* second pass generates n+1 and n+3 */
		base += 0x00010001 * rss_i;
		reta |= (base & 0x3F803F80) << 1;

		interface->reta[i] = reta;
	}
}
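
/* Worked example (editorial): the table holds 4 * FM10K_RETA_SIZE = 128
 * one-byte entries (assuming the 32-word table this hardware uses), and
 * the math above works out to entry j = j * rss_i / 128.  For rss_i = 4
 * and i = 31, base = (31 * 0x00040004 + 0x00020000) * 4 = 0x01F801F0,
 * so the first pass yields reta = 0x00030003 and the second pass fills
 * in the odd bytes, giving 0x03030303: entries 124-127 all map to
 * queue 3.
 */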

/**
 * fm10k_init_queueing_scheme - Determine proper queueing scheme
 * @interface: board private structure to initialize
 *
 * We determine which queueing scheme to use based on...
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
{
	int err;

	/* Number of supported queues */
	fm10k_set_num_queues(interface);

	/* Configure MSI-X capability */
	err = fm10k_init_msix_capability(interface);
	if (err) {
		dev_err(&interface->pdev->dev,
			"Unable to initialize MSI-X capability\n");
		return err;
	}

	/* Allocate memory for queues */
	err = fm10k_alloc_q_vectors(interface);
	if (err)
		return err;

	/* Map rings to devices, and map devices to physical queues */
	fm10k_assign_rings(interface);

	/* Initialize RSS redirection table */
	fm10k_init_reta(interface);

	return 0;
}

/**
 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
 * @interface: board private structure to clear queueing scheme on
 *
 * We go through and clear queueing specific resources and reset the structure
 * to pre-load conditions
 **/
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
{
	fm10k_free_q_vectors(interface);
	fm10k_reset_msix_capability(interface);
}