/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"
static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
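/* The "debug" parameter is a message level in the netif_msg_init() style,
 * not a bitmask: a level n enables the lowest n message-type bits, while the
 * default of -1 keeps the DEFAULT_MSG_ENABLE mask above. As a usage sketch
 * (module name assumed): "modprobe ena debug=16" enables all message types.
 */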
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger a reset.
	 * Bail out if a reset has already been triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timed out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
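/* A note on the function below: with accelerated RFS the stack needs a
 * reverse map from each RX queue's interrupt to the CPUs it runs on, so that
 * a flow can be steered to the queue whose IRQ fires on the consuming CPU.
 */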
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;

	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
		rxr->empty_rx_queue = 0;
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);
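	/* NUMA-aware allocation: first ask for memory on the node of the
	 * queue's IRQ CPU, then fall back to any node below rather than
	 * failing the queue bring-up.
	 */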
	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}
/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for all queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}
static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if the previously allocated page is still unused, reuse it */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);
		__free_page(page);
		return -EIO;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info =
			&rx_ring->rx_buffer_info[next_to_use];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						next_to_use);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* Add a memory barrier to make sure the descriptors are written
	 * before issuing the doorbell.
	 */
	wmb();

	ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
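		/* Refill to ring_size - 1: one slot is left empty, the usual
		 * ring convention that keeps a full ring distinguishable from
		 * an empty one by its producer and consumer indices.
		 */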
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling queue %d failed; allocated %d buffers out of %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u32 tx_pkts = 0;
	u16 next_to_clean;
	u16 req_id;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct ena_com_buf *ena_buf;
		struct sk_buff *skb;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the ring's circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
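	/* Double-check the stop condition under the TX queue lock: the
	 * unlocked test above only avoids taking the lock on the fast path,
	 * while the locked re-check prevents a lost wakeup racing with
	 * ena_start_xmit() stopping the queue.
	 */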
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info =
		&rx_ring->rx_buffer_info[*next_to_clean];
	u32 len;
	u32 buf = 0;
	void *va;

	len = ena_bufs[0].len;
	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);
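	/* Small packets (len <= rx_copybreak) are copied into a freshly
	 * allocated skb so the RX page stays mapped and can be reused;
	 * larger packets are attached to the skb as page fragments to
	 * avoid the copy.
	 */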
	if (len <= rx_ring->rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.skb_alloc_fail++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "Failed to allocate skb\n");
			return NULL;
		}

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}
	skb = napi_get_frags(rx_ring->napi);
	if (unlikely(!skb)) {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "Failed allocating skb\n");
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return NULL;
	}

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;
		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
		len = ena_bufs[++buf].len;
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the ring the packet was received on
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (rc)
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
							    ena_rx_ctx.descs,
							    rx_ring->ring_size);
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));
	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
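	/* ena_com keeps a table of moderation levels; the call below picks
	 * the next interval from that table based on the packet and byte
	 * rates observed since the previous adjustment.
	 */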
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}
static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
					struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_ring->smoothed_interval,
				tx_ring->smoothed_interval,
				true);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have a pointer to it.
	 * So we use one of them to reach the intr reg.
	 */
	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
}
static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

out:
	put_cpu();
}
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
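	/* A queue pair's TX and RX share one NAPI context and one MSI-X
	 * vector: TX cleaning gets a fixed fraction of the ring as its own
	 * budget, while RX consumes the NAPI budget passed in.
	 */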
	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when scheduled
		 * from the interrupt context (as opposed to busy polling)
		 */
		if (napi_complete_done(napi, rx_work_done)) {
			/* Tx and Rx share the same interrupt vector */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_intr_moderation(rx_ring, tx_ring);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	napi_schedule(&ena_napi->napi);

	return IRQ_HANDLED;
}
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int msix_vecs, rc;

	/* Reserve the max MSI-X vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
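	/* ENA_MAX_MSIX_VEC() accounts for one management vector (admin queue
	 * completions and async notifications) plus one vector per I/O queue
	 * pair.
	 */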
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X, vectors %d rc %d\n",
			  msix_vecs, rc);
		return -ENOSPC;
	}

	netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
		  msix_vecs);

	if (msix_vecs >= 1) {
		if (ena_init_rx_cpu_rmap(adapter))
			netif_warn(adapter, probe, adapter->netdev,
				   "Failed to map IRQs to CPUs\n");
	}

	adapter->msix_vecs = msix_vecs;

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}
static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;
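	/* Several of the ena_com calls below return -EPERM when the device
	 * does not support the corresponding RSS capability; that case is
	 * tolerated rather than treated as a fatal error.
	 */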
	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EPERM)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EPERM))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc, i;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	/* Enable completion queues interrupt */
	for (i = 0; i < adapter->num_queues; i++)
		ena_unmask_interrupt(&adapter->tx_ring[i],
				     &adapter->rx_ring[i]);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}
static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}
static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}
static int ena_up(struct ena_adapter *adapter)
{
	int rc;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}
static void ena_down(struct ena_adapter *adapter)
{
	netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);

	clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_down++;
	u64_stats_update_end(&adapter->syncp);

	netif_carrier_off(adapter->netdev);
	netif_tx_disable(adapter->netdev);

	/* After this point the napi handler won't enable the tx queue */
	ena_napi_disable_all(adapter);

	/* After destroying the queues there won't be any new interrupts */

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
		int rc;

		rc = ena_com_dev_reset(adapter->ena_dev);
		if (rc)
			dev_err(&adapter->pdev->dev, "Device reset failed\n");
	}

	ena_destroy_all_io_queues(adapter);

	ena_disable_io_intr_sync(adapter);
	ena_free_io_irq(adapter);
	ena_del_napi(adapter);

	ena_free_all_tx_bufs(adapter);
	ena_free_all_rx_bufs(adapter);
	ena_free_all_io_tx_resources(adapter);
	ena_free_all_io_rx_resources(adapter);
}
/* ena_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int ena_open(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* Notify the stack of the actual queue counts. */
	rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
	if (rc) {
		netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
		return rc;
	}

	rc = ena_up(adapter);
	if (rc)
		return rc;

	return rc;
}
/* ena_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int ena_close(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);

	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		ena_down(adapter);

	return 0;
}
static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
{
	u32 mss = skb_shinfo(skb)->gso_size;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
	u8 l4_protocol = 0;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
		ena_tx_ctx->l4_csum_enable = 1;
		if (mss) {
			ena_tx_ctx->tso_enable = 1;
			ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
			ena_tx_ctx->l4_csum_partial = 0;
		} else {
			ena_tx_ctx->tso_enable = 0;
			ena_meta->l4_hdr_len = 0;
			ena_tx_ctx->l4_csum_partial = 1;
		}

		switch (ip_hdr(skb)->version) {
		case IPVERSION:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
			if (ip_hdr(skb)->frag_off & htons(IP_DF))
				ena_tx_ctx->df = 1;
			if (mss)
				ena_tx_ctx->l3_csum_enable = 1;
			l4_protocol = ip_hdr(skb)->protocol;
			break;
		case 6:
			ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
			l4_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			break;
		}

		if (l4_protocol == IPPROTO_TCP)
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
		else
			ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;

		ena_meta->mss = mss;
		ena_meta->l3_hdr_len = skb_network_header_len(skb);
		ena_meta->l3_hdr_offset = skb_network_offset(skb);
		ena_tx_ctx->meta_valid = 1;

	} else {
		ena_tx_ctx->meta_valid = 0;
	}
}
static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
				       struct sk_buff *skb)
{
	int num_frags, header_len, rc;

	num_frags = skb_shinfo(skb)->nr_frags;
	header_len = skb_headlen(skb);

	if (num_frags < tx_ring->sgl_size)
		return 0;

	if ((num_frags == tx_ring->sgl_size) &&
	    (header_len < tx_ring->tx_max_header_size))
		return 0;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.linearize++;
	u64_stats_update_end(&tx_ring->syncp);

	rc = skb_linearize(skb);
	if (unlikely(rc)) {
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.linearize_failed++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return rc;
}
/* Called with netif_tx_lock. */
static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_tx_buffer *tx_info;
	struct ena_com_tx_ctx ena_tx_ctx;
	struct ena_ring *tx_ring;
	struct netdev_queue *txq;
	struct ena_com_buf *ena_buf;
	void *push_hdr;
	u32 len, last_frag;
	u32 push_len;
	u32 header_len;
	u16 next_to_use;
	u16 req_id;
	dma_addr_t dma;
	int qid, rc, nb_hw_desc;
	int i = -1;

	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
	/* Determine which tx ring we will be placed on */
	qid = skb_get_queue_mapping(skb);
	tx_ring = &adapter->tx_ring[qid];
	txq = netdev_get_tx_queue(dev, qid);

	rc = ena_check_and_linearize_skb(tx_ring, skb);
	if (unlikely(rc))
		goto error_drop_packet;

	skb_tx_timestamp(skb);
	len = skb_headlen(skb);

	next_to_use = tx_ring->next_to_use;
	req_id = tx_ring->free_tx_ids[next_to_use];
	tx_info = &tx_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;

	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
	ena_buf = tx_info->bufs;
	tx_info->skb = skb;
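	/* With the device placement policy (LLQ), the start of the packet, up
	 * to tx_max_header_size bytes, is pushed inline to the device along
	 * with the descriptors, and only the remainder of the linear part is
	 * DMA-mapped below.
	 */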
	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* prepare the push buffer */
		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
		header_len = push_len;
		push_hdr = skb->data;
	} else {
		push_len = 0;
		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
		push_hdr = NULL;
	}

	netif_dbg(adapter, tx_queued, dev,
		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
		  push_hdr, push_len);

	if (len > push_len) {
		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
				     len - push_len, DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len - push_len;

		ena_buf++;
		tx_info->num_of_bufs++;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto error_report_dma_error;

		ena_buf->paddr = dma;
		ena_buf->len = len;
		ena_buf++;
	}

	tx_info->num_of_bufs += last_frag;
	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = header_len;

	/* set flags and meta data */
	ena_tx_csum(&ena_tx_ctx, skb);

	/* prepare the packet's descriptors for the dma engine */
	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
				&nb_hw_desc);
	if (rc) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		tx_ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&tx_ring->syncp);
		netif_tx_stop_queue(txq);
		goto error_unmap_dma;
	}

	netdev_tx_sent_queue(txq, skb->len);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += skb->len;
	u64_stats_update_end(&tx_ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;

	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						    tx_ring->ring_size);
	/* This WMB is aimed to:
	 * 1 - perform an smp barrier before reading next_to_completion
	 * 2 - make sure the descriptors are written before triggering the
	 *     doorbell
	 */
	wmb();

	/* stop the queue when no more space is available; a packet can
	 * require up to sgl_size + 2 descriptors: one for the meta descriptor
	 * and one for the header (if the header is larger than
	 * tx_max_header_size).
	 */
	if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
		     (tx_ring->sgl_size + 2))) {
		netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
			  __func__, qid);

		netif_tx_stop_queue(txq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.queue_stop++;
		u64_stats_update_end(&tx_ring->syncp);
		/* There is a rare condition where this function decides to
		 * stop the queue but meanwhile clean_tx_irq updates
		 * next_to_completion and terminates.
		 * The queue will remain stopped forever.
		 * To solve this issue this function performs an rmb, checks
		 * the wakeup condition and wakes up the queue if needed.
		 */
		smp_rmb();

		if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
				> ENA_TX_WAKEUP_THRESH) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
	}
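	/* Doorbells are batched: skb->xmit_more tells us the stack has more
	 * packets queued for this ring, so the doorbell can be deferred to
	 * the last of them unless the queue has just been stopped.
	 */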
	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
		/* trigger the dma engine */
		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
		u64_stats_update_begin(&tx_ring->syncp);
		tx_ring->tx_stats.doorbells++;
		u64_stats_update_end(&tx_ring->syncp);
	}

	return NETDEV_TX_OK;
error_report_dma_error:
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&tx_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map skb\n");

	tx_info->skb = NULL;

error_unmap_dma:
	if (i >= 0) {
		/* save value of frag that failed */
		last_frag = i;

		/* start back at beginning and unmap skb */
		tx_info->skb = NULL;
		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		for (i = 0; i < last_frag; i++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		}
	}

error_drop_packet:

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ena_netpoll(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int i;

	/* Don't schedule NAPI if the driver is in the middle of a reset
	 * or the netdev is down.
	 */
	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	u16 qid;
	/* we suspect that this is good for in-kernel network services that
	 * want to loop incoming skb rx to tx in normal user generated traffic;
	 * most probably we will not get to this
	 */
	if (skb_rx_queue_recorded(skb))
		qid = skb_get_rx_queue(skb);
	else
		qid = fallback(dev, skb);

	return qid;
}
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_admin_host_info *host_info;
	int rc;

	/* Allocate only the host info */
	rc = ena_com_allocate_host_info(ena_dev);
	if (rc) {
		pr_err("Cannot allocate host info\n");
		return;
	}

	host_info = ena_dev->host_attr.host_info;

	host_info->os_type = ENA_ADMIN_OS_LINUX;
	host_info->kernel_ver = LINUX_VERSION_CODE;
	strncpy(host_info->kernel_ver_str, utsname()->version,
		sizeof(host_info->kernel_ver_str) - 1);
	host_info->os_dist = 0;
	strncpy(host_info->os_dist_str, utsname()->release,
		sizeof(host_info->os_dist_str) - 1);
	host_info->driver_version =
		(DRV_MODULE_VER_MAJOR) |
		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
		(DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);

	rc = ena_com_set_host_attributes(ena_dev);
	if (rc) {
		if (rc == -EPERM)
			pr_warn("Cannot set host attributes\n");
		else
			pr_err("Cannot set host attributes\n");

		goto err;
	}

	return;

err:
	ena_com_delete_host_info(ena_dev);
}
static void ena_config_debug_area(struct ena_adapter *adapter)
{
	u32 debug_area_size;
	int rc, ss_count;

	ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
	if (ss_count <= 0) {
		netif_err(adapter, drv, adapter->netdev,
			  "SS count is negative\n");
		return;
	}

	/* allocate 32 bytes for each string and 64bit for the value */
	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;

	rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
	if (rc) {
		pr_err("Cannot allocate debug area\n");
		return;
	}

	rc = ena_com_set_host_attributes(adapter->ena_dev);
	if (rc) {
		if (rc == -EPERM)
			netif_warn(adapter, drv, adapter->netdev,
				   "Cannot set host attributes\n");
		else
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot set host attributes\n");
		goto err;
	}

	return;
err:
	ena_com_delete_debug_area(adapter->ena_dev);
}
static void ena_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_ring *rx_ring, *tx_ring;
	unsigned int start;
	u64 rx_drops;
	int i;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;
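	/* Per-ring counters are read inside u64_stats fetch/retry loops so
	 * that 64-bit statistics stay consistent even on 32-bit machines,
	 * where a writer may be updating them concurrently.
	 */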
	for (i = 0; i < adapter->num_queues; i++) {
		u64 bytes, packets;

		tx_ring = &adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->tx_stats.cnt;
			bytes = tx_ring->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;

		rx_ring = &adapter->rx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->rx_stats.cnt;
			bytes = rx_ring->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	do {
		start = u64_stats_fetch_begin_irq(&adapter->syncp);
		rx_drops = adapter->dev_stats.rx_drops;
	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));

	stats->rx_dropped = rx_drops;

	stats->multicast = 0;
	stats->collisions = 0;

	stats->rx_length_errors = 0;
	stats->rx_crc_errors = 0;
	stats->rx_frame_errors = 0;
	stats->rx_fifo_errors = 0;
	stats->rx_missed_errors = 0;
	stats->tx_window_errors = 0;

	stats->rx_errors = 0;
	stats->tx_errors = 0;
}
static const struct net_device_ops ena_netdev_ops = {
	.ndo_open		= ena_open,
	.ndo_stop		= ena_close,
	.ndo_start_xmit		= ena_start_xmit,
	.ndo_select_queue	= ena_select_queue,
	.ndo_get_stats64	= ena_get_stats64,
	.ndo_tx_timeout		= ena_tx_timeout,
	.ndo_change_mtu		= ena_change_mtu,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ena_netpoll,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static void ena_device_io_suspend(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, suspend_io_task);
	struct net_device *netdev = adapter->netdev;

	/* ena_napi_disable_all disables only the IO handling.
	 * We are still subject to AENQ keep alive watchdog.
	 */
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_suspend++;
	u64_stats_update_end(&adapter->syncp);
	ena_napi_disable_all(adapter);
	netif_tx_lock(netdev);
	netif_device_detach(netdev);
	netif_tx_unlock(netdev);
}
static void ena_device_io_resume(struct work_struct *work)
{
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, resume_io_task);
	struct net_device *netdev = adapter->netdev;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.io_resume++;
	u64_stats_update_end(&adapter->syncp);

	netif_device_attach(netdev);
	ena_napi_enable_all(adapter);
}
static int ena_device_validate_params(struct ena_adapter *adapter,
				      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
			      adapter->mac_addr);
	if (!rc) {
		netif_err(adapter, drv, netdev,
			  "Error, mac addresses differ\n");
		return -EINVAL;
	}

	if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
	    (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
		netif_err(adapter, drv, netdev,
			  "Error, device doesn't support enough queues\n");
		return -EINVAL;
	}

	if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
		netif_err(adapter, drv, netdev,
			  "Error, device max mtu is smaller than netdev MTU\n");
		return -EINVAL;
	}

	return 0;
}
static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
			   bool *wd_state)
{
	struct device *dev = &pdev->dev;
	bool readless_supported;
	u32 aenq_groups;
	int dma_width;
	int rc;

	rc = ena_com_mmio_reg_read_request_init(ena_dev);
	if (rc) {
		dev_err(dev, "failed to init mmio read less\n");
		return rc;
	}

	/* The PCIe configuration space revision id indicates whether
	 * mmio reg read is supported
	 */
	readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
	ena_com_set_mmio_read_mode(ena_dev, readless_supported);

	rc = ena_com_dev_reset(ena_dev);
	if (rc) {
		dev_err(dev, "Can not reset device\n");
		goto err_mmio_read_less;
	}

	rc = ena_com_validate_version(ena_dev);
	if (rc) {
		dev_err(dev, "device version is too low\n");
		goto err_mmio_read_less;
	}

	dma_width = ena_com_get_dma_width(ena_dev);
	if (dma_width < 0) {
		dev_err(dev, "Invalid dma width value %d", dma_width);
		rc = dma_width;
		goto err_mmio_read_less;
	}

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
		goto err_mmio_read_less;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (rc) {
		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
			rc);
		goto err_mmio_read_less;
	}

	/* ENA admin level init */
	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
	if (rc) {
		dev_err(dev,
			"Can not initialize ena admin queue with device\n");
		goto err_mmio_read_less;
	}
2395 /* To enable the msix interrupts the driver needs to know the number
2396 * of queues. So the driver uses polling mode to retrieve this
2399 ena_com_set_admin_polling_mode(ena_dev, true);
2401 ena_config_host_info(ena_dev);
2403 /* Get Device Attributes*/
2404 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2406 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2407 goto err_admin_init;
2410 /* Try to turn all the available aenq groups */
2411 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2412 BIT(ENA_ADMIN_FATAL_ERROR) |
2413 BIT(ENA_ADMIN_WARNING) |
2414 BIT(ENA_ADMIN_NOTIFICATION) |
2415 BIT(ENA_ADMIN_KEEP_ALIVE);
2417 aenq_groups &= get_feat_ctx->aenq.supported_groups;
2419 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2421 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
2422 goto err_admin_init;
2425 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2430 ena_com_delete_host_info(ena_dev);
2431 ena_com_admin_destroy(ena_dev);
2433 ena_com_mmio_reg_read_request_destroy(ena_dev);
static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
						    int io_vectors)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	int rc;

	rc = ena_enable_msix(adapter, io_vectors);
	if (rc) {
		dev_err(dev, "Can not reserve msix vectors\n");
		return rc;
	}

	ena_setup_mgmnt_intr(adapter);

	rc = ena_request_mgmnt_irq(adapter);
	if (rc) {
		dev_err(dev, "Can not setup management interrupts\n");
		goto err_disable_msix;
	}

	ena_com_set_admin_polling_mode(ena_dev, false);

	ena_com_admin_aenq_enable(ena_dev);

	return 0;

err_disable_msix:
	pci_free_irq_vectors(adapter->pdev);
	return rc;
}
static void ena_fw_reset_device(struct work_struct *work)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	struct ena_adapter *adapter =
		container_of(work, struct ena_adapter, reset_task);
	struct net_device *netdev = adapter->netdev;
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct pci_dev *pdev = adapter->pdev;
	bool dev_up, wd_state;
	int rc;

	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"device reset scheduled while reset bit is off\n");
		return;
	}

	netif_carrier_off(netdev);

	del_timer_sync(&adapter->timer_service);

	rtnl_lock();

	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	ena_com_set_admin_running_state(ena_dev, false);

	/* After calling ena_close the tx queues and the napi
	 * are disabled so no one can interfere or touch the
	 * data structures
	 */
	ena_close(netdev);

	ena_free_mgmnt_irq(adapter);

	pci_free_irq_vectors(adapter->pdev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	/* Finish with the destroy part. Start the init part */

	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "Can not initialize device\n");
		goto err;
	}
	adapter->wd_state = wd_state;

	rc = ena_device_validate_params(adapter, &get_feat_ctx);
	if (rc) {
		dev_err(&pdev->dev, "Validation of device parameters failed\n");
		goto err_device_destroy;
	}

	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
						      adapter->num_queues);
	if (rc) {
		dev_err(&pdev->dev, "Enable MSI-X failed\n");
		goto err_device_destroy;
	}

	/* If the interface was up before the reset bring it up */
	if (dev_up) {
		rc = ena_up(adapter);
		if (rc) {
			dev_err(&pdev->dev, "Failed to create I/O queues\n");
			goto err_disable_msix;
		}
	}

	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	rtnl_unlock();

	dev_err(&pdev->dev, "Device reset completed successfully\n");

	return;
err_disable_msix:
	ena_free_mgmnt_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);
err_device_destroy:
	ena_com_admin_destroy(ena_dev);
err:
	rtnl_unlock();

	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	dev_err(&pdev->dev,
		"Reset attempt failed. Can not reset the device\n");
}
static void check_for_missing_tx_completions(struct ena_adapter *adapter)
{
	struct ena_tx_buffer *tx_buf;
	unsigned long last_jiffies;
	struct ena_ring *tx_ring;
	int i, j, budget;
	u32 missed_tx;

	/* Make sure the driver doesn't race with another context
	 * turning the device off
	 */
	smp_rmb();

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	budget = ENA_MONITORED_TX_QUEUES;

	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];

		for (j = 0; j < tx_ring->ring_size; j++) {
			tx_buf = &tx_ring->tx_buffer_info[j];
			last_jiffies = tx_buf->last_jiffies;
			if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
				netif_notice(adapter, tx_err, adapter->netdev,
					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
					     tx_ring->qid, j);

				u64_stats_update_begin(&tx_ring->syncp);
				missed_tx = tx_ring->tx_stats.missing_tx_comp++;
				u64_stats_update_end(&tx_ring->syncp);

				/* Clear last jiffies so the lost buffer won't
				 * be counted twice
				 */
				tx_buf->last_jiffies = 0;

				if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
					netif_err(adapter, tx_err, adapter->netdev,
						  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
						  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
					set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
				}
			}
		}

		budget--;
		if (!budget)
			break;
	}

	adapter->last_monitored_tx_qid = i % adapter->num_queues;
}
/* trigger napi schedule after 2 consecutive detections */
#define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of memory
 * for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will be dropped
 * The napi handler won't allocate new Rx descriptors so the device won't be
 * able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
 * It is recommended to have at least 512MB (with a minimum of 128MB for
 * constrained environments).
 *
 * When such a situation is detected - reschedule napi
 */
static void check_for_empty_rx_ring(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, refill_required;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return;

	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];

		refill_required =
			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
			rx_ring->empty_rx_queue++;

			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
				u64_stats_update_begin(&rx_ring->syncp);
				rx_ring->rx_stats.empty_rx_ring++;
				u64_stats_update_end(&rx_ring->syncp);

				netif_err(adapter, drv, adapter->netdev,
					  "trigger refill for ring %d\n", i);

				napi_schedule(rx_ring->napi);
				rx_ring->empty_rx_queue = 0;
			}
		} else {
			rx_ring->empty_rx_queue = 0;
		}
	}
}
/* Check for keep alive expiration */
static void check_for_missing_keep_alive(struct ena_adapter *adapter)
{
	unsigned long keep_alive_expired;

	if (!adapter->wd_state)
		return;

	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies
					   + ENA_DEVICE_KALIVE_TIMEOUT);
	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Keep alive watchdog timeout.\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.wd_expired++;
		u64_stats_update_end(&adapter->syncp);
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
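/* Timing sketch: the device refreshes last_keep_alive_jiffies through the
 * ENA_ADMIN_KEEP_ALIVE AENQ event (see ena_keep_alive_wd() below). Once no
 * event has arrived for about ENA_DEVICE_KALIVE_TIMEOUT jiffies,
 * time_is_before_jiffies() above becomes true and a reset is scheduled.
 */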
static void check_for_admin_com_state(struct ena_adapter *adapter)
{
	if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
		netif_err(adapter, drv, adapter->netdev,
			  "ENA admin queue is not in running state!\n");
		u64_stats_update_begin(&adapter->syncp);
		adapter->dev_stats.admin_q_pause++;
		u64_stats_update_end(&adapter->syncp);
		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info,
				 struct net_device *netdev)
{
	host_info->supported_network_features[0] =
		netdev->features & GENMASK_ULL(31, 0);
	host_info->supported_network_features[1] =
		(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
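/* Worked example (hypothetical feature word): for netdev->features ==
 * 0x0000001100004833ULL the device is told
 * supported_network_features[0] = 0x00004833 (low 32 bits) and
 * supported_network_features[1] = 0x00000011 (high 32 bits).
 */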
static void ena_timer_service(unsigned long data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;
	u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
	struct ena_admin_host_info *host_info =
		adapter->ena_dev->host_attr.host_info;

	check_for_missing_keep_alive(adapter);

	check_for_admin_com_state(adapter);

	check_for_missing_tx_completions(adapter);

	check_for_empty_rx_ring(adapter);

	if (debug_area)
		ena_dump_stats_to_buf(adapter, debug_area);

	if (host_info)
		ena_update_host_info(host_info, adapter->netdev);

	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		netif_err(adapter, drv, adapter->netdev,
			  "Trigger reset is on\n");
		ena_dump_stats_to_dmesg(adapter);
		queue_work(ena_wq, &adapter->reset_task);
		return;
	}

	/* Reset the timer */
	mod_timer(&adapter->timer_service, jiffies + HZ);
}
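/* The timer is self-arming: every run re-schedules itself one second out
 * (jiffies + HZ) unless a reset was triggered, in which case
 * ena_fw_reset_device() re-arms it after the device is brought back up.
 */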
static int ena_calc_io_queue_num(struct pci_dev *pdev,
				 struct ena_com_dev *ena_dev,
				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	int io_sq_num, io_queue_num;

	/* In case of LLQ use the llq number in the get feature cmd */
	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq_num = get_feat_ctx->max_queues.max_llq_num;

		if (io_sq_num == 0) {
			dev_warn(&pdev->dev,
				 "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n");

			ena_dev->tx_mem_queue_type =
				ENA_ADMIN_PLACEMENT_POLICY_HOST;
			io_sq_num = get_feat_ctx->max_queues.max_sq_num;
		}
	} else {
		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
	}

	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
	io_queue_num = min_t(int, io_queue_num, io_sq_num);
	io_queue_num = min_t(int, io_queue_num,
			     get_feat_ctx->max_queues.max_cq_num);
	/* 1 IRQ for mgmnt and 1 IRQ for each IO queue (Tx/Rx pairs share a vector) */
	io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
	if (unlikely(!io_queue_num)) {
		dev_err(&pdev->dev, "The device doesn't have io queues\n");
		return -EFAULT;
	}

	return io_queue_num;
}
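/* Worked example (hypothetical device limits): with 8 online CPUs, 16 SQs,
 * 16 CQs and 9 MSI-X vectors, the min_t() chain above yields
 * min(8, 16, 16, 9 - 1) = 8 IO queues, assuming
 * ENA_MAX_NUM_IO_QUEUES >= 8.
 */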
static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	bool has_mem_bar;

	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);

	/* Enable push mode if device supports LLQ */
	if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0))
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
	else
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
}
static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
				 struct net_device *netdev)
{
	netdev_features_t dev_features = 0;

	/* Set offload features */
	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
		dev_features |= NETIF_F_IP_CSUM;

	if (feat->offload.tx &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
		dev_features |= NETIF_F_IPV6_CSUM;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
		dev_features |= NETIF_F_TSO;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
		dev_features |= NETIF_F_TSO6;

	if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
		dev_features |= NETIF_F_TSO_ECN;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	if (feat->offload.rx_supported &
	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
		dev_features |= NETIF_F_RXCSUM;

	netdev->features =
		dev_features |
		NETIF_F_SG |
		NETIF_F_RXHASH |
		NETIF_F_HIGHDMA;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
}
static void ena_set_conf_feat_params(struct ena_adapter *adapter,
				     struct ena_com_dev_get_features_ctx *feat)
{
	struct net_device *netdev = adapter->netdev;

	/* Copy mac address */
	if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	} else {
		ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
		ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	}

	/* Set offload features */
	ena_set_dev_offloads(feat, netdev);

	adapter->max_mtu = feat->dev_attr.max_mtu;
	netdev->max_mtu = adapter->max_mtu;
	netdev->min_mtu = ENA_MIN_MTU;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct device *dev = &adapter->pdev->dev;
	u32 val;
	int rc, i;

	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
	if (unlikely(rc)) {
		dev_err(dev, "Cannot init indirect table\n");
		goto err_rss_init;
	}

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		val = ethtool_rxfh_indir_default(i, adapter->num_queues);
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(val));
		if (unlikely(rc && (rc != -EPERM))) {
			dev_err(dev, "Cannot fill indirect table\n");
			goto err_fill_indir;
		}
	}

	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
	if (unlikely(rc && (rc != -EPERM))) {
		dev_err(dev, "Cannot fill hash function\n");
		goto err_fill_indir;
	}

	rc = ena_com_set_default_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EPERM))) {
		dev_err(dev, "Cannot fill hash control\n");
		goto err_fill_indir;
	}

	return 0;

err_fill_indir:
	ena_com_rss_destroy(ena_dev);
err_rss_init:
	return rc;
}
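/* ethtool_rxfh_indir_default(i, n) spreads the indirection table
 * round-robin (i % n): with e.g. 4 IO queues, entry 5 maps to RX queue 1.
 * The hash key argument is NULL because ENA_ADMIN_CRC32 takes no key; only
 * the 0xFFFFFFFF initial value is used.
 */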
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
	int release_bars;

	if (ena_dev->mem_bar)
		devm_iounmap(&pdev->dev, ena_dev->mem_bar);

	if (ena_dev->reg_bar)
		devm_iounmap(&pdev->dev, ena_dev->reg_bar);

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	pci_release_selected_regions(pdev, release_bars);
}
static int ena_calc_queue_size(struct pci_dev *pdev,
			       struct ena_com_dev *ena_dev,
			       u16 *max_tx_sgl_size,
			       u16 *max_rx_sgl_size,
			       struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	u32 queue_size = ENA_DEFAULT_RING_SIZE;

	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_cq_depth);
	queue_size = min_t(u32, queue_size,
			   get_feat_ctx->max_queues.max_sq_depth);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		queue_size = min_t(u32, queue_size,
				   get_feat_ctx->max_queues.max_llq_depth);

	queue_size = rounddown_pow_of_two(queue_size);

	if (unlikely(!queue_size)) {
		dev_err(&pdev->dev, "Invalid queue size\n");
		return -EFAULT;
	}

	*max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_tx_descs);
	*max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
				 get_feat_ctx->max_queues.max_packet_rx_descs);

	return queue_size;
}
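/* Worked example (hypothetical limits): with ENA_DEFAULT_RING_SIZE == 1024,
 * max_cq_depth == 4096 and max_sq_depth == 600, queue_size becomes
 * min(1024, 4096, 600) = 600, and rounddown_pow_of_two(600) = 512.
 */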
/* ena_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ena_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ena_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ena_com_dev_get_features_ctx get_feat_ctx;
	static int version_printed;
	struct net_device *netdev;
	struct ena_adapter *adapter;
	struct ena_com_dev *ena_dev = NULL;
	static int adapters_found;
	int io_queue_num, bars, rc;
	int queue_size;
	u16 tx_sgl_size = 0;
	u16 rx_sgl_size = 0;
	bool wd_state;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (version_printed++ == 0)
		dev_info(&pdev->dev, "%s", version);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return rc;
	}

	pci_set_master(pdev);

	ena_dev = vzalloc(sizeof(*ena_dev));
	if (!ena_dev) {
		rc = -ENOMEM;
		goto err_disable_device;
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
	rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			rc);
		goto err_free_ena_dev;
	}

	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
					pci_resource_start(pdev, ENA_REG_BAR),
					pci_resource_len(pdev, ENA_REG_BAR));
	if (!ena_dev->reg_bar) {
		dev_err(&pdev->dev, "failed to remap regs bar\n");
		rc = -EFAULT;
		goto err_free_region;
	}

	ena_dev->dmadev = &pdev->dev;

	rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
	if (rc) {
		dev_err(&pdev->dev, "ena device init failed\n");
		if (rc == -ETIME)
			rc = -EPROBE_DEFER;
		goto err_free_region;
	}

	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);

	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
						   pci_resource_start(pdev, ENA_MEM_BAR),
						   pci_resource_len(pdev, ENA_MEM_BAR));
		if (!ena_dev->mem_bar) {
			rc = -EFAULT;
			goto err_device_destroy;
		}
	}

	/* initial Tx interrupt delay, Assumes 1 usec granularity.
	 * Updated during device initialization with the real granularity
	 */
	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
	queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
					 &rx_sgl_size, &get_feat_ctx);
	if ((queue_size <= 0) || (io_queue_num <= 0)) {
		rc = -EFAULT;
		goto err_device_destroy;
	}

	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
		 io_queue_num, queue_size);

	/* dev is zeroed by alloc_etherdev_mq */
	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
	if (!netdev) {
		dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
		rc = -ENOMEM;
		goto err_device_destroy;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	pci_set_drvdata(pdev, adapter);

	adapter->ena_dev = ena_dev;
	adapter->netdev = netdev;
	adapter->pdev = pdev;

	ena_set_conf_feat_params(adapter, &get_feat_ctx);

	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	adapter->tx_ring_size = queue_size;
	adapter->rx_ring_size = queue_size;

	adapter->max_tx_sgl_size = tx_sgl_size;
	adapter->max_rx_sgl_size = rx_sgl_size;

	adapter->num_queues = io_queue_num;
	adapter->last_monitored_tx_qid = 0;

	adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
	adapter->wd_state = wd_state;

	snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);

	rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to query interrupt moderation feature\n");
		goto err_netdev_destroy;
	}

	ena_init_io_rings(adapter);

	netdev->netdev_ops = &ena_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT;
	ena_set_ethtool_ops(netdev);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	u64_stats_init(&adapter->syncp);

	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
	if (rc) {
		dev_err(&pdev->dev,
			"Failed to enable and set the admin interrupts\n");
		goto err_worker_destroy;
	}

	rc = ena_rss_init_default(adapter);
	if (rc && (rc != -EPERM)) {
		dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
		goto err_free_msix;
	}

	ena_config_debug_area(adapter);

	memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);

	netif_carrier_off(netdev);

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto err_rss;
	}

	INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend);
	INIT_WORK(&adapter->resume_io_task, ena_device_io_resume);
	INIT_WORK(&adapter->reset_task, ena_fw_reset_device);

	adapter->last_keep_alive_jiffies = jiffies;

	setup_timer(&adapter->timer_service, ena_timer_service,
		    (unsigned long)adapter);
	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));

	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
		 netdev->dev_addr, io_queue_num);

	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);

	adapters_found++;

	return 0;

err_rss:
	ena_com_delete_debug_area(ena_dev);
	ena_com_rss_destroy(ena_dev);
err_free_msix:
	ena_com_dev_reset(ena_dev);
	ena_free_mgmnt_irq(adapter);
	pci_free_irq_vectors(adapter->pdev);
err_worker_destroy:
	ena_com_destroy_interrupt_moderation(ena_dev);
	del_timer(&adapter->timer_service);
	cancel_work_sync(&adapter->suspend_io_task);
	cancel_work_sync(&adapter->resume_io_task);
err_netdev_destroy:
	free_netdev(netdev);
err_device_destroy:
	ena_com_delete_host_info(ena_dev);
	ena_com_admin_destroy(ena_dev);
err_free_region:
	ena_release_bars(ena_dev, pdev);
err_free_ena_dev:
	vfree(ena_dev);
err_disable_device:
	pci_disable_device(pdev);

	return rc;
}
/*****************************************************************************/

static int ena_sriov_configure(struct pci_dev *dev, int numvfs)
{
	int rc;

	if (numvfs > 0) {
		rc = pci_enable_sriov(dev, numvfs);
		if (rc != 0) {
			dev_err(&dev->dev,
				"pci_enable_sriov failed to enable %d vfs: error %d\n",
				numvfs, rc);
			return rc;
		}

		return numvfs;
	}

	if (numvfs == 0) {
		pci_disable_sriov(dev);
		return 0;
	}

	return -EINVAL;
}
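/* Usage sketch: the PCI core invokes this callback when user space writes
 * the VF count to sysfs, e.g.
 * "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" enables four VFs and
 * "echo 0" disables them again.
 */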
/*****************************************************************************/
/*****************************************************************************/

/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	unregister_netdev(netdev);
	del_timer_sync(&adapter->timer_service);

	cancel_work_sync(&adapter->reset_task);

	cancel_work_sync(&adapter->suspend_io_task);

	cancel_work_sync(&adapter->resume_io_task);

	/* Reset the device only if the device is running. */
	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
		ena_com_dev_reset(ena_dev);

	ena_free_mgmnt_irq(adapter);

	pci_free_irq_vectors(adapter->pdev);

	free_netdev(netdev);

	ena_com_mmio_reg_read_request_destroy(ena_dev);

	ena_com_abort_admin_commands(ena_dev);

	ena_com_wait_for_abort_completion(ena_dev);

	ena_com_admin_destroy(ena_dev);

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);

	ena_com_destroy_interrupt_moderation(ena_dev);

	vfree(ena_dev);
}
static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.sriov_configure = ena_sriov_configure,
};
static int __init ena_init(void)
{
	pr_info("%s", version);

	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}
static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	adapter->last_keep_alive_jiffies = jiffies;
}
static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_SUSPEND:
		/* Suspend just the IO queues.
		 * We deliberately don't suspend admin so the timer and
		 * the keep_alive events should remain.
		 */
		queue_work(ena_wq, &adapter->suspend_io_task);
		break;
	case ENA_ADMIN_RESUME:
		queue_work(ena_wq, &adapter->resume_io_task);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification syndrome %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}
/* This handler will be called for unknown event groups or unimplemented
 * handlers.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
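/* ena_com dispatches each AENQ entry by its group index into .handlers[]
 * and falls back to .unimplemented_handler for groups without an entry
 * (see ena_com.c), so the table above only needs the groups enabled in
 * ena_device_init().
 */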
module_init(ena_init);
module_exit(ena_cleanup);