bpf: xdp: Allow head adjustment in XDP prog
[karo-tx-linux.git] / drivers/net/ethernet/qlogic/qede/qede_main.c
index 85f46dbecd5b57b5cdb537900a51dbd8cfa4dcc1..aecdd1c5c0ea24a368085c0b2a41f5891ce55ac1 100644
@@ -94,11 +94,26 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 
 #define TX_TIMEOUT             (5 * HZ)
 
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
 static void qede_remove(struct pci_dev *pdev);
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq);
+static void qede_shutdown(struct pci_dev *pdev);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+       mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+       mutex_unlock(&edev->qede_lock);
+}
+
 #ifdef CONFIG_QED_SRIOV
 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
                            __be16 vlan_proto)
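The __qede_lock()/__qede_unlock() wrappers above only serialize driver flows that are not reentrant; later hunks in this patch take them around the VLAN add/kill paths and the set_features reload. As a rough sketch of the intended pattern, assuming a hypothetical caller qede_example_flow() that is not part of the driver:

static void qede_example_flow(struct qede_dev *edev)
{
        __qede_lock(edev);

        /* Anything that must not race with load/unload or another
         * configuration flow, e.g. checking edev->state before
         * reconfiguring queues.
         */
        if (edev->state == QEDE_STATE_OPEN) {
                /* reconfigure under the lock */
        }

        __qede_unlock(edev);
}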
@@ -166,15 +181,20 @@ static struct pci_driver qede_pci_driver = {
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
+       .shutdown = qede_shutdown,
 #ifdef CONFIG_QED_SRIOV
        .sriov_configure = qede_sriov_configure,
 #endif
 };
 
-static void qede_force_mac(void *dev, u8 *mac)
+static void qede_force_mac(void *dev, u8 *mac, bool forced)
 {
        struct qede_dev *edev = dev;
 
+       /* MAC hints take effect only if we haven't set one already */
+       if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+               return;
+
        ether_addr_copy(edev->ndev->dev_addr, mac);
        ether_addr_copy(edev->primary_mac, mac);
 }
@@ -284,12 +304,12 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
                            struct qede_tx_queue *txq, int *len)
 {
        u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
        struct eth_tx_1st_bd *first_bd;
        struct eth_tx_bd *tx_data_bd;
        int bds_consumed = 0;
        int nbds;
-       bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+       bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
        int i, split_bd_len = 0;
 
        if (unlikely(!skb)) {
@@ -329,20 +349,19 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 
        /* Free skb */
        dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
 
        return 0;
 }
 
 /* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_dev *edev,
-                                   struct qede_tx_queue *txq,
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
                                    struct eth_tx_1st_bd *first_bd,
                                    int nbd, bool data_split)
 {
        u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
        struct eth_tx_bd *tx_data_bd;
        int i, split_bd_len = 0;
 
@@ -359,7 +378,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                nbd--;
        }
 
-       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+       dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
                         BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
        /* Unmap the data of the skb frags */
@@ -367,7 +386,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                tx_data_bd = (struct eth_tx_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                if (tx_data_bd->nbytes)
-                       dma_unmap_page(&edev->pdev->dev,
+                       dma_unmap_page(txq->dev,
                                       BD_UNMAP_ADDR(tx_data_bd),
                                       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
        }
@@ -378,12 +397,11 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
        /* Free skb */
        dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
 }
 
-static u32 qede_xmit_type(struct qede_dev *edev,
-                         struct sk_buff *skb, int *ipv6_ext)
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
 {
        u32 rc = XMIT_L4_CSUM;
        __be16 l3_proto;
@@ -396,8 +414,19 @@ static u32 qede_xmit_type(struct qede_dev *edev,
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                *ipv6_ext = 1;
 
-       if (skb->encapsulation)
+       if (skb->encapsulation) {
                rc |= XMIT_ENC;
+               if (skb_is_gso(skb)) {
+                       unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+                       if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+                           (gso_type & SKB_GSO_GRE_CSUM))
+                               rc |= XMIT_ENC_GSO_L4_CSUM;
+
+                       rc |= XMIT_LSO;
+                       return rc;
+               }
+       }
 
        if (skb_is_gso(skb))
                rc |= XMIT_LSO;
@@ -439,18 +468,16 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
        second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
 }
 
-static int map_frag_to_bd(struct qede_dev *edev,
+static int map_frag_to_bd(struct qede_tx_queue *txq,
                          skb_frag_t *frag, struct eth_tx_bd *bd)
 {
        dma_addr_t mapping;
 
        /* Map skb non-linear frag data for DMA */
-       mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+       mapping = skb_frag_dma_map(txq->dev, frag, 0,
                                   skb_frag_size(frag), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+       if (unlikely(dma_mapping_error(txq->dev, mapping)))
                return -ENOMEM;
-       }
 
        /* Setup the data pointer of the frag data */
        BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
@@ -470,8 +497,7 @@ static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
 
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
-                            u8 xmit_type)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
 {
        int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
 
@@ -507,6 +533,47 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
        mmiowb();
 }
 
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+                        struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+       struct qede_tx_queue *txq = fp->xdp_tx;
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct eth_tx_1st_bd *first_bd;
+
+       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+               txq->stopped_cnt++;
+               return -ENOMEM;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+       first_bd->data.bitfields |=
+           (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+           ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       first_bd->data.nbds = 1;
+
+       /* We can safely ignore the offset, as it's 0 for XDP */
+       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+       /* Synchronize the buffer back to device, as program [probably]
+        * has changed it.
+        */
+       dma_sync_single_for_device(&edev->pdev->dev,
+                                  metadata->mapping + padding,
+                                  length, PCI_DMA_TODEVICE);
+
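+       /* Remember the page so qede_xdp_tx_int() can free it on completion */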
+       txq->sw_tx_ring.pages[idx] = metadata->data;
+       txq->sw_tx_prod++;
+
+       /* Mark the fastpath for future XDP doorbell */
+       fp->xdp_xmit = 1;
+
+       return 0;
+}
+
 /* Main transmit function */
 static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                                   struct net_device *ndev)
@@ -530,15 +597,15 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        /* Get tx-queue context and netdev index */
        txq_index = skb_get_queue_mapping(skb);
        WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
-       txq = QEDE_TX_QUEUE(edev, txq_index);
+       txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
        netdev_txq = netdev_get_tx_queue(ndev, txq_index);
 
        WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
 
-       xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+       xmit_type = qede_xmit_type(skb, &ipv6_ext);
 
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-       if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+       if (qede_pkt_req_lin(skb, xmit_type)) {
                if (skb_linearize(skb)) {
                        DP_NOTICE(edev,
                                  "SKB linearization failed - silently dropping this SKB\n");
@@ -550,7 +617,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
        /* Fill the entry in the SW ring and the BDs in the FW ring */
        idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       txq->sw_tx_ring[idx].skb = skb;
+       txq->sw_tx_ring.skbs[idx].skb = skb;
        first_bd = (struct eth_tx_1st_bd *)
                   qed_chain_produce(&txq->tx_pbl);
        memset(first_bd, 0, sizeof(*first_bd));
@@ -558,11 +625,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 
        /* Map skb linear data for DMA and set in the first BD */
-       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+       mapping = dma_map_single(txq->dev, skb->data,
                                 skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+       if (unlikely(dma_mapping_error(txq->dev, mapping))) {
                DP_NOTICE(edev, "SKB mapping failed\n");
-               qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+               qede_free_failed_tx_pkt(txq, first_bd, 0, false);
                qede_update_tx_producer(txq);
                return NETDEV_TX_OK;
        }
@@ -633,6 +700,12 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                if (unlikely(xmit_type & XMIT_ENC)) {
                        first_bd->data.bd_flags.bitfields |=
                                1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+                       if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+                               u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+                               first_bd->data.bd_flags.bitfields |= 1 << tmp;
+                       }
                        hlen = qede_get_skb_hlen(skb, true);
                } else {
                        first_bd->data.bd_flags.bitfields |=
@@ -664,7 +737,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                        /* this marks the BD as one that has no
                         * individual mapping
                         */
-                       txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+                       txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
 
                        first_bd->nbytes = cpu_to_le16(hlen);
 
@@ -680,12 +753,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        /* Handle fragmented skb */
        /* special handle for frags inside 2nd and 3rd bds.. */
        while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
-               rc = map_frag_to_bd(edev,
+               rc = map_frag_to_bd(txq,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
                        qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
@@ -705,12 +777,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
                memset(tx_data_bd, 0, sizeof(*tx_data_bd));
 
-               rc = map_frag_to_bd(edev,
+               rc = map_frag_to_bd(txq,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
                        qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
@@ -775,6 +846,27 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
        return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
 }
 
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       struct eth_tx_1st_bd *bd;
+       u16 hw_bd_cons;
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+               dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+                                PAGE_SIZE, DMA_BIDIRECTIONAL);
+               __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+                                                 NUM_TX_BDS_MAX]);
+
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+}
+
 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        struct netdev_queue *netdev_txq;
@@ -858,16 +950,6 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq)
        return hw_comp_cons != sw_comp_cons;
 }
 
-static bool qede_has_tx_work(struct qede_fastpath *fp)
-{
-       u8 tc;
-
-       for (tc = 0; tc < fp->edev->num_tc; tc++)
-               if (qede_txq_has_work(&fp->txqs[tc]))
-                       return true;
-       return false;
-}
-
 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 {
        qed_chain_consume(&rxq->rx_bd_ring);
@@ -877,8 +959,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
-static inline void qede_reuse_page(struct qede_dev *edev,
-                                  struct qede_rx_queue *rxq,
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
                                   struct sw_rx_data *curr_cons)
 {
        struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
@@ -900,27 +981,62 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 /* In case of allocation failures reuse buffers
  * from consumer index to produce buffers for firmware
  */
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
-                            struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
 {
        struct sw_rx_data *curr_cons;
 
        for (; count > 0; count--) {
                curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-               qede_reuse_page(edev, rxq, curr_cons);
+               qede_reuse_page(rxq, curr_cons);
                qede_rx_bd_ring_consume(rxq);
        }
 }
 
-static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
-                                        struct qede_rx_queue *rxq,
+static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       struct page *data;
+
+       data = alloc_pages(GFP_ATOMIC, 0);
+       if (unlikely(!data))
+               return -ENOMEM;
+
+       /* Map the entire page, as it may be used for multiple
+        * RX buffers of rx_buf_seg_size each.
+        */
+       mapping = dma_map_page(rxq->dev, data, 0,
+                              PAGE_SIZE, rxq->data_direction);
+       if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+               __free_page(data);
+               return -ENOMEM;
+       }
+
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->page_offset = 0;
+       sw_rx_data->data = data;
+       sw_rx_data->mapping = mapping;
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+       rxq->sw_rx_prod++;
+
+       return 0;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
                                         struct sw_rx_data *curr_cons)
 {
        /* Move to the next segment in the page */
        curr_cons->page_offset += rxq->rx_buf_seg_size;
 
        if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+               if (unlikely(qede_alloc_rx_buffer(rxq))) {
                        /* Since we failed to allocate new buffer
                         * current buffer can be used again.
                         */
@@ -929,15 +1045,15 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                        return -ENOMEM;
                }
 
-               dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(rxq->dev, curr_cons->mapping,
+                              PAGE_SIZE, rxq->data_direction);
        } else {
                /* Increment refcount of the page as we don't want
                 * network stack to take the ownership of the page
                 * which can be recycled multiple times by the driver.
                 */
                page_ref_inc(curr_cons->data);
-               qede_reuse_page(edev, rxq, curr_cons);
+               qede_reuse_page(rxq, curr_cons);
        }
 
        return 0;
@@ -971,22 +1087,20 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
        mmiowb();
 }
 
-static u32 qede_get_rxhash(struct qede_dev *edev,
-                          u8 bitfields,
-                          __le32 rss_hash, enum pkt_hash_types *rxhash_type)
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
 {
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
        enum rss_hash_type htype;
+       u32 hash = 0;
 
        htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-
-       if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
-               *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
-                               (htype == RSS_HASH_TYPE_IPV6)) ?
-                               PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
-               return le32_to_cpu(rss_hash);
+       if (htype) {
+               hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                            (htype == RSS_HASH_TYPE_IPV6)) ?
+                           PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               hash = le32_to_cpu(rss_hash);
        }
-       *rxhash_type = PKT_HASH_TYPE_NONE;
-       return 0;
+       skb_set_hash(skb, hash, hash_type);
 }
 
 static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
@@ -1002,12 +1116,14 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 static inline void qede_skb_receive(struct qede_dev *edev,
                                    struct qede_fastpath *fp,
+                                   struct qede_rx_queue *rxq,
                                    struct sk_buff *skb, u16 vlan_tag)
 {
        if (vlan_tag)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 
        napi_gro_receive(&fp->napi, skb);
+       fp->rxq->rcv_pkts++;
 }
 
 static void qede_set_gro_params(struct qede_dev *edev,
@@ -1035,7 +1151,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
        struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
        struct sk_buff *skb = tpa_info->skb;
 
-       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
                goto out;
 
        /* Add one frag and update the appropriate fields in the skb */
@@ -1043,7 +1159,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                           current_bd->data, current_bd->page_offset,
                           len_on_bd);
 
-       if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+       if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
                /* Incr page ref count to reuse on allocation failure
                 * so that it doesn't get freed while freeing SKB.
                 */
@@ -1061,8 +1177,9 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
        return 0;
 
 out:
-       tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-       qede_recycle_rx_bd_ring(rxq, edev, 1);
+       tpa_info->state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, 1);
+
        return -ENOMEM;
 }
 
@@ -1073,12 +1190,10 @@ static void qede_tpa_start(struct qede_dev *edev,
        struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
        struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
        struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-       struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
-       dma_addr_t mapping = tpa_info->replace_buf_mapping;
+       struct sw_rx_data *replace_buf = &tpa_info->buffer;
+       dma_addr_t mapping = tpa_info->buffer_mapping;
        struct sw_rx_data *sw_rx_data_cons;
        struct sw_rx_data *sw_rx_data_prod;
-       enum pkt_hash_types rxhash_type;
-       u32 rxhash;
 
        sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
        sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
@@ -1099,11 +1214,11 @@ static void qede_tpa_start(struct qede_dev *edev,
        /* move partial skb from cons to pool (don't unmap yet)
         * save mapping, in case we drop the packet later on.
         */
-       tpa_info->start_buf = *sw_rx_data_cons;
+       tpa_info->buffer = *sw_rx_data_cons;
        mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
                           le32_to_cpu(rx_bd_cons->addr.lo));
 
-       tpa_info->start_buf_mapping = mapping;
+       tpa_info->buffer_mapping = mapping;
        rxq->sw_rx_cons++;
 
        /* set tpa state to start only if we are able to allocate skb
@@ -1114,27 +1229,27 @@ static void qede_tpa_start(struct qede_dev *edev,
                                         le16_to_cpu(cqe->len_on_first_bd));
        if (unlikely(!tpa_info->skb)) {
                DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
                goto cons_buf;
        }
 
-       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
-       memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe));
-
        /* Start filling in the aggregation info */
+       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
        tpa_info->frag_id = 0;
-       tpa_info->agg_state = QEDE_AGG_STATE_START;
+       tpa_info->state = QEDE_AGG_STATE_START;
 
-       rxhash = qede_get_rxhash(edev, cqe->bitfields,
-                                cqe->rss_hash, &rxhash_type);
-       skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
+       /* Store some information from first CQE */
+       tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+       tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
        if ((le16_to_cpu(cqe->pars_flags.flags) >>
             PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
-                   PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+           PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
                tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
        else
                tpa_info->vlan_tag = 0;
 
+       qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
        /* This is needed in order to enable forwarding support */
        qede_set_gro_params(edev, tpa_info->skb, cqe);
 
@@ -1146,7 +1261,7 @@ cons_buf: /* We still need to handle bd_len_list to consume buffers */
        if (unlikely(cqe->ext_bd_len_list[1])) {
                DP_ERR(edev,
                       "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
-               tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
        }
 }
 
@@ -1197,7 +1312,7 @@ static void qede_gro_receive(struct qede_dev *edev,
 
 #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
-               skb_set_network_header(skb, 0);
+               skb_reset_network_header(skb);
 
                switch (skb->protocol) {
                case htons(ETH_P_IP):
@@ -1216,7 +1331,7 @@ static void qede_gro_receive(struct qede_dev *edev,
 
 send_skb:
        skb_record_rx_queue(skb, fp->rxq->rxq_id);
-       qede_skb_receive(edev, fp, skb, vlan_tag);
+       qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
 }
 
 static inline void qede_tpa_cont(struct qede_dev *edev,
@@ -1253,7 +1368,7 @@ static void qede_tpa_end(struct qede_dev *edev,
                DP_ERR(edev,
                       "Strange - TPA emd with more than a single len_list entry\n");
 
-       if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
                goto err;
 
        /* Sanity */
@@ -1267,14 +1382,9 @@ static void qede_tpa_end(struct qede_dev *edev,
                       le16_to_cpu(cqe->total_packet_len), skb->len);
 
        memcpy(skb->data,
-              page_address(tpa_info->start_buf.data) +
-               tpa_info->start_cqe.placement_offset +
-               tpa_info->start_buf.page_offset,
-              le16_to_cpu(tpa_info->start_cqe.len_on_first_bd));
-
-       /* Recycle [mapped] start buffer for the next replacement */
-       tpa_info->replace_buf = tpa_info->start_buf;
-       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
+              page_address(tpa_info->buffer.data) +
+              tpa_info->start_cqe_placement_offset +
+              tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
 
        /* Finalize the SKB */
        skb->protocol = eth_type_trans(skb, edev->ndev);
@@ -1287,18 +1397,11 @@ static void qede_tpa_end(struct qede_dev *edev,
 
        qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
 
-       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+       tpa_info->state = QEDE_AGG_STATE_NONE;
 
        return;
 err:
-       /* The BD starting the aggregation is still mapped; Re-use it for
-        * future aggregations [as replacement buffer]
-        */
-       memcpy(&tpa_info->replace_buf, &tpa_info->start_buf,
-              sizeof(struct sw_rx_data));
-       tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping;
-       tpa_info->start_buf.data = NULL;
-       tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+       tpa_info->state = QEDE_AGG_STATE_NONE;
        dev_kfree_skb_any(tpa_info->skb);
        tpa_info->skb = NULL;
 }
@@ -1380,238 +1483,364 @@ static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
        return false;
 }
 
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct qede_rx_queue *rxq,
+                       struct bpf_prog *prog,
+                       struct sw_rx_data *bd,
+                       struct eth_fast_path_rx_reg_cqe *cqe)
 {
-       struct qede_dev *edev = fp->edev;
-       struct qede_rx_queue *rxq = fp->rxq;
-
-       u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
-       int rx_pkt = 0;
-       u8 csum_flag;
+       u16 len = le16_to_cpu(cqe->len_on_first_bd);
+       struct xdp_buff xdp;
+       enum xdp_action act;
 
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+       xdp.data = page_address(bd->data) + cqe->placement_offset;
+       xdp.data_end = xdp.data + len;
 
-       /* Memory barrier to prevent the CPU from doing speculative reads of CQE
-        * / BD in the while-loop before reading hw_comp_cons. If the CQE is
-        * read before it is written by FW, then FW writes CQE and SB, and then
-        * the CPU reads the hw_comp_cons, it will use an old CQE.
+       /* Queues currently always undergo a full reset, so until there's
+        * an atomic program replace, just mark the RCU read side for the
+        * map helpers.
         */
-       rmb();
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(prog, &xdp);
+       rcu_read_unlock();
 
-       /* Loop to complete all indicated BDs */
-       while (sw_comp_cons != hw_comp_cons) {
-               struct eth_fast_path_rx_reg_cqe *fp_cqe;
-               enum pkt_hash_types rxhash_type;
-               enum eth_rx_cqe_type cqe_type;
-               struct sw_rx_data *sw_rx_data;
-               union eth_rx_cqe *cqe;
-               struct sk_buff *skb;
-               struct page *data;
-               __le16 flags;
-               u16 len, pad;
-               u32 rx_hash;
-
-               /* Get the CQE from the completion ring */
-               cqe = (union eth_rx_cqe *)
-                       qed_chain_consume(&rxq->rx_comp_ring);
-               cqe_type = cqe->fast_path_regular.type;
-
-               if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
-                       edev->ops->eth_cqe_completion(
-                                       edev->cdev, fp->id,
-                                       (struct eth_slow_path_rx_cqe *)cqe);
-                       goto next_cqe;
+       if (act == XDP_PASS)
+               return true;
+
+       /* Count number of packets not to be passed to stack */
+       rxq->xdp_no_pass++;
+
+       switch (act) {
+       case XDP_TX:
+               /* We need the replacement buffer before transmit. */
+               if (qede_alloc_rx_buffer(rxq)) {
+                       qede_recycle_rx_bd_ring(rxq, 1);
+                       return false;
                }
 
-               if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
-                       switch (cqe_type) {
-                       case ETH_RX_CQE_TYPE_TPA_START:
-                               qede_tpa_start(edev, rxq,
-                                              &cqe->fast_path_tpa_start);
-                               goto next_cqe;
-                       case ETH_RX_CQE_TYPE_TPA_CONT:
-                               qede_tpa_cont(edev, rxq,
-                                             &cqe->fast_path_tpa_cont);
-                               goto next_cqe;
-                       case ETH_RX_CQE_TYPE_TPA_END:
-                               qede_tpa_end(edev, fp,
-                                            &cqe->fast_path_tpa_end);
-                               goto next_rx_only;
-                       default:
-                               break;
-                       }
+               /* Now, if transmission fails, we'd still have to drop the
+                * current buffer, as the replacement was already allocated.
+                */
+               if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+                       dma_unmap_page(rxq->dev, bd->mapping,
+                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       __free_page(bd->data);
                }
 
-               /* Get the data from the SW ring */
-               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-               data = sw_rx_data->data;
-
-               fp_cqe = &cqe->fast_path_regular;
-               len =  le16_to_cpu(fp_cqe->len_on_first_bd);
-               pad = fp_cqe->placement_offset;
-               flags = cqe->fast_path_regular.pars_flags.flags;
-
-               /* If this is an error packet then drop it */
-               parse_flag = le16_to_cpu(flags);
-
-               csum_flag = qede_check_csum(parse_flag);
-               if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
-                       if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
-                                                     parse_flag)) {
-                               rxq->rx_ip_frags++;
-                               goto alloc_skb;
-                       }
+               /* Regardless, we've consumed an Rx BD */
+               qede_rx_bd_ring_consume(rxq);
+               return false;
 
-                       DP_NOTICE(edev,
-                                 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
-                                 sw_comp_cons, parse_flag);
-                       rxq->rx_hw_errors++;
-                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
-                       goto next_cqe;
-               }
+       default:
+               bpf_warn_invalid_xdp_action(act);
+       case XDP_ABORTED:
+       case XDP_DROP:
+               qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+       }
 
-alloc_skb:
-               skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
-               if (unlikely(!skb)) {
-                       DP_NOTICE(edev,
-                                 "skb allocation failed, dropping incoming packet\n");
-                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
-                       rxq->rx_alloc_errors++;
-                       goto next_cqe;
+       return false;
+}
+
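For reference, qede_rx_xdp() above just dispatches on the verdict returned by bpf_prog_run_xdp(). A minimal, hypothetical XDP program that this path would execute is sketched below; it is restricted C built with clang -target bpf, the "prog"/"license" section names follow the usual samples/bpf convention, and none of it is part of this patch. Returning XDP_DROP exercises the recycle branch above, XDP_TX would reach qede_xdp_xmit(), and XDP_PASS falls through to the regular SKB path.

#include <linux/bpf.h>

#ifndef __section
#define __section(NAME) __attribute__((section(NAME), used))
#endif

__section("prog")
int xdp_drop_all(struct xdp_md *ctx)
{
        /* Swap in XDP_PASS or XDP_TX to exercise the other branches */
        return XDP_DROP;
}

char _license[] __section("license") = "GPL";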
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+                                           struct qede_rx_queue *rxq,
+                                           struct sw_rx_data *bd, u16 len,
+                                           u16 pad)
+{
+       unsigned int offset = bd->page_offset;
+       struct skb_frag_struct *frag;
+       struct page *page = bd->data;
+       unsigned int pull_len;
+       struct sk_buff *skb;
+       unsigned char *va;
+
+       /* Allocate a new SKB with a sufficiently large header len */
+       skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Copy data into SKB - if it's small, we can simply copy it and
+        * re-use the already allocated & mapped memory.
+        */
+       if (len + pad <= edev->rx_copybreak) {
+               memcpy(skb_put(skb, len),
+                      page_address(page) + pad + offset, len);
+               qede_reuse_page(rxq, bd);
+               goto out;
+       }
+
+       frag = &skb_shinfo(skb)->frags[0];
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                       page, pad + offset, len, rxq->rx_buf_seg_size);
+
+       va = skb_frag_address(frag);
+       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+       /* Align the pull_len to optimize memcpy */
+       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+       /* Correct the skb and frag size/offset after the pull */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+
+       if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+               /* Incr page ref count to reuse on allocation failure so
+        * that it doesn't get freed while freeing SKB [as it's
+        * already mapped there].
+                */
+               page_ref_inc(page);
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+
+out:
+       /* We've consumed the first BD and prepared an SKB */
+       qede_rx_bd_ring_consume(rxq);
+       return skb;
+}
+
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+                              struct qede_rx_queue *rxq,
+                              struct sk_buff *skb,
+                              struct eth_fast_path_rx_reg_cqe *cqe,
+                              u16 first_bd_len)
+{
+       u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+       struct sw_rx_data *bd;
+       u16 bd_cons_idx;
+       u8 num_frags;
+
+       pkt_len -= first_bd_len;
+
+       /* We've already used one BD for the SKB. Now take care of the rest */
+       for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+               u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+                   pkt_len;
+
+               if (unlikely(!cur_size)) {
+                       DP_ERR(edev,
+                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                              num_frags);
+                       goto out;
                }
 
-               /* Copy data into SKB */
-               if (len + pad <= edev->rx_copybreak) {
-                       memcpy(skb_put(skb, len),
-                              page_address(data) + pad +
-                               sw_rx_data->page_offset, len);
-                       qede_reuse_page(edev, rxq, sw_rx_data);
+               /* We need a replacement buffer for each BD */
+               if (unlikely(qede_alloc_rx_buffer(rxq)))
+                       goto out;
+
+               /* Now that we've allocated the replacement buffer,
+                * we can safely consume the next BD and map it to the SKB.
+                */
+               bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               bd = &rxq->sw_rx_ring[bd_cons_idx];
+               qede_rx_bd_ring_consume(rxq);
+
+               dma_unmap_page(rxq->dev, bd->mapping,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+
+               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+                                  bd->data, 0, cur_size);
+
+               skb->truesize += PAGE_SIZE;
+               skb->data_len += cur_size;
+               skb->len += cur_size;
+               pkt_len -= cur_size;
+       }
+
+       if (unlikely(pkt_len))
+               DP_ERR(edev,
+                      "Mapped all BDs of jumbo, but still have %d bytes\n",
+                      pkt_len);
+
+out:
+       return num_frags;
+}
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+                                  struct qede_fastpath *fp,
+                                  struct qede_rx_queue *rxq,
+                                  union eth_rx_cqe *cqe,
+                                  enum eth_rx_cqe_type type)
+{
+       switch (type) {
+       case ETH_RX_CQE_TYPE_TPA_START:
+               qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_CONT:
+               qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_END:
+               qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int qede_rx_process_cqe(struct qede_dev *edev,
+                              struct qede_fastpath *fp,
+                              struct qede_rx_queue *rxq)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       u16 len, pad, bd_cons_idx, parse_flag;
+       enum eth_rx_cqe_type cqe_type;
+       union eth_rx_cqe *cqe;
+       struct sw_rx_data *bd;
+       struct sk_buff *skb;
+       __le16 flags;
+       u8 csum_flag;
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+       cqe_type = cqe->fast_path_regular.type;
+
+       /* Process an unlikely slowpath event */
+       if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+               struct eth_slow_path_rx_cqe *sp_cqe;
+
+               sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+               edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+               return 0;
+       }
+
+       /* Handle TPA cqes */
+       if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+               return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+       /* Get the data from the SW ring; Consume it only after it's evident
+        * we wouldn't recycle it.
+        */
+       bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+       bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+       fp_cqe = &cqe->fast_path_regular;
+       len = le16_to_cpu(fp_cqe->len_on_first_bd);
+       pad = fp_cqe->placement_offset;
+
+       /* Run eBPF program if one is attached */
+       if (xdp_prog)
+               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+                       return 1;
+
+       /* If this is an error packet then drop it */
+       flags = cqe->fast_path_regular.pars_flags.flags;
+       parse_flag = le16_to_cpu(flags);
+
+       csum_flag = qede_check_csum(parse_flag);
+       if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+               if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+                       rxq->rx_ip_frags++;
                } else {
-                       struct skb_frag_struct *frag;
-                       unsigned int pull_len;
-                       unsigned char *va;
-
-                       frag = &skb_shinfo(skb)->frags[0];
-
-                       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data,
-                                       pad + sw_rx_data->page_offset,
-                                       len, rxq->rx_buf_seg_size);
-
-                       va = skb_frag_address(frag);
-                       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
-                       /* Align the pull_len to optimize memcpy */
-                       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
-                       skb_frag_size_sub(frag, pull_len);
-                       frag->page_offset += pull_len;
-                       skb->data_len -= pull_len;
-                       skb->tail += pull_len;
-
-                       if (unlikely(qede_realloc_rx_buffer(edev, rxq,
-                                                           sw_rx_data))) {
-                               DP_ERR(edev, "Failed to allocate rx buffer\n");
-                               /* Incr page ref count to reuse on allocation
-                                * failure so that it doesn't get freed while
-                                * freeing SKB.
-                                */
-
-                               page_ref_inc(sw_rx_data->data);
-                               rxq->rx_alloc_errors++;
-                               qede_recycle_rx_bd_ring(rxq, edev,
-                                                       fp_cqe->bd_num);
-                               dev_kfree_skb_any(skb);
-                               goto next_cqe;
-                       }
+                       DP_NOTICE(edev,
+                                 "CQE has error, flags = %x, dropping incoming packet\n",
+                                 parse_flag);
+                       rxq->rx_hw_errors++;
+                       qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+                       return 0;
                }
+       }
 
-               qede_rx_bd_ring_consume(rxq);
+       /* Basic validation passed; need to prepare an SKB. This also
+        * guarantees that the first BD is finally consumed upon success.
+        */
+       skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+       if (!skb) {
+               rxq->rx_alloc_errors++;
+               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+               return 0;
+       }
 
-               if (fp_cqe->bd_num != 1) {
-                       u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
-                       u8 num_frags;
-
-                       pkt_len -= len;
-
-                       for (num_frags = fp_cqe->bd_num - 1; num_frags > 0;
-                            num_frags--) {
-                               u16 cur_size = pkt_len > rxq->rx_buf_size ?
-                                               rxq->rx_buf_size : pkt_len;
-                               if (unlikely(!cur_size)) {
-                                       DP_ERR(edev,
-                                              "Still got %d BDs for mapping jumbo, but length became 0\n",
-                                              num_frags);
-                                       qede_recycle_rx_bd_ring(rxq, edev,
-                                                               num_frags);
-                                       dev_kfree_skb_any(skb);
-                                       goto next_cqe;
-                               }
-
-                               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
-                                       qede_recycle_rx_bd_ring(rxq, edev,
-                                                               num_frags);
-                                       dev_kfree_skb_any(skb);
-                                       goto next_cqe;
-                               }
-
-                               sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-                               sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-                               qede_rx_bd_ring_consume(rxq);
-
-                               dma_unmap_page(&edev->pdev->dev,
-                                              sw_rx_data->mapping,
-                                              PAGE_SIZE, DMA_FROM_DEVICE);
-
-                               skb_fill_page_desc(skb,
-                                                  skb_shinfo(skb)->nr_frags++,
-                                                  sw_rx_data->data, 0,
-                                                  cur_size);
-
-                               skb->truesize += PAGE_SIZE;
-                               skb->data_len += cur_size;
-                               skb->len += cur_size;
-                               pkt_len -= cur_size;
-                       }
+       /* In the case of a jumbo packet, several PAGE_SIZE'd buffers will be
+        * referenced by a single CQE.
+        */
+       if (fp_cqe->bd_num > 1) {
+               u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+                                                        fp_cqe, len);
 
-                       if (unlikely(pkt_len))
-                               DP_ERR(edev,
-                                      "Mapped all BDs of jumbo, but still have %d bytes\n",
-                                      pkt_len);
+               if (unlikely(unmapped_frags > 0)) {
+                       qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+                       dev_kfree_skb_any(skb);
+                       return 0;
                }
+       }
 
-               skb->protocol = eth_type_trans(skb, edev->ndev);
+       /* The SKB contains all the data. Now prepare meta-magic */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+       qede_set_skb_csum(skb, csum_flag);
+       skb_record_rx_queue(skb, rxq->rxq_id);
 
-               rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
-                                         fp_cqe->rss_hash, &rxhash_type);
+       /* SKB is prepared - pass it to stack */
+       qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
 
-               skb_set_hash(skb, rx_hash, rxhash_type);
+       return 1;
+}
 
-               qede_set_skb_csum(skb, csum_flag);
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_dev *edev = fp->edev;
+       u16 hw_comp_cons, sw_comp_cons;
+       int work_done = 0;
 
-               skb_record_rx_queue(skb, fp->rxq->rxq_id);
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
 
-               qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-next_rx_only:
-               rx_pkt++;
+       /* Memory barrier to prevent the CPU from doing speculative reads of CQE
+        * / BD in the while-loop before reading hw_comp_cons. If the CQE is
+        * read before it is written by FW, then FW writes CQE and SB, and then
+        * the CPU reads the hw_comp_cons, it will use an old CQE.
+        */
+       rmb();
 
-next_cqe: /* don't consume bd rx buffer */
+       /* Loop to complete all indicated BDs */
+       while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+               qede_rx_process_cqe(edev, fp, rxq);
                qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-               /* CR TPA - revisit how to handle budget in TPA perhaps
-                * increase on "end"
-                */
-               if (rx_pkt == budget)
-                       break;
-       } /* repeat while sw_comp_cons != hw_comp_cons... */
+               work_done++;
+       }
 
        /* Update producers */
        qede_update_rx_prod(edev, rxq);
 
-       rxq->rcv_pkts += rx_pkt;
+       return work_done;
+}
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+       qed_sb_update_sb_idx(fp->sb_info);
+
+       /* *_has_*_work() reads the status block, thus we need to ensure that
+        * status block indices have been actually read (qed_sb_update_sb_idx)
+        * prior to this check (*_has_*_work) so that we won't write the
+        * "newer" value of the status block to HW (if there was a DMA right
+        * after qede_has_rx_work and if there is no rmb, the memory reading
+        * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
+        * In this case there will never be another interrupt until there is
+        * another update of the status block, while there is still unhandled
+        * work.
+        */
+       rmb();
+
+       if (likely(fp->type & QEDE_FASTPATH_RX))
+               if (qede_has_rx_work(fp->rxq))
+                       return true;
 
-       return rx_pkt;
+       if (fp->type & QEDE_FASTPATH_XDP)
+               if (qede_txq_has_work(fp->xdp_tx))
+                       return true;
+
+       if (likely(fp->type & QEDE_FASTPATH_TX))
+               if (qede_txq_has_work(fp->txq))
+                       return true;
+
+       return false;
 }
 
 static int qede_poll(struct napi_struct *napi, int budget)
@@ -1620,48 +1849,35 @@ static int qede_poll(struct napi_struct *napi, int budget)
                                                napi);
        struct qede_dev *edev = fp->edev;
        int rx_work_done = 0;
-       u8 tc;
 
-       for (tc = 0; tc < edev->num_tc; tc++)
-               if (likely(fp->type & QEDE_FASTPATH_TX) &&
-                   qede_txq_has_work(&fp->txqs[tc]))
-                       qede_tx_int(edev, &fp->txqs[tc]);
+       if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+               qede_tx_int(edev, fp->txq);
+
+       if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+               qede_xdp_tx_int(edev, fp->xdp_tx);
 
        rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
                        qede_has_rx_work(fp->rxq)) ?
                        qede_rx_int(fp, budget) : 0;
        if (rx_work_done < budget) {
-               qed_sb_update_sb_idx(fp->sb_info);
-               /* *_has_*_work() reads the status block,
-                * thus we need to ensure that status block indices
-                * have been actually read (qed_sb_update_sb_idx)
-                * prior to this check (*_has_*_work) so that
-                * we won't write the "newer" value of the status block
-                * to HW (if there was a DMA right after
-                * qede_has_rx_work and if there is no rmb, the memory
-                * reading (qed_sb_update_sb_idx) may be postponed
-                * to right before *_ack_sb). In this case there
-                * will never be another interrupt until there is
-                * another update of the status block, while there
-                * is still unhandled work.
-                */
-               rmb();
-
-               /* Fall out from the NAPI loop if needed */
-               if (!((likely(fp->type & QEDE_FASTPATH_RX) &&
-                      qede_has_rx_work(fp->rxq)) ||
-                     (likely(fp->type & QEDE_FASTPATH_TX) &&
-                      qede_has_tx_work(fp)))) {
+               if (!qede_poll_is_more_work(fp)) {
                        napi_complete(napi);
 
                        /* Update and reenable interrupts */
-                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-                                  1 /*update*/);
+                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
                } else {
                        rx_work_done = budget;
                }
        }
 
+       if (fp->xdp_xmit) {
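+               /* Ring the XDP Tx doorbell once, covering all frames
+                * queued by XDP_TX during this poll.
+                */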
+               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+               fp->xdp_xmit = 0;
+               fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+               qede_update_tx_producer(fp->xdp_tx);
+       }
+
        return rx_work_done;
 }
 
@@ -1912,7 +2128,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan, *tmp;
-       int rc;
+       int rc = 0;
 
        DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
 
@@ -1936,6 +2152,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        }
 
        /* If interface is down, cache this VLAN ID and return */
+       __qede_lock(edev);
        if (edev->state != QEDE_STATE_OPEN) {
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, VLAN %d will be configured when interface is up\n",
@@ -1943,8 +2160,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
                if (vid != 0)
                        edev->non_configured_vlans++;
                list_add(&vlan->list, &edev->vlan_list);
-
-               return 0;
+               goto out;
        }
 
        /* Check for the filter limit.
@@ -1960,7 +2176,7 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
                        DP_ERR(edev, "Failed to configure VLAN %d\n",
                               vlan->vid);
                        kfree(vlan);
-                       return -EINVAL;
+                       goto out;
                }
                vlan->configured = true;
 
@@ -1977,7 +2193,9 @@ static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
 
        list_add(&vlan->list, &edev->vlan_list);
 
-       return 0;
+out:
+       __qede_unlock(edev);
+       return rc;
 }
 
 static void qede_del_vlan_from_list(struct qede_dev *edev,
@@ -2054,11 +2272,12 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
 {
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_vlan *vlan = NULL;
-       int rc;
+       int rc = 0;
 
        DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
 
        /* Find whether entry exists */
+       __qede_lock(edev);
        list_for_each_entry(vlan, &edev->vlan_list, list)
                if (vlan->vid == vid)
                        break;
@@ -2066,7 +2285,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        if (!vlan || (vlan->vid != vid)) {
                DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
                           "Vlan isn't configured\n");
-               return 0;
+               goto out;
        }
 
        if (edev->state != QEDE_STATE_OPEN) {
@@ -2076,7 +2295,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
                DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
                           "Interface is down, removing VLAN from list only\n");
                qede_del_vlan_from_list(edev, vlan);
-               return 0;
+               goto out;
        }
 
        /* Remove vlan */
@@ -2085,7 +2304,7 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
                                            vid);
                if (rc) {
                        DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
-                       return -EINVAL;
+                       goto out;
                }
        }
 
@@ -2096,6 +2315,8 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
         */
        rc = qede_configure_vlan_filters(edev);
 
+out:
+       __qede_unlock(edev);
        return rc;
 }
 
@@ -2125,7 +2346,13 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
        edev->accept_any_vlan = false;
 }
 
-static int qede_set_features(struct net_device *dev, netdev_features_t features)
+static void qede_set_features_reload(struct qede_dev *edev,
+                                    struct qede_reload_args *args)
+{
+       edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
 {
        struct qede_dev *edev = netdev_priv(dev);
        netdev_features_t changes = features ^ dev->features;
@@ -2139,9 +2366,23 @@ static int qede_set_features(struct net_device *dev, netdev_features_t features)
                        need_reload = edev->gro_disable;
        }
 
-       if (need_reload && netif_running(edev->ndev)) {
-               dev->features = features;
-               qede_reload(edev, NULL, NULL);
+       if (need_reload) {
+               struct qede_reload_args args;
+
+               args.u.features = features;
+               args.func = &qede_set_features_reload;
+
+               /* Make sure we actually need to reload.
+                * If an eBPF program is attached, FW aggregations are already
+                * disabled, so there's no need to reload.
+                */
+               __qede_lock(edev);
+               if (edev->xdp_prog)
+                       args.func(edev, &args);
+               else
+                       qede_reload(edev, &args, true);
+               __qede_unlock(edev);
+
                return 1;
        }
 
@@ -2218,6 +2459,82 @@ static void qede_udp_tunnel_del(struct net_device *dev,
        schedule_delayed_work(&edev->sp_task, 0);
 }
 
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+static netdev_features_t qede_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       if (skb->encapsulation) {
+               u8 l4_proto = 0;
+
+               switch (vlan_get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       l4_proto = ip_hdr(skb)->protocol;
+                       break;
+               case htons(ETH_P_IPV6):
+                       l4_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       return features;
+               }
+
+               /* Disable offloads for geneve tunnels, as HW can't parse
+                * a geneve header whose option length exceeds 32B.
+                */
+               if ((l4_proto == IPPROTO_UDP) &&
+                   ((skb_inner_mac_header(skb) -
+                     skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+                       return features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_GSO_MASK);
+       }
+
+       return features;
+}
+
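+/* Called by qede_reload() with the device quiesced, or directly if it's
+ * currently down; swaps in the new XDP program and drops the old reference.
+ */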
+static void qede_xdp_reload_func(struct qede_dev *edev,
+                                struct qede_reload_args *args)
+{
+       struct bpf_prog *old;
+
+       old = xchg(&edev->xdp_prog, args->u.new_prog);
+       if (old)
+               bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+       struct qede_reload_args args;
+
+       if (prog && prog->xdp_adjust_head) {
+               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* The caller already holds a reference on the bpf program for us */
+       args.func = &qede_xdp_reload_func;
+       args.u.new_prog = prog;
+       qede_reload(edev, &args, false);
+
+       return 0;
+}
+
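+/* ndo_xdp entry point; dispatches XDP setup/query commands */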
+static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return qede_xdp_set(edev, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!edev->xdp_prog;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
@@ -2242,6 +2559,8 @@ static const struct net_device_ops qede_netdev_ops = {
 #endif
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
+       .ndo_features_check = qede_features_check,
+       .ndo_xdp = qede_xdp,
 };
 
 /* -------------------------------------------------------------------------
@@ -2282,8 +2601,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
        memset(&edev->stats, 0, sizeof(edev->stats));
        memcpy(&edev->dev_info, info, sizeof(*info));
 
-       edev->num_tc = edev->dev_info.num_tc;
-
        INIT_LIST_HEAD(&edev->vlan_list);
 
        return edev;
@@ -2308,6 +2625,8 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        qede_set_ethtool_ops(ndev);
 
+       ndev->priv_flags |= IFF_UNICAST_FLT;
+
        /* user-changeable features */
        hw_features = NETIF_F_GRO | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2315,11 +2634,14 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        /* Encap features */
        hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
-                      NETIF_F_TSO_ECN;
+                      NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                      NETIF_F_GSO_GRE_CSUM;
        ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
                                NETIF_F_TSO6 | NETIF_F_GSO_GRE |
-                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+                               NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
+                               NETIF_F_GSO_UDP_TUNNEL_CSUM |
+                               NETIF_F_GSO_GRE_CSUM;
 
        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
@@ -2329,8 +2651,14 @@ static void qede_init_ndev(struct qede_dev *edev)
 
        ndev->hw_features = hw_features;
 
+       /* MTU range: 46 - 9600 */
+       ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+       ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
+
        /* Set network device HW mac */
        ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+
+       ndev->mtu = edev->dev_info.common.mtu;
 }
 
 /* This function converts from 32b param to two params of level and module
@@ -2370,7 +2698,8 @@ static void qede_free_fp_array(struct qede_dev *edev)
 
                        kfree(fp->sb_info);
                        kfree(fp->rxq);
-                       kfree(fp->txqs);
+                       kfree(fp->xdp_tx);
+                       kfree(fp->txq);
                }
                kfree(edev->fp_array);
        }
@@ -2403,7 +2732,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
        for_each_queue(i) {
                fp = &edev->fp_array[i];
 
-               fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+               fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
                if (!fp->sb_info) {
                        DP_NOTICE(edev, "sb info struct allocation failed\n");
                        goto err;
@@ -2420,21 +2749,22 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
-                       fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
-                                          GFP_KERNEL);
-                       if (!fp->txqs) {
-                               DP_NOTICE(edev,
-                                         "TXQ array allocation failed\n");
+                       fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
+                       if (!fp->txq)
                                goto err;
-                       }
                }
 
                if (fp->type & QEDE_FASTPATH_RX) {
-                       fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
-                       if (!fp->rxq) {
-                               DP_NOTICE(edev,
-                                         "RXQ struct allocation failed\n");
+                       fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+                       if (!fp->rxq)
                                goto err;
+
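+                       /* XDP needs a dedicated Tx queue per Rx queue for
+                        * forwarding frames.
+                        */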
+                       if (edev->xdp_prog) {
+                               fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+                                                    GFP_KERNEL);
+                               if (!fp->xdp_tx)
+                                       goto err;
+                               fp->type |= QEDE_FASTPATH_XDP;
                        }
                }
        }
@@ -2451,12 +2781,11 @@ static void qede_sp_task(struct work_struct *work)
                                             sp_task.work);
        struct qed_dev *cdev = edev->cdev;
 
-       mutex_lock(&edev->qede_lock);
+       __qede_lock(edev);
 
-       if (edev->state == QEDE_STATE_OPEN) {
-               if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+       if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+               if (edev->state == QEDE_STATE_OPEN)
                        qede_config_rx_mode(edev->ndev);
-       }
 
        if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
                struct qed_tunn_params tunn_params;
@@ -2476,16 +2805,16 @@ static void qede_sp_task(struct work_struct *work)
                qed_ops->tunn_config(cdev, &tunn_params);
        }
 
-       mutex_unlock(&edev->qede_lock);
+       __qede_unlock(edev);
 }
 
 static void qede_update_pf_params(struct qed_dev *cdev)
 {
        struct qed_pf_params pf_params;
 
-       /* 64 rx + 64 tx */
+       /* 64 rx + 64 tx + 64 XDP */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 128;
+       pf_params.eth_pf_params.num_cons = 192;
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2634,10 +2963,16 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
        pci_set_drvdata(pdev, NULL);
 
+       /* Release edev's reference to the XDP bpf program, if any */
+       if (edev->xdp_prog)
+               bpf_prog_put(edev->xdp_prog);
+
        free_netdev(ndev);
 
        /* Use global ops since we've freed edev */
        qed_ops->common->slowpath_stop(cdev);
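+       /* When the system is powering off, skip removing the qed device */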
+       if (system_state == SYSTEM_POWER_OFF)
+               return;
        qed_ops->common->remove(cdev);
 
        dev_info(&pdev->dev, "Ending qede_remove successfully\n");
@@ -2648,6 +2983,11 @@ static void qede_remove(struct pci_dev *pdev)
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
 }
 
+static void qede_shutdown(struct pci_dev *pdev)
+{
+       __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
 /* -------------------------------------------------------------------------
  * START OF LOAD / UNLOAD
  * -------------------------------------------------------------------------
@@ -2731,7 +3071,7 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
                data = rx_buf->data;
 
                dma_unmap_page(&edev->pdev->dev,
-                              rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
+                              rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
 
                rx_buf->data = NULL;
                __free_page(data);
@@ -2747,7 +3087,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+               struct sw_rx_data *replace_buf = &tpa_info->buffer;
 
                if (replace_buf->data) {
                        dma_unmap_page(&edev->pdev->dev,
@@ -2773,52 +3113,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
 }
 
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq)
-{
-       struct sw_rx_data *sw_rx_data;
-       struct eth_rx_bd *rx_bd;
-       dma_addr_t mapping;
-       struct page *data;
-
-       data = alloc_pages(GFP_ATOMIC, 0);
-       if (unlikely(!data)) {
-               DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
-               return -ENOMEM;
-       }
-
-       /* Map the entire page as it would be used
-        * for multiple RX buffer segment size mapping.
-        */
-       mapping = dma_map_page(&edev->pdev->dev, data, 0,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               __free_page(data);
-               DP_NOTICE(edev, "Failed to map Rx buffer\n");
-               return -ENOMEM;
-       }
-
-       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       sw_rx_data->page_offset = 0;
-       sw_rx_data->data = data;
-       sw_rx_data->mapping = mapping;
-
-       /* Advance PROD and get BD pointer */
-       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
-       WARN_ON(!rx_bd);
-       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
-       rxq->sw_rx_prod++;
-
-       return 0;
-}
-
 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        dma_addr_t mapping;
        int i;
 
+       /* Don't perform FW aggregations in case of XDP */
+       if (edev->xdp_prog)
+               edev->gro_disable = 1;
+
        if (edev->gro_disable)
                return 0;
 
@@ -2829,7 +3132,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
        for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
                struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
-               struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
+               struct sw_rx_data *replace_buf = &tpa_info->buffer;
 
                replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
                if (unlikely(!replace_buf->data)) {
@@ -2847,10 +3150,9 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
                }
 
                replace_buf->mapping = mapping;
-               tpa_info->replace_buf.page_offset = 0;
-
-               tpa_info->replace_buf_mapping = mapping;
-               tpa_info->agg_state = QEDE_AGG_STATE_NONE;
+               tpa_info->buffer.page_offset = 0;
+               tpa_info->buffer_mapping = mapping;
+               tpa_info->state = QEDE_AGG_STATE_NONE;
        }
 
        return 0;
@@ -2872,8 +3174,13 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        if (rxq->rx_buf_size > PAGE_SIZE)
                rxq->rx_buf_size = PAGE_SIZE;
 
-       /* Segment size to spilt a page in multiple equal parts */
-       rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       /* Segment size to split a page into multiple equal parts,
+        * unless XDP is used, in which case we use the entire page.
+        */
+       if (!edev->xdp_prog)
+               rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       else
+               rxq->rx_buf_seg_size = PAGE_SIZE;
 
        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -2909,7 +3216,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->num_rx_buffers; i++) {
-               rc = qede_alloc_rx_buffer(edev, rxq);
+               rc = qede_alloc_rx_buffer(rxq);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
@@ -2925,7 +3232,10 @@ err:
 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        /* Free the parallel SW ring */
-       kfree(txq->sw_tx_ring);
+       if (txq->is_xdp)
+               kfree(txq->sw_tx_ring.pages);
+       else
+               kfree(txq->sw_tx_ring.skbs);
 
        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
@@ -2934,17 +3244,22 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 /* This function allocates all memory needed per Tx queue */
 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       int size, rc;
        union eth_tx_bd_types *p_virt;
+       int size, rc;
 
        txq->num_tx_buffers = edev->q_num_tx_buffers;
 
        /* Allocate the parallel driver ring for Tx buffers */
-       size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
-       txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
-       if (!txq->sw_tx_ring) {
-               DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
-               goto err;
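+       /* XDP Tx queues track the Rx pages being forwarded rather than SKBs */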
+       if (txq->is_xdp) {
+               size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
+               txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.pages)
+                       goto err;
+       } else {
+               size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+               txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.skbs)
+                       goto err;
        }
 
        rc = edev->ops->common->chain_alloc(edev->cdev,
@@ -2966,16 +3281,13 @@ err:
 /* This function frees all memory of a single fp */
 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-       int tc;
-
        qede_free_mem_sb(edev, fp->sb_info);
 
        if (fp->type & QEDE_FASTPATH_RX)
                qede_free_mem_rxq(edev, fp->rxq);
 
        if (fp->type & QEDE_FASTPATH_TX)
-               for (tc = 0; tc < edev->num_tc; tc++)
-                       qede_free_mem_txq(edev, &fp->txqs[tc]);
+               qede_free_mem_txq(edev, fp->txq);
 }
 
 /* This function allocates all memory needed for a single fp (i.e. an entity
@@ -2983,28 +3295,31 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  */
 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-       int rc, tc;
+       int rc = 0;
 
        rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
        if (rc)
-               goto err;
+               goto out;
 
        if (fp->type & QEDE_FASTPATH_RX) {
                rc = qede_alloc_mem_rxq(edev, fp->rxq);
                if (rc)
-                       goto err;
+                       goto out;
+       }
+
+       if (fp->type & QEDE_FASTPATH_XDP) {
+               rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
+               if (rc)
+                       goto out;
        }
 
        if (fp->type & QEDE_FASTPATH_TX) {
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
-                       if (rc)
-                               goto err;
-               }
+               rc = qede_alloc_mem_txq(edev, fp->txq);
+               if (rc)
+                       goto out;
        }
 
-       return 0;
-err:
+out:
        return rc;
 }
 
@@ -3043,7 +3358,7 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
 static void qede_init_fp(struct qede_dev *edev)
 {
-       int queue_id, rxq_index = 0, txq_index = 0, tc;
+       int queue_id, rxq_index = 0, txq_index = 0;
        struct qede_fastpath *fp;
 
        for_each_queue(queue_id) {
@@ -3052,25 +3367,28 @@ static void qede_init_fp(struct qede_dev *edev)
                fp->edev = edev;
                fp->id = queue_id;
 
-               memset((void *)&fp->napi, 0, sizeof(fp->napi));
-
-               memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+                                                               rxq_index);
+                       fp->xdp_tx->is_xdp = 1;
+               }
 
                if (fp->type & QEDE_FASTPATH_RX) {
-                       memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
                        fp->rxq->rxq_id = rxq_index++;
+
+                       /* Determine how to map buffers for this queue */
+                       if (fp->type & QEDE_FASTPATH_XDP)
+                               fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+                       else
+                               fp->rxq->data_direction = DMA_FROM_DEVICE;
+                       fp->rxq->dev = &edev->pdev->dev;
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
-                       memset((void *)fp->txqs, 0,
-                              (edev->num_tc * sizeof(*fp->txqs)));
-                       for (tc = 0; tc < edev->num_tc; tc++) {
-                               fp->txqs[tc].index = txq_index +
-                                   tc * QEDE_TSS_COUNT(edev);
-                               if (edev->dev_info.is_legacy)
-                                       fp->txqs[tc].is_legacy = true;
-                       }
-                       txq_index++;
+                       fp->txq->index = txq_index++;
+                       if (edev->dev_info.is_legacy)
+                               fp->txq->is_legacy = 1;
+                       fp->txq->dev = &edev->pdev->dev;
                }
 
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -3238,11 +3556,18 @@ static int qede_drain_txq(struct qede_dev *edev,
        return 0;
 }
 
+static int qede_stop_txq(struct qede_dev *edev,
+                        struct qede_tx_queue *txq, int rss_id)
+{
+       return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
+}
+
 static int qede_stop_queues(struct qede_dev *edev)
 {
        struct qed_update_vport_params vport_update_params;
        struct qed_dev *cdev = edev->cdev;
-       int rc, tc, i;
+       struct qede_fastpath *fp;
+       int rc, i;
 
        /* Disable the vport */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
@@ -3259,53 +3584,49 @@ static int qede_stop_queues(struct qede_dev *edev)
 
        /* Flush Tx queues. If needed, request drain from MCP */
        for_each_queue(i) {
-               struct qede_fastpath *fp = &edev->fp_array[i];
+               fp = &edev->fp_array[i];
 
                if (fp->type & QEDE_FASTPATH_TX) {
-                       for (tc = 0; tc < edev->num_tc; tc++) {
-                               struct qede_tx_queue *txq = &fp->txqs[tc];
+                       rc = qede_drain_txq(edev, fp->txq, true);
+                       if (rc)
+                               return rc;
+               }
 
-                               rc = qede_drain_txq(edev, txq, true);
-                               if (rc)
-                                       return rc;
-                       }
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_drain_txq(edev, fp->xdp_tx, true);
+                       if (rc)
+                               return rc;
                }
        }
 
        /* Stop all Queues in reverse order */
        for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
-               struct qed_stop_rxq_params rx_params;
+               fp = &edev->fp_array[i];
 
                /* Stop the Tx Queue(s) */
-               if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
-                       for (tc = 0; tc < edev->num_tc; tc++) {
-                               struct qed_stop_txq_params tx_params;
-                               u8 val;
-
-                               tx_params.rss_id = i;
-                               val = edev->fp_array[i].txqs[tc].index;
-                               tx_params.tx_queue_id = val;
-                               rc = edev->ops->q_tx_stop(cdev, &tx_params);
-                               if (rc) {
-                                       DP_ERR(edev, "Failed to stop TXQ #%d\n",
-                                              tx_params.tx_queue_id);
-                                       return rc;
-                               }
-                       }
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       rc = qede_stop_txq(edev, fp->txq, i);
+                       if (rc)
+                               return rc;
                }
 
                /* Stop the Rx Queue */
-               if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
-                       memset(&rx_params, 0, sizeof(rx_params));
-                       rx_params.rss_id = i;
-                       rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;
-
-                       rc = edev->ops->q_rx_stop(cdev, &rx_params);
+               if (fp->type & QEDE_FASTPATH_RX) {
+                       rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
                        if (rc) {
                                DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
                                return rc;
                        }
                }
+
+               /* Stop the XDP forwarding queue */
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_stop_txq(edev, fp->xdp_tx, i);
+                       if (rc)
+                               return rc;
+
+                       bpf_prog_put(fp->rxq->xdp_prog);
+               }
        }
 
        /* Stop the vport */
@@ -3316,9 +3637,55 @@ static int qede_stop_queues(struct qede_dev *edev)
        return rc;
 }
 
+static int qede_start_txq(struct qede_dev *edev,
+                         struct qede_fastpath *fp,
+                         struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
+{
+       dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+       u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
+       struct qed_queue_start_common_params params;
+       struct qed_txq_start_ret_params ret_params;
+       int rc;
+
+       memset(&params, 0, sizeof(params));
+       memset(&ret_params, 0, sizeof(ret_params));
+
+       /* Let the XDP queue share the queue-zone with one of the regular Tx
+        * queues; we don't really care about its coalescing.
+        */
+       if (txq->is_xdp)
+               params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+       else
+               params.queue_id = txq->index;
+
+       params.sb = fp->sb_info->igu_sb_id;
+       params.sb_idx = sb_idx;
+
+       rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
+                                  page_cnt, &ret_params);
+       if (rc) {
+               DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
+               return rc;
+       }
+
+       txq->doorbell_addr = ret_params.p_doorbell;
+       txq->handle = ret_params.p_handle;
+
+       /* Determine the associated FW consumer address */
+       txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
+
+       /* Prepare the doorbell parameters */
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_ETH_TX_BD_PROD_CMD);
+       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+
+       return rc;
+}
+
 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
-       int rc, tc, i;
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
        struct qed_update_vport_params vport_update_params;
@@ -3326,6 +3693,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
        struct qed_dev_info *qed_info = &edev->dev_info.common;
        struct qed_start_vport_params start = {0};
        bool reset_rss_indir = false;
+       int rc, i;
 
        if (!edev->num_queues) {
                DP_ERR(edev,
@@ -3357,11 +3725,12 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                u32 page_cnt;
 
                if (fp->type & QEDE_FASTPATH_RX) {
+                       struct qed_rxq_start_ret_params ret_params;
                        struct qede_rx_queue *rxq = fp->rxq;
                        __le16 *val;
 
+                       memset(&ret_params, 0, sizeof(ret_params));
                        memset(&q_params, 0, sizeof(q_params));
-                       q_params.rss_id = i;
                        q_params.queue_id = rxq->rxq_id;
                        q_params.vport_id = 0;
                        q_params.sb = fp->sb_info->igu_sb_id;
@@ -3371,60 +3740,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                            qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
                        page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
 
-                       rc = edev->ops->q_rx_start(cdev, &q_params,
+                       rc = edev->ops->q_rx_start(cdev, i, &q_params,
                                                   rxq->rx_buf_size,
                                                   rxq->rx_bd_ring.p_phys_addr,
                                                   p_phys_table,
-                                                  page_cnt,
-                                                  &rxq->hw_rxq_prod_addr);
+                                                  page_cnt, &ret_params);
                        if (rc) {
                                DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
                                       rc);
                                return rc;
                        }
 
+                       /* Use the return parameters */
+                       rxq->hw_rxq_prod_addr = ret_params.p_prod;
+                       rxq->handle = ret_params.p_handle;
+
                        val = &fp->sb_info->sb_virt->pi_array[RX_PI];
                        rxq->hw_cons_ptr = val;
 
                        qede_update_rx_prod(edev, rxq);
                }
 
-               if (!(fp->type & QEDE_FASTPATH_TX))
-                       continue;
-
-               for (tc = 0; tc < edev->num_tc; tc++) {
-                       struct qede_tx_queue *txq = &fp->txqs[tc];
-
-                       p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
-                       page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
-
-                       memset(&q_params, 0, sizeof(q_params));
-                       q_params.rss_id = i;
-                       q_params.queue_id = txq->index;
-                       q_params.vport_id = 0;
-                       q_params.sb = fp->sb_info->igu_sb_id;
-                       q_params.sb_idx = TX_PI(tc);
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+                       if (rc)
+                               return rc;
 
-                       rc = edev->ops->q_tx_start(cdev, &q_params,
-                                                  p_phys_table, page_cnt,
-                                                  &txq->doorbell_addr);
-                       if (rc) {
-                               DP_ERR(edev, "Start TXQ #%d failed %d\n",
-                                      txq->index, rc);
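+                       /* Each Rx queue takes its own reference on the XDP
+                        * program; it's dropped in qede_stop_queues().
+                        */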
+                       fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
+                       if (IS_ERR(fp->rxq->xdp_prog)) {
+                               rc = PTR_ERR(fp->rxq->xdp_prog);
+                               fp->rxq->xdp_prog = NULL;
                                return rc;
                        }
+               }
 
-                       txq->hw_cons_ptr =
-                               &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
-                       SET_FIELD(txq->tx_db.data.params,
-                                 ETH_DB_DATA_DEST, DB_DEST_XCM);
-                       SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
-                                 DB_AGG_CMD_SET);
-                       SET_FIELD(txq->tx_db.data.params,
-                                 ETH_DB_DATA_AGG_VAL_SEL,
-                                 DQ_XCM_ETH_TX_BD_PROD_CMD);
-
-                       txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+               if (fp->type & QEDE_FASTPATH_TX) {
+                       rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
+                       if (rc)
+                               return rc;
                }
        }
 
@@ -3519,15 +3872,18 @@ enum qede_unload_mode {
        QEDE_UNLOAD_NORMAL,
 };
 
-static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+                       bool is_locked)
 {
        struct qed_link_params link_params;
        int rc;
 
        DP_INFO(edev, "Starting qede unload\n");
 
+       if (!is_locked)
+               __qede_lock(edev);
+
        qede_roce_dev_event_close(edev);
-       mutex_lock(&edev->qede_lock);
        edev->state = QEDE_STATE_CLOSED;
 
        /* Close OS Tx */
@@ -3559,7 +3915,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
        qede_free_fp_array(edev);
 
 out:
-       mutex_unlock(&edev->qede_lock);
+       if (!is_locked)
+               __qede_unlock(edev);
        DP_INFO(edev, "Ending qede unload\n");
 }
 
@@ -3568,7 +3925,8 @@ enum qede_load_mode {
        QEDE_LOAD_RELOAD,
 };
 
-static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+                    bool is_locked)
 {
        struct qed_link_params link_params;
        struct qed_link_output link_output;
@@ -3576,21 +3934,24 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
 
        DP_INFO(edev, "Starting qede load\n");
 
+       if (!is_locked)
+               __qede_lock(edev);
+
        rc = qede_set_num_queues(edev);
        if (rc)
-               goto err0;
+               goto out;
 
        rc = qede_alloc_fp_array(edev);
        if (rc)
-               goto err0;
+               goto out;
 
        qede_init_fp(edev);
 
        rc = qede_alloc_mem_load(edev);
        if (rc)
                goto err1;
-       DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
-               QEDE_QUEUE_CNT(edev), edev->num_tc);
+       DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+               QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
 
        rc = qede_set_real_num_queues(edev);
        if (rc)
@@ -3612,10 +3973,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
        /* Add primary mac and set Rx filters */
        ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
 
-       mutex_lock(&edev->qede_lock);
-       edev->state = QEDE_STATE_OPEN;
-       mutex_unlock(&edev->qede_lock);
-
        /* Program un-configured VLANs */
        qede_configure_vlan_filters(edev);
 
@@ -3630,10 +3987,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
        qede_roce_dev_event_open(edev);
        qede_link_update(edev, &link_output);
 
+       edev->state = QEDE_STATE_OPEN;
+
        DP_INFO(edev, "Ending successfully qede load\n");
 
-       return 0;
 
+       goto out;
 err4:
        qede_sync_free_irqs(edev);
        memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
@@ -3647,26 +4006,40 @@ err1:
        edev->num_queues = 0;
        edev->fp_num_tx = 0;
        edev->fp_num_rx = 0;
-err0:
+out:
+       if (!is_locked)
+               __qede_unlock(edev);
+
        return rc;
 }
 
+/* args->func() must be able to run either between unload and reload while
+ * the interface is running, or on its own if the interface is currently DOWN.
+ */
 void qede_reload(struct qede_dev *edev,
-                void (*func)(struct qede_dev *, union qede_reload_args *),
-                union qede_reload_args *args)
+                struct qede_reload_args *args, bool is_locked)
 {
-       qede_unload(edev, QEDE_UNLOAD_NORMAL);
-       /* Call function handler to update parameters
-        * needed for function load.
-        */
-       if (func)
-               func(edev, args);
+       if (!is_locked)
+               __qede_lock(edev);
 
-       qede_load(edev, QEDE_LOAD_RELOAD);
+       /* Since qede_lock is held, the internal state can't change even if
+        * the netdev state starts transitioning. If the current internal
+        * configuration indicates the device is up, reload it.
+        */
+       if (edev->state == QEDE_STATE_OPEN) {
+               qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
+               if (args)
+                       args->func(edev, args);
+               qede_load(edev, QEDE_LOAD_RELOAD, true);
+
+               /* No one else will re-configure Rx mode for us, so do it here */
+               qede_config_rx_mode(edev->ndev);
+       } else if (args) {
+               args->func(edev, args);
+       }
 
-       mutex_lock(&edev->qede_lock);
-       qede_config_rx_mode(edev->ndev);
-       mutex_unlock(&edev->qede_lock);
+       if (!is_locked)
+               __qede_unlock(edev);
 }
 
 /* called with rtnl_lock */
@@ -3679,13 +4052,14 @@ static int qede_open(struct net_device *ndev)
 
        edev->ops->common->set_power_state(edev->cdev, PCI_D0);
 
-       rc = qede_load(edev, QEDE_LOAD_NORMAL);
-
+       rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
        if (rc)
                return rc;
 
        udp_tunnel_get_rx_info(ndev);
 
+       edev->ops->common->update_drv_state(edev->cdev, true);
+
        return 0;
 }
 
@@ -3693,7 +4067,9 @@ static int qede_close(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
 
-       qede_unload(edev, QEDE_UNLOAD_NORMAL);
+       qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
+
+       edev->ops->common->update_drv_state(edev->cdev, false);
 
        return 0;
 }
@@ -3755,6 +4131,8 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
        if (rc)
                return rc;
 
+       edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
        /* Add MAC filter according to the new unicast HW MAC address */
        ether_addr_copy(edev->primary_mac, ndev->dev_addr);
        return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
@@ -3821,15 +4199,8 @@ static void qede_set_rx_mode(struct net_device *ndev)
 {
        struct qede_dev *edev = netdev_priv(ndev);
 
-       DP_INFO(edev, "qede_set_rx_mode called\n");
-
-       if (edev->state != QEDE_STATE_OPEN) {
-               DP_INFO(edev,
-                       "qede_set_rx_mode called while interface is down\n");
-       } else {
-               set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
-               schedule_delayed_work(&edev->sp_task, 0);
-       }
+       set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
 }
 
 /* Must be called with qede_lock held */
@@ -3877,7 +4248,7 @@ static void qede_config_rx_mode(struct net_device *ndev)
 
        /* Check for promiscuous */
        if ((ndev->flags & IFF_PROMISC) ||
-           (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
+           (uc_count > edev->dev_info.num_mac_filters - 1)) {
                accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
        } else {
                /* Add MAC filters according to the unicast secondary macs */