bpf: xdp: Allow head adjustment in XDP prog
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 83492117861539c3d58aa572c6a7eecf7e9166ab..aecdd1c5c0ea24a368085c0b2a41f5891ce55ac1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -94,10 +94,11 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
 
 #define TX_TIMEOUT             (5 * HZ)
 
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
 static void qede_remove(struct pci_dev *pdev);
 static void qede_shutdown(struct pci_dev *pdev);
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq);
 static void qede_link_update(void *dev, struct qed_link_output *link);
 
 /* The qede lock is used to protect driver state change and driver flows that
@@ -303,12 +304,12 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
                            struct qede_tx_queue *txq, int *len)
 {
        u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
        struct eth_tx_1st_bd *first_bd;
        struct eth_tx_bd *tx_data_bd;
        int bds_consumed = 0;
        int nbds;
-       bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
+       bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
        int i, split_bd_len = 0;
 
        if (unlikely(!skb)) {
@@ -348,20 +349,19 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
 
        /* Free skb */
        dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
 
        return 0;
 }
 
 /* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_dev *edev,
-                                   struct qede_tx_queue *txq,
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
                                    struct eth_tx_1st_bd *first_bd,
                                    int nbd, bool data_split)
 {
        u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
        struct eth_tx_bd *tx_data_bd;
        int i, split_bd_len = 0;
 
@@ -378,7 +378,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                nbd--;
        }
 
-       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+       dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
                         BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
 
        /* Unmap the data of the skb frags */
@@ -386,7 +386,7 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                tx_data_bd = (struct eth_tx_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                if (tx_data_bd->nbytes)
-                       dma_unmap_page(&edev->pdev->dev,
+                       dma_unmap_page(txq->dev,
                                       BD_UNMAP_ADDR(tx_data_bd),
                                       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
        }
@@ -397,12 +397,11 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
 
        /* Free skb */
        dev_kfree_skb_any(skb);
-       txq->sw_tx_ring[idx].skb = NULL;
-       txq->sw_tx_ring[idx].flags = 0;
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
 }
 
-static u32 qede_xmit_type(struct qede_dev *edev,
-                         struct sk_buff *skb, int *ipv6_ext)
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
 {
        u32 rc = XMIT_L4_CSUM;
        __be16 l3_proto;
@@ -469,18 +468,16 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
        second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
 }
 
-static int map_frag_to_bd(struct qede_dev *edev,
+static int map_frag_to_bd(struct qede_tx_queue *txq,
                          skb_frag_t *frag, struct eth_tx_bd *bd)
 {
        dma_addr_t mapping;
 
        /* Map skb non-linear frag data for DMA */
-       mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
+       mapping = skb_frag_dma_map(txq->dev, frag, 0,
                                   skb_frag_size(frag), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
+       if (unlikely(dma_mapping_error(txq->dev, mapping)))
                return -ENOMEM;
-       }
 
        /* Setup the data pointer of the frag data */
        BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
@@ -500,8 +497,7 @@ static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
 
 /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
-                            u8 xmit_type)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
 {
        int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
 
@@ -537,6 +533,47 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
        mmiowb();
 }
 
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+                        struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+       struct qede_tx_queue *txq = fp->xdp_tx;
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct eth_tx_1st_bd *first_bd;
+
+       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+               txq->stopped_cnt++;
+               return -ENOMEM;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+       first_bd->data.bitfields |=
+           (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+           ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       first_bd->data.nbds = 1;
+
+       /* We can safely ignore the offset, as it's 0 for XDP */
+       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+       /* Synchronize the buffer back to device, as program [probably]
+        * has changed it.
+        */
+       dma_sync_single_for_device(&edev->pdev->dev,
+                                  metadata->mapping + padding,
+                                  length, PCI_DMA_TODEVICE);
+
+       txq->sw_tx_ring.pages[idx] = metadata->data;
+       txq->sw_tx_prod++;
+
+       /* Mark the fastpath for future XDP doorbell */
+       fp->xdp_xmit = 1;
+
+       return 0;
+}
+
 /* Main transmit function */
 static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                                   struct net_device *ndev)
@@ -565,10 +602,10 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
        WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
 
-       xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);
+       xmit_type = qede_xmit_type(skb, &ipv6_ext);
 
 #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-       if (qede_pkt_req_lin(edev, skb, xmit_type)) {
+       if (qede_pkt_req_lin(skb, xmit_type)) {
                if (skb_linearize(skb)) {
                        DP_NOTICE(edev,
                                  "SKB linearization failed - silently dropping this SKB\n");
@@ -580,7 +617,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
        /* Fill the entry in the SW ring and the BDs in the FW ring */
        idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       txq->sw_tx_ring[idx].skb = skb;
+       txq->sw_tx_ring.skbs[idx].skb = skb;
        first_bd = (struct eth_tx_1st_bd *)
                   qed_chain_produce(&txq->tx_pbl);
        memset(first_bd, 0, sizeof(*first_bd));
@@ -588,11 +625,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 
        /* Map skb linear data for DMA and set in the first BD */
-       mapping = dma_map_single(&edev->pdev->dev, skb->data,
+       mapping = dma_map_single(txq->dev, skb->data,
                                 skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+       if (unlikely(dma_mapping_error(txq->dev, mapping))) {
                DP_NOTICE(edev, "SKB mapping failed\n");
-               qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+               qede_free_failed_tx_pkt(txq, first_bd, 0, false);
                qede_update_tx_producer(txq);
                return NETDEV_TX_OK;
        }
@@ -700,7 +737,7 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                        /* this marks the BD as one that has no
                         * individual mapping
                         */
-                       txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
+                       txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
 
                        first_bd->nbytes = cpu_to_le16(hlen);
 
@@ -716,12 +753,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        /* Handle fragmented skb */
        /* special handle for frags inside 2nd and 3rd bds.. */
        while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
-               rc = map_frag_to_bd(edev,
+               rc = map_frag_to_bd(txq,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
                        qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
@@ -741,12 +777,11 @@ static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
 
                memset(tx_data_bd, 0, sizeof(*tx_data_bd));
 
-               rc = map_frag_to_bd(edev,
+               rc = map_frag_to_bd(txq,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
-                       qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
-                                               data_split);
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
                        qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
@@ -811,6 +846,27 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
        return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
 }
 
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       struct eth_tx_1st_bd *bd;
+       u16 hw_bd_cons;
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+               dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+                                PAGE_SIZE, DMA_BIDIRECTIONAL);
+               __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+                                                 NUM_TX_BDS_MAX]);
+
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+}
+
 static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        struct netdev_queue *netdev_txq;
@@ -903,8 +959,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
-static inline void qede_reuse_page(struct qede_dev *edev,
-                                  struct qede_rx_queue *rxq,
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
                                   struct sw_rx_data *curr_cons)
 {
        struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
@@ -926,27 +981,62 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 /* In case of allocation failures reuse buffers
  * from consumer index to produce buffers for firmware
  */
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
-                            struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
 {
        struct sw_rx_data *curr_cons;
 
        for (; count > 0; count--) {
                curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-               qede_reuse_page(edev, rxq, curr_cons);
+               qede_reuse_page(rxq, curr_cons);
                qede_rx_bd_ring_consume(rxq);
        }
 }
 
-static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
-                                        struct qede_rx_queue *rxq,
+static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       struct page *data;
+
+       data = alloc_pages(GFP_ATOMIC, 0);
+       if (unlikely(!data))
+               return -ENOMEM;
+
+       /* Map the entire page as it would be used
+        * for multiple RX buffer segment size mapping.
+        */
+       mapping = dma_map_page(rxq->dev, data, 0,
+                              PAGE_SIZE, rxq->data_direction);
+       if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+               __free_page(data);
+               return -ENOMEM;
+       }
+
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->page_offset = 0;
+       sw_rx_data->data = data;
+       sw_rx_data->mapping = mapping;
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+       rxq->sw_rx_prod++;
+
+       return 0;
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
                                         struct sw_rx_data *curr_cons)
 {
        /* Move to the next segment in the page */
        curr_cons->page_offset += rxq->rx_buf_seg_size;
 
        if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+               if (unlikely(qede_alloc_rx_buffer(rxq))) {
                        /* Since we failed to allocate new buffer
                         * current buffer can be used again.
                         */
@@ -955,15 +1045,15 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
                        return -ENOMEM;
                }
 
-               dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(rxq->dev, curr_cons->mapping,
+                              PAGE_SIZE, rxq->data_direction);
        } else {
                /* Increment refcount of the page as we don't want
                 * network stack to take the ownership of the page
                 * which can be recycled multiple times by the driver.
                 */
                page_ref_inc(curr_cons->data);
-               qede_reuse_page(edev, rxq, curr_cons);
+               qede_reuse_page(rxq, curr_cons);
        }
 
        return 0;
@@ -997,22 +1087,20 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
        mmiowb();
 }
 
-static u32 qede_get_rxhash(struct qede_dev *edev,
-                          u8 bitfields,
-                          __le32 rss_hash, enum pkt_hash_types *rxhash_type)
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
 {
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
        enum rss_hash_type htype;
+       u32 hash = 0;
 
        htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-
-       if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
-               *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
-                               (htype == RSS_HASH_TYPE_IPV6)) ?
-                               PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
-               return le32_to_cpu(rss_hash);
+       if (htype) {
+               hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                            (htype == RSS_HASH_TYPE_IPV6)) ?
+                           PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               hash = le32_to_cpu(rss_hash);
        }
-       *rxhash_type = PKT_HASH_TYPE_NONE;
-       return 0;
+       skb_set_hash(skb, hash, hash_type);
 }
 
 static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
@@ -1028,6 +1116,7 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
 
 static inline void qede_skb_receive(struct qede_dev *edev,
                                    struct qede_fastpath *fp,
+                                   struct qede_rx_queue *rxq,
                                    struct sk_buff *skb, u16 vlan_tag)
 {
        if (vlan_tag)
@@ -1070,7 +1159,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
                           current_bd->data, current_bd->page_offset,
                           len_on_bd);
 
-       if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
+       if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
                /* Incr page ref count to reuse on allocation failure
                 * so that it doesn't get freed while freeing SKB.
                 */
@@ -1089,7 +1178,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 
 out:
        tpa_info->state = QEDE_AGG_STATE_ERROR;
-       qede_recycle_rx_bd_ring(rxq, edev, 1);
+       qede_recycle_rx_bd_ring(rxq, 1);
+
        return -ENOMEM;
 }
 
@@ -1104,8 +1194,6 @@ static void qede_tpa_start(struct qede_dev *edev,
        dma_addr_t mapping = tpa_info->buffer_mapping;
        struct sw_rx_data *sw_rx_data_cons;
        struct sw_rx_data *sw_rx_data_prod;
-       enum pkt_hash_types rxhash_type;
-       u32 rxhash;
 
        sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
        sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
@@ -1150,10 +1238,6 @@ static void qede_tpa_start(struct qede_dev *edev,
        tpa_info->frag_id = 0;
        tpa_info->state = QEDE_AGG_STATE_START;
 
-       rxhash = qede_get_rxhash(edev, cqe->bitfields,
-                                cqe->rss_hash, &rxhash_type);
-       skb_set_hash(tpa_info->skb, rxhash, rxhash_type);
-
        /* Store some information from first CQE */
        tpa_info->start_cqe_placement_offset = cqe->placement_offset;
        tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
@@ -1164,6 +1248,8 @@ static void qede_tpa_start(struct qede_dev *edev,
        else
                tpa_info->vlan_tag = 0;
 
+       qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
        /* This is needed in order to enable forwarding support */
        qede_set_gro_params(edev, tpa_info->skb, cqe);
 
@@ -1226,7 +1312,7 @@ static void qede_gro_receive(struct qede_dev *edev,
 
 #ifdef CONFIG_INET
        if (skb_shinfo(skb)->gso_size) {
-               skb_set_network_header(skb, 0);
+               skb_reset_network_header(skb);
 
                switch (skb->protocol) {
                case htons(ETH_P_IP):
@@ -1245,7 +1331,7 @@ static void qede_gro_receive(struct qede_dev *edev,
 
 send_skb:
        skb_record_rx_queue(skb, fp->rxq->rxq_id);
-       qede_skb_receive(edev, fp, skb, vlan_tag);
+       qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
 }
 
 static inline void qede_tpa_cont(struct qede_dev *edev,
@@ -1397,6 +1483,66 @@ static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
        return false;
 }
 
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct qede_rx_queue *rxq,
+                       struct bpf_prog *prog,
+                       struct sw_rx_data *bd,
+                       struct eth_fast_path_rx_reg_cqe *cqe)
+{
+       u16 len = le16_to_cpu(cqe->len_on_first_bd);
+       struct xdp_buff xdp;
+       enum xdp_action act;
+
+       xdp.data = page_address(bd->data) + cqe->placement_offset;
+       xdp.data_end = xdp.data + len;
+
+       /* Queues always have a full reset currently, so for the time
+        * being until there's atomic program replace just mark read
+        * side for map helpers.
+        */
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(prog, &xdp);
+       rcu_read_unlock();
+
+       if (act == XDP_PASS)
+               return true;
+
+       /* Count number of packets not to be passed to stack */
+       rxq->xdp_no_pass++;
+
+       switch (act) {
+       case XDP_TX:
+               /* We need the replacement buffer before transmit. */
+               if (qede_alloc_rx_buffer(rxq)) {
+                       qede_recycle_rx_bd_ring(rxq, 1);
+                       return false;
+               }
+
+               /* Now if there's a transmission problem, we'd still have to
+                * throw current buffer, as replacement was already allocated.
+                */
+               if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+                       dma_unmap_page(rxq->dev, bd->mapping,
+                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       __free_page(bd->data);
+               }
+
+               /* Regardless, we've consumed an Rx BD */
+               qede_rx_bd_ring_consume(rxq);
+               return false;
+
+       default:
+               bpf_warn_invalid_xdp_action(act);
+       case XDP_ABORTED:
+       case XDP_DROP:
+               qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+       }
+
+       return false;
+}
+
 static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
                                            struct qede_rx_queue *rxq,
                                            struct sw_rx_data *bd, u16 len,
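
For context, the XDP_TX branch above is what qede_xdp_xmit() services: the program may rewrite the frame in place and ask the driver to bounce it back out, which is also why the buffer is dma_sync'ed back to the device before transmit. A minimal sketch of such a program follows; it is not part of this diff, and the program name and build flow (clang -target bpf plus libbpf's bpf_helpers.h) are assumptions.

/* Illustrative only -- not taken from this patch. Reflects every frame
 * back out of the receive port after swapping the Ethernet addresses;
 * returning XDP_TX from here drives the qede_xdp_xmit() path above.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_reflect(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;
	unsigned char tmp[ETH_ALEN];

	/* The verifier requires an explicit bounds check before access */
	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	__builtin_memcpy(tmp, eth->h_dest, ETH_ALEN);
	__builtin_memcpy(eth->h_dest, eth->h_source, ETH_ALEN);
	__builtin_memcpy(eth->h_source, tmp, ETH_ALEN);

	return XDP_TX;
}

char _license[] SEC("license") = "GPL";

A pass-through variant would simply return XDP_PASS, in which case qede_rx_process_cqe() continues building the skb as before; XDP_DROP and XDP_ABORTED recycle the Rx BD via qede_recycle_rx_bd_ring().
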
@@ -1420,7 +1566,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
        if (len + pad <= edev->rx_copybreak) {
                memcpy(skb_put(skb, len),
                       page_address(page) + pad + offset, len);
-               qede_reuse_page(edev, rxq, bd);
+               qede_reuse_page(rxq, bd);
                goto out;
        }
 
@@ -1441,7 +1587,7 @@ static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
        skb->data_len -= pull_len;
        skb->tail += pull_len;
 
-       if (unlikely(qede_realloc_rx_buffer(edev, rxq, bd))) {
+       if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
                /* Incr page ref count to reuse on allocation failure so
                 * that it doesn't get freed while freeing SKB [as its
                 * already mapped there].
@@ -1483,7 +1629,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
                }
 
                /* We need a replacement buffer for each BD */
-               if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+               if (unlikely(qede_alloc_rx_buffer(rxq)))
                        goto out;
 
                /* Now that we've allocated the replacement buffer,
@@ -1493,7 +1639,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev,
                bd = &rxq->sw_rx_ring[bd_cons_idx];
                qede_rx_bd_ring_consume(rxq);
 
-               dma_unmap_page(&edev->pdev->dev, bd->mapping,
+               dma_unmap_page(rxq->dev, bd->mapping,
                               PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
@@ -1539,16 +1685,15 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
                               struct qede_fastpath *fp,
                               struct qede_rx_queue *rxq)
 {
+       struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
        struct eth_fast_path_rx_reg_cqe *fp_cqe;
        u16 len, pad, bd_cons_idx, parse_flag;
-       enum pkt_hash_types rxhash_type;
        enum eth_rx_cqe_type cqe_type;
        union eth_rx_cqe *cqe;
        struct sw_rx_data *bd;
        struct sk_buff *skb;
        __le16 flags;
        u8 csum_flag;
-       u32 rx_hash;
 
        /* Get the CQE from the completion ring */
        cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
@@ -1577,6 +1722,11 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
        len = le16_to_cpu(fp_cqe->len_on_first_bd);
        pad = fp_cqe->placement_offset;
 
+       /* Run eBPF program if one is attached */
+       if (xdp_prog)
+               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+                       return 1;
+
        /* If this is an error packet then drop it */
        flags = cqe->fast_path_regular.pars_flags.flags;
        parse_flag = le16_to_cpu(flags);
@@ -1590,7 +1740,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
                                  "CQE has error, flags = %x, dropping incoming packet\n",
                                  parse_flag);
                        rxq->rx_hw_errors++;
-                       qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+                       qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
                        return 0;
                }
        }
@@ -1601,7 +1751,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
        skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
        if (!skb) {
                rxq->rx_alloc_errors++;
-               qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
                return 0;
        }
 
@@ -1613,7 +1763,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
                                                         fp_cqe, len);
 
                if (unlikely(unmapped_frags > 0)) {
-                       qede_recycle_rx_bd_ring(rxq, edev, unmapped_frags);
+                       qede_recycle_rx_bd_ring(rxq, unmapped_frags);
                        dev_kfree_skb_any(skb);
                        return 0;
                }
@@ -1621,14 +1771,12 @@ static int qede_rx_process_cqe(struct qede_dev *edev,
 
        /* The SKB contains all the data. Now prepare meta-magic */
        skb->protocol = eth_type_trans(skb, edev->ndev);
-       rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
-                                 fp_cqe->rss_hash, &rxhash_type);
-       skb_set_hash(skb, rx_hash, rxhash_type);
+       qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
        qede_set_skb_csum(skb, csum_flag);
        skb_record_rx_queue(skb, rxq->rxq_id);
 
        /* SKB is prepared - pass it to stack */
-       qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
+       qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
 
        return 1;
 }
@@ -1684,6 +1832,10 @@ static bool qede_poll_is_more_work(struct qede_fastpath *fp)
                if (qede_has_rx_work(fp->rxq))
                        return true;
 
+       if (fp->type & QEDE_FASTPATH_XDP)
+               if (qede_txq_has_work(fp->xdp_tx))
+                       return true;
+
        if (likely(fp->type & QEDE_FASTPATH_TX))
                if (qede_txq_has_work(fp->txq))
                        return true;
@@ -1701,6 +1853,9 @@ static int qede_poll(struct napi_struct *napi, int budget)
        if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
                qede_tx_int(edev, fp->txq);
 
+       if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+               qede_xdp_tx_int(edev, fp->xdp_tx);
+
        rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
                        qede_has_rx_work(fp->rxq)) ?
                        qede_rx_int(fp, budget) : 0;
@@ -1715,6 +1870,14 @@ static int qede_poll(struct napi_struct *napi, int budget)
                }
        }
 
+       if (fp->xdp_xmit) {
+               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+               fp->xdp_xmit = 0;
+               fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+               qede_update_tx_producer(fp->xdp_tx);
+       }
+
        return rx_work_done;
 }
 
@@ -2209,7 +2372,16 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
                args.u.features = features;
                args.func = &qede_set_features_reload;
 
-               qede_reload(edev, &args, false);
+               /* Make sure that we definitely need to reload.
+                * In case of an eBPF attached program, there will be no FW
+                * aggregations, so no need to actually reload.
+                */
+               __qede_lock(edev);
+               if (edev->xdp_prog)
+                       args.func(edev, &args);
+               else
+                       qede_reload(edev, &args, true);
+               __qede_unlock(edev);
 
                return 1;
        }
@@ -2321,6 +2493,48 @@ static netdev_features_t qede_features_check(struct sk_buff *skb,
        return features;
 }
 
+static void qede_xdp_reload_func(struct qede_dev *edev,
+                                struct qede_reload_args *args)
+{
+       struct bpf_prog *old;
+
+       old = xchg(&edev->xdp_prog, args->u.new_prog);
+       if (old)
+               bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+       struct qede_reload_args args;
+
+       if (prog && prog->xdp_adjust_head) {
+               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we're called, there was already a bpf reference increment */
+       args.func = &qede_xdp_reload_func;
+       args.u.new_prog = prog;
+       qede_reload(edev, &args, false);
+
+       return 0;
+}
+
+static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return qede_xdp_set(edev, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!edev->xdp_prog;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
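
The qede_xdp() callback above is wired up as .ndo_xdp just below; the stack invokes it when user space attaches a program through the netlink XDP attribute. A hedged user-space sketch of that attach path follows. It is not part of this diff and assumes a present-day libbpf (bpf_xdp_attach() and these helper names postdate this patch); at the time the equivalent was samples/bpf or "ip link set dev <if> xdp obj <file>". Note that a program using bpf_xdp_adjust_head() would be refused by qede_xdp_set() with -EOPNOTSUPP.

/* Illustrative loader, not taken from this patch; assumes a recent libbpf. */
#include <bpf/libbpf.h>
#include <linux/if_link.h>
#include <net/if.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex, prog_fd;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <ifname> <prog.o>\n", argv[0]);
		return 1;
	}

	ifindex = if_nametoindex(argv[1]);
	if (!ifindex)
		return 1;

	/* Load the object file and pick its first program section */
	obj = bpf_object__open_file(argv[2], NULL);
	if (!obj || bpf_object__load(obj))
		return 1;
	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return 1;
	prog_fd = bpf_program__fd(prog);

	/* DRV mode requests the native hook (.ndo_xdp / XDP_SETUP_PROG) */
	if (bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL)) {
		fprintf(stderr, "attach failed\n");
		return 1;
	}

	return 0;
}
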
@@ -2346,6 +2560,7 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_udp_tunnel_add = qede_udp_tunnel_add,
        .ndo_udp_tunnel_del = qede_udp_tunnel_del,
        .ndo_features_check = qede_features_check,
+       .ndo_xdp = qede_xdp,
 };
 
 /* -------------------------------------------------------------------------
@@ -2483,6 +2698,7 @@ static void qede_free_fp_array(struct qede_dev *edev)
 
                        kfree(fp->sb_info);
                        kfree(fp->rxq);
+                       kfree(fp->xdp_tx);
                        kfree(fp->txq);
                }
                kfree(edev->fp_array);
@@ -2542,6 +2758,14 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
                        fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
                        if (!fp->rxq)
                                goto err;
+
+                       if (edev->xdp_prog) {
+                               fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+                                                    GFP_KERNEL);
+                               if (!fp->xdp_tx)
+                                       goto err;
+                               fp->type |= QEDE_FASTPATH_XDP;
+                       }
                }
        }
 
@@ -2588,9 +2812,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 {
        struct qed_pf_params pf_params;
 
-       /* 64 rx + 64 tx */
+       /* 64 rx + 64 tx + 64 XDP */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 128;
+       pf_params.eth_pf_params.num_cons = 192;
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2739,6 +2963,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
        pci_set_drvdata(pdev, NULL);
 
+       /* Release edev's reference to XDP's bpf if such exist */
+       if (edev->xdp_prog)
+               bpf_prog_put(edev->xdp_prog);
+
        free_netdev(ndev);
 
        /* Use global ops since we've freed edev */
@@ -2843,7 +3071,7 @@ static void qede_free_rx_buffers(struct qede_dev *edev,
                data = rx_buf->data;
 
                dma_unmap_page(&edev->pdev->dev,
-                              rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
+                              rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
 
                rx_buf->data = NULL;
                __free_page(data);
@@ -2885,52 +3113,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
 }
 
-static int qede_alloc_rx_buffer(struct qede_dev *edev,
-                               struct qede_rx_queue *rxq)
-{
-       struct sw_rx_data *sw_rx_data;
-       struct eth_rx_bd *rx_bd;
-       dma_addr_t mapping;
-       struct page *data;
-
-       data = alloc_pages(GFP_ATOMIC, 0);
-       if (unlikely(!data)) {
-               DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
-               return -ENOMEM;
-       }
-
-       /* Map the entire page as it would be used
-        * for multiple RX buffer segment size mapping.
-        */
-       mapping = dma_map_page(&edev->pdev->dev, data, 0,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
-               __free_page(data);
-               DP_NOTICE(edev, "Failed to map Rx buffer\n");
-               return -ENOMEM;
-       }
-
-       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       sw_rx_data->page_offset = 0;
-       sw_rx_data->data = data;
-       sw_rx_data->mapping = mapping;
-
-       /* Advance PROD and get BD pointer */
-       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
-       WARN_ON(!rx_bd);
-       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
-       rxq->sw_rx_prod++;
-
-       return 0;
-}
-
 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
        dma_addr_t mapping;
        int i;
 
+       /* Don't perform FW aggregations in case of XDP */
+       if (edev->xdp_prog)
+               edev->gro_disable = 1;
+
        if (edev->gro_disable)
                return 0;
 
@@ -2983,8 +3174,13 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        if (rxq->rx_buf_size > PAGE_SIZE)
                rxq->rx_buf_size = PAGE_SIZE;
 
-       /* Segment size to split a page in multiple equal parts */
-       rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       /* Segment size to split a page in multiple equal parts,
+        * unless XDP is used in which case we'd use the entire page.
+        */
+       if (!edev->xdp_prog)
+               rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
+       else
+               rxq->rx_buf_seg_size = PAGE_SIZE;
 
        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
@@ -3020,7 +3216,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 
        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->num_rx_buffers; i++) {
-               rc = qede_alloc_rx_buffer(edev, rxq);
+               rc = qede_alloc_rx_buffer(rxq);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
@@ -3036,7 +3232,10 @@ err:
 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
        /* Free the parallel SW ring */
-       kfree(txq->sw_tx_ring);
+       if (txq->is_xdp)
+               kfree(txq->sw_tx_ring.pages);
+       else
+               kfree(txq->sw_tx_ring.skbs);
 
        /* Free the real RQ ring used by FW */
        edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
@@ -3045,17 +3244,22 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 /* This function allocates all memory needed per Tx queue */
 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       int size, rc;
        union eth_tx_bd_types *p_virt;
+       int size, rc;
 
        txq->num_tx_buffers = edev->q_num_tx_buffers;
 
        /* Allocate the parallel driver ring for Tx buffers */
-       size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
-       txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
-       if (!txq->sw_tx_ring) {
-               DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
-               goto err;
+       if (txq->is_xdp) {
+               size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
+               txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.pages)
+                       goto err;
+       } else {
+               size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
+               txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+               if (!txq->sw_tx_ring.skbs)
+                       goto err;
        }
 
        rc = edev->ops->common->chain_alloc(edev->cdev,
@@ -3091,26 +3295,31 @@ static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
  */
 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
 {
-       int rc;
+       int rc = 0;
 
        rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
        if (rc)
-               goto err;
+               goto out;
 
        if (fp->type & QEDE_FASTPATH_RX) {
                rc = qede_alloc_mem_rxq(edev, fp->rxq);
                if (rc)
-                       goto err;
+                       goto out;
+       }
+
+       if (fp->type & QEDE_FASTPATH_XDP) {
+               rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
+               if (rc)
+                       goto out;
        }
 
        if (fp->type & QEDE_FASTPATH_TX) {
                rc = qede_alloc_mem_txq(edev, fp->txq);
                if (rc)
-                       goto err;
+                       goto out;
        }
 
-       return 0;
-err:
+out:
        return rc;
 }
 
@@ -3158,15 +3367,28 @@ static void qede_init_fp(struct qede_dev *edev)
                fp->edev = edev;
                fp->id = queue_id;
 
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+                                                               rxq_index);
+                       fp->xdp_tx->is_xdp = 1;
+               }
 
                if (fp->type & QEDE_FASTPATH_RX) {
                        fp->rxq->rxq_id = rxq_index++;
+
+                       /* Determine how to map buffers for this queue */
+                       if (fp->type & QEDE_FASTPATH_XDP)
+                               fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+                       else
+                               fp->rxq->data_direction = DMA_FROM_DEVICE;
+                       fp->rxq->dev = &edev->pdev->dev;
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
                        fp->txq->index = txq_index++;
                        if (edev->dev_info.is_legacy)
                                fp->txq->is_legacy = 1;
+                       fp->txq->dev = &edev->pdev->dev;
                }
 
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -3369,6 +3591,12 @@ static int qede_stop_queues(struct qede_dev *edev)
                        if (rc)
                                return rc;
                }
+
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_drain_txq(edev, fp->xdp_tx, true);
+                       if (rc)
+                               return rc;
+               }
        }
 
        /* Stop all Queues in reverse order */
@@ -3390,6 +3618,15 @@ static int qede_stop_queues(struct qede_dev *edev)
                                return rc;
                        }
                }
+
+               /* Stop the XDP forwarding queue */
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_stop_txq(edev, fp->xdp_tx, i);
+                       if (rc)
+                               return rc;
+
+                       bpf_prog_put(fp->rxq->xdp_prog);
+               }
        }
 
        /* Stop the vport */
@@ -3413,7 +3650,14 @@ static int qede_start_txq(struct qede_dev *edev,
        memset(&params, 0, sizeof(params));
        memset(&ret_params, 0, sizeof(ret_params));
 
-       params.queue_id = txq->index;
+       /* Let the XDP queue share the queue-zone with one of the regular txq.
+        * We don't really care about its coalescing.
+        */
+       if (txq->is_xdp)
+               params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+       else
+               params.queue_id = txq->index;
+
        params.sb = fp->sb_info->igu_sb_id;
        params.sb_idx = sb_idx;
 
@@ -3517,6 +3761,19 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                        qede_update_rx_prod(edev, rxq);
                }
 
+               if (fp->type & QEDE_FASTPATH_XDP) {
+                       rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+                       if (rc)
+                               return rc;
+
+                       fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
+                       if (IS_ERR(fp->rxq->xdp_prog)) {
+                               rc = PTR_ERR(fp->rxq->xdp_prog);
+                               fp->rxq->xdp_prog = NULL;
+                               return rc;
+                       }
+               }
+
                if (fp->type & QEDE_FASTPATH_TX) {
                        rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
                        if (rc)