diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index b5b2925103ec10c1e835429c2ab7d2efee093838..e76a44cf330cd47d57084a05fd69611985a79e15 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -398,6 +398,8 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
                return;
 
        skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       ring->rx_stats.csum_good++;
 }
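The csum_good counter added here only increments once the hardware has positively validated the packet, because every earlier check in the function returns before reaching this point. A hedged sketch of that overall shape, with invented MY_RXD_STATUS_* flags and a cut-down ring structure rather than the fm10k definitions:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Invented status flags and ring layout, only to show the shape of an
 * Rx checksum helper; these are not the fm10k descriptor bits. */
#define MY_RXD_STATUS_L4CS	0x0001	/* hardware checked the L4 checksum */
#define MY_RXD_STATUS_L4E	0x0002	/* ...and found it bad */

struct my_ring {
	struct net_device *netdev;
	struct {
		u64 csum_err;
		u64 csum_good;
	} rx_stats;
};

static void my_rx_checksum(struct my_ring *ring, u32 status, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;				/* offload disabled via ethtool */

	if (!(status & MY_RXD_STATUS_L4CS))
		return;				/* hardware did not check this packet */

	if (status & MY_RXD_STATUS_L4E) {
		ring->rx_stats.csum_err++;	/* bad checksum seen by hardware */
		return;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* stack can skip verification */
	ring->rx_stats.csum_good++;		/* the counter added in this hunk */
}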
 
 #define FM10K_RSS_L4_TYPES_MASK \
@@ -497,8 +499,11 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
        if (rx_desc->w.vlan) {
                u16 vid = le16_to_cpu(rx_desc->w.vlan);
 
-               if (vid != rx_ring->vid)
+               if ((vid & VLAN_VID_MASK) != rx_ring->vid)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+               else if (vid & VLAN_PRIO_MASK)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                              vid & VLAN_PRIO_MASK);
        }
 
        fm10k_type_trans(rx_ring, rx_desc, skb);
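The reworked test masks the priority bits off before comparing against the ring's default VID, and when only the VID matches it still hands the stack a priority-only tag so 802.1p information is not lost. A standalone sketch of the TCI field split; the mask values mirror the kernel's <linux/if_vlan.h> and the example TCI is invented:

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: bits 15..13 PCP, bit 12 DEI, bits 11..0 VLAN ID.
 * Mask values copied from the kernel's <linux/if_vlan.h>. */
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13
#define VLAN_VID_MASK	0x0fff

int main(void)
{
	uint16_t tci = 0x6005;	/* invented example: PCP 3, VLAN ID 5 */

	printf("vlan id  = %u\n", tci & VLAN_VID_MASK);				/* 5 */
	printf("priority = %u\n", (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);	/* 3 */

	/* A priority-only tag, as passed in the else-if branch above,
	 * keeps the PCP bits and reports VLAN ID 0 to the stack. */
	printf("prio-only tag = 0x%04x\n", tci & VLAN_PRIO_MASK);		/* 0x6000 */
	return 0;
}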
@@ -553,6 +558,18 @@ static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
 {
        if (unlikely((fm10k_test_staterr(rx_desc,
                                         FM10K_RXD_STATUS_RXE)))) {
+#define FM10K_TEST_RXD_BIT(rxd, bit) \
+       ((rxd)->w.csum_err & cpu_to_le16(bit))
+               if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
+                       rx_ring->rx_stats.switch_errors++;
+               if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
+                       rx_ring->rx_stats.drops++;
+               if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
+                       rx_ring->rx_stats.pp_errors++;
+               if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
+                       rx_ring->rx_stats.link_errors++;
+               if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
+                       rx_ring->rx_stats.length_errors++;
                dev_kfree_skb_any(skb);
                rx_ring->rx_stats.errors++;
                return true;
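FM10K_TEST_RXD_BIT masks the descriptor's little-endian csum_err word against a constant converted with cpu_to_le16, so any byte swap happens once at compile time rather than per received descriptor. A hedged sketch of the same idiom with an invented descriptor and bit value; the real FM10K_RXD_ERR_* values come from the driver's headers:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Invented error bit and descriptor layout, only to show the idiom. */
#define MY_RXD_ERR_TOO_BIG	0x0010

struct my_rx_desc {
	__le16 csum_err;	/* stored little-endian by the hardware */
};

static bool my_rxd_too_big(const struct my_rx_desc *rxd)
{
	/* Convert the constant, not the descriptor word: cheaper and
	 * identical in effect on both endiannesses. */
	return !!(rxd->csum_err & cpu_to_le16(MY_RXD_ERR_TOO_BIG));
}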
@@ -576,9 +593,9 @@ static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
        napi_gro_receive(&q_vector->napi, skb);
 }
 
-static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
-                              struct fm10k_ring *rx_ring,
-                              int budget)
+static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
+                             struct fm10k_ring *rx_ring,
+                             int budget)
 {
        struct sk_buff *skb = rx_ring->skb;
        unsigned int total_bytes = 0, total_packets = 0;
@@ -645,7 +662,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
        q_vector->rx.total_packets += total_packets;
        q_vector->rx.total_bytes += total_bytes;
 
-       return total_packets < budget;
+       return total_packets;
 }
 
 #define VXLAN_HLEN (sizeof(struct udphdr) + 8)
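With fm10k_clean_rx_irq now returning the number of packets it processed, the caller in fm10k_poll (further below) can both detect an exhausted per-ring budget and total up work_done for napi_complete_done. Drivers typically fold the same totals into per-ring counters under a u64_stats_sync so 64-bit statistics reads stay consistent on 32-bit CPUs; a generic sketch of that pattern with made-up structure names, not the fm10k layout:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Illustrative per-ring counters guarded by a u64_stats_sync. */
struct my_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_ring_add_stats(struct my_ring_stats *stats,
			      unsigned int pkts, unsigned int bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets += pkts;
	stats->bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}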
@@ -878,6 +895,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
 
        /* update TX checksum flag */
        first->tx_flags |= FM10K_TX_FLAGS_CSUM;
+       tx_ring->tx_stats.csum_good++;
 
 no_csum:
        /* populate Tx descriptor header size and mss */
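The matching Tx-side counter lands just before the no_csum label. For orientation, a hedged sketch of the offload decision that usually surrounds such a label, with illustrative my_* names and skb_checksum_help() as the software fallback; this is not the fm10k implementation:

#include <linux/types.h>
#include <linux/skbuff.h>

/* Illustrative ring with Tx checksum counters. */
struct my_tx_ring {
	struct {
		u64 csum_good;
		u64 csum_err;
	} tx_stats;
};

/* Stand-in for the hardware/protocol capability check a real driver
 * performs before offloading; always succeeds in this sketch. */
static bool my_hw_can_csum(const struct sk_buff *skb)
{
	return true;
}

/* Returns true when the Tx descriptor should carry the checksum flag. */
static bool my_tx_csum(struct my_tx_ring *ring, struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;			/* stack did not request offload */

	if (!my_hw_can_csum(skb)) {
		/* Fall back to a software checksum before transmit. */
		if (skb_checksum_help(skb))
			ring->tx_stats.csum_err++;
		return false;
	}

	ring->tx_stats.csum_good++;		/* mirrors the counter added above */
	return true;
}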
@@ -1079,9 +1097,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
        struct fm10k_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        unsigned short f;
-#endif
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
        /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1105,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
         *       + 2 desc gap to keep tail from touching head
         * otherwise try next time
         */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-       count += skb_shinfo(skb)->nr_frags;
-#endif
+
        if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
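With the #if gone, the descriptor count is always computed per fragment with TXD_USE_COUNT instead of assuming one descriptor per fragment on small-page systems. A standalone sketch of that rounding arithmetic; the 16 KiB per-descriptor limit is an assumption for illustration, the driver's real limit is FM10K_MAX_DATA_PER_TXD:

#include <stdio.h>

#define MAX_DATA_PER_TXD 16384u	/* assumed per-descriptor data limit */
#define TXD_USE_COUNT(size) (((size) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int frag_sizes[] = { 1500, 16384, 20000, 65536 };
	unsigned int i, count = 0;

	/* Each fragment needs ceil(size / limit) descriptors. */
	for (i = 0; i < sizeof(frag_sizes) / sizeof(frag_sizes[0]); i++)
		count += TXD_USE_COUNT(frag_sizes[i]);

	printf("descriptors needed: %u\n", count);	/* 1 + 1 + 2 + 4 = 8 */
	return 0;
}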
@@ -1409,7 +1422,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
        struct fm10k_q_vector *q_vector =
                               container_of(napi, struct fm10k_q_vector, napi);
        struct fm10k_ring *ring;
-       int per_ring_budget;
+       int per_ring_budget, work_done = 0;
        bool clean_complete = true;
 
        fm10k_for_each_ring(ring, q_vector->tx)
@@ -1423,16 +1436,19 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
        else
                per_ring_budget = budget;
 
-       fm10k_for_each_ring(ring, q_vector->rx)
-               clean_complete &= fm10k_clean_rx_irq(q_vector, ring,
-                                                    per_ring_budget);
+       fm10k_for_each_ring(ring, q_vector->rx) {
+               int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+               work_done += work;
+               clean_complete &= !!(work < per_ring_budget);
+       }
 
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
 
        /* re-enable the q_vector */
        fm10k_qv_enable(q_vector);
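The poll loop now sums per-ring work into work_done and reports it through napi_complete_done, which lets the core account the work for GRO flushing and interrupt moderation, while an exhausted budget still returns the full budget to keep polling. A minimal sketch of that NAPI contract with placeholder my_* helpers, not the fm10k functions:

#include <linux/netdevice.h>

/* Stand-ins for the driver's per-vector Rx cleanup and interrupt
 * re-enable; both are placeholders for illustration. */
static int my_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;			/* pretend no packets were processed */
}

static void my_enable_irqs(struct napi_struct *napi)
{
}

/* Minimal shape of a NAPI poll callback using napi_complete_done(). */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rx(napi, budget);

	/* Budget exhausted: ask the core to poll again soon. */
	if (work_done >= budget)
		return budget;

	/* All work finished: report the exact amount, then unmask interrupts. */
	napi_complete_done(napi, work_done);
	my_enable_irqs(napi);

	return work_done;
}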
@@ -1892,7 +1908,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
        u32 reta, base;
 
        /* If the netdev is initialized we have to maintain table if possible */
-       if (interface->netdev->reg_state) {
+       if (interface->netdev->reg_state != NETREG_UNINITIALIZED) {
                for (i = FM10K_RETA_SIZE; i--;) {
                        reta = interface->reta[i];
                        if ((((reta << 24) >> 24) < rss_i) &&
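Comparing reg_state against NETREG_UNINITIALIZED spells out what the old truthiness test meant: the existing table is only worth preserving once the netdev has been initialized. The byte shifts in this loop suggest each 32-bit reta entry packs four 8-bit queue indices; a standalone sketch of unpacking and validating one such entry, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* One packed redirection-table entry and the active queue count;
	 * both values are invented for illustration. */
	uint32_t reta = 0x03020100;
	unsigned int rss_i = 4;
	unsigned int n, valid = 1;

	for (n = 0; n < 4; n++) {
		unsigned int q = (reta >> (8 * n)) & 0xff;

		/* ((reta << 24) >> 24) in the hunk above is the n == 0 byte. */
		if (q >= rss_i)
			valid = 0;
		printf("slot %u -> queue %u\n", n, q);
	}

	printf("entry is %s\n", valid ? "still usable" : "stale");
	return 0;
}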