drivers/net/ethernet/intel/i40e/i40e_txrx.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2014 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
29 #include "i40e.h"
30 #include "i40e_prototype.h"
31
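/* build_ctob - assemble the cmd_type_offset_bsz quadword of a Tx data
 * descriptor from the command, offset, buffer size and L2 tag fields
 */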
32 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
33                                 u32 td_tag)
34 {
35         return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
36                            ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
37                            ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
38                            ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
39                            ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
40 }
41
42 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
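/* maximum number of 1 ms waits for two free descriptors when programming
 * a Flow Director filter (see the wait loop in i40e_program_fdir_filter)
 */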
43 #define I40E_FD_CLEAN_DELAY 10
44 /**
45  * i40e_program_fdir_filter - Program a Flow Director filter
46  * @fdir_data: the Flow Director filter parameters to program
47  * @raw_packet: the pre-allocated packet buffer for FDir
48  * @pf: The PF pointer
49  * @add: True for add/update, False for remove
50  **/
51 int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
52                              struct i40e_pf *pf, bool add)
53 {
54         struct i40e_filter_program_desc *fdir_desc;
55         struct i40e_tx_buffer *tx_buf, *first;
56         struct i40e_tx_desc *tx_desc;
57         struct i40e_ring *tx_ring;
58         unsigned int fpt, dcc;
59         struct i40e_vsi *vsi;
60         struct device *dev;
61         dma_addr_t dma;
62         u32 td_cmd = 0;
63         u16 delay = 0;
64         u16 i;
65
66         /* find existing FDIR VSI */
67         vsi = NULL;
68         for (i = 0; i < pf->num_alloc_vsi; i++)
69                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
70                         vsi = pf->vsi[i];
71         if (!vsi)
72                 return -ENOENT;
73
74         tx_ring = vsi->tx_rings[0];
75         dev = tx_ring->dev;
76
77         /* we need two descriptors to add/del a filter and we can wait */
78         do {
79                 if (I40E_DESC_UNUSED(tx_ring) > 1)
80                         break;
81                 msleep_interruptible(1);
82                 delay++;
83         } while (delay < I40E_FD_CLEAN_DELAY);
84
85         if (!(I40E_DESC_UNUSED(tx_ring) > 1))
86                 return -EAGAIN;
87
88         dma = dma_map_single(dev, raw_packet,
89                              I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
90         if (dma_mapping_error(dev, dma))
91                 goto dma_fail;
92
93         /* grab the next descriptor */
94         i = tx_ring->next_to_use;
95         fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
96         first = &tx_ring->tx_bi[i];
97         memset(first, 0, sizeof(struct i40e_tx_buffer));
98
99         tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
100
101         fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
102               I40E_TXD_FLTR_QW0_QINDEX_MASK;
103
104         fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
105                I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
106
107         fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
108                I40E_TXD_FLTR_QW0_PCTYPE_MASK;
109
110         /* Use LAN VSI Id if not programmed by user */
111         if (fdir_data->dest_vsi == 0)
112                 fpt |= (pf->vsi[pf->lan_vsi]->id) <<
113                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
114         else
115                 fpt |= ((u32)fdir_data->dest_vsi <<
116                         I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
117                        I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
118
119         dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
120
121         if (add)
122                 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
123                        I40E_TXD_FLTR_QW1_PCMD_SHIFT;
124         else
125                 dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
126                        I40E_TXD_FLTR_QW1_PCMD_SHIFT;
127
128         dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
129                I40E_TXD_FLTR_QW1_DEST_MASK;
130
131         dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
132                I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
133
134         if (fdir_data->cnt_index != 0) {
135                 dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
136                 dcc |= ((u32)fdir_data->cnt_index <<
137                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
138                         I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
139         }
140
141         fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
142         fdir_desc->rsvd = cpu_to_le32(0);
143         fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
144         fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
145
146         /* Now program a dummy descriptor */
147         i = tx_ring->next_to_use;
148         tx_desc = I40E_TX_DESC(tx_ring, i);
149         tx_buf = &tx_ring->tx_bi[i];
150
151         tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
152
153         memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
154
155         /* record length, and DMA address */
156         dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
157         dma_unmap_addr_set(tx_buf, dma, dma);
158
159         tx_desc->buffer_addr = cpu_to_le64(dma);
160         td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
161
162         tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
163         tx_buf->raw_buf = (void *)raw_packet;
164
165         tx_desc->cmd_type_offset_bsz =
166                 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
167
168         /* Force memory writes to complete before letting h/w
169          * know there are new descriptors to fetch.
170          */
171         wmb();
172
173         /* Mark the data descriptor to be watched */
174         first->next_to_watch = tx_desc;
175
176         writel(tx_ring->next_to_use, tx_ring->tail);
177         return 0;
178
179 dma_fail:
180         return -1;
181 }
182
183 #define IP_HEADER_OFFSET 14
184 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
185 /**
186  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
187  * @vsi: pointer to the targeted VSI
188  * @fd_data: the flow director data required for the FDir descriptor
189  * @add: true adds a filter, false removes it
190  *
191  * Returns 0 if the filters were successfully added or removed
192  **/
193 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
194                                    struct i40e_fdir_filter *fd_data,
195                                    bool add)
196 {
197         struct i40e_pf *pf = vsi->back;
198         struct udphdr *udp;
199         struct iphdr *ip;
200         bool err = false;
201         u8 *raw_packet;
202         int ret;
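        /* Dummy UDPv4 packet (Ethernet + IPv4 + UDP headers); the
         * addresses and ports are filled in below
         */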
203         static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
204                 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
205                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
206
207         raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
208         if (!raw_packet)
209                 return -ENOMEM;
210         memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
211
212         ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
213         udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
214               + sizeof(struct iphdr));
215
216         ip->daddr = fd_data->dst_ip[0];
217         udp->dest = fd_data->dst_port;
218         ip->saddr = fd_data->src_ip[0];
219         udp->source = fd_data->src_port;
220
221         fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
222         ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
223         if (ret) {
224                 dev_info(&pf->pdev->dev,
225                          "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
226                          fd_data->pctype, fd_data->fd_id, ret);
227                 err = true;
228         } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
229                 if (add)
230                         dev_info(&pf->pdev->dev,
231                                  "Filter OK for PCTYPE %d loc = %d\n",
232                                  fd_data->pctype, fd_data->fd_id);
233                 else
234                         dev_info(&pf->pdev->dev,
235                                  "Filter deleted for PCTYPE %d loc = %d\n",
236                                  fd_data->pctype, fd_data->fd_id);
237         }
238         return err ? -EOPNOTSUPP : 0;
239 }
240
241 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
242 /**
243  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
244  * @vsi: pointer to the targeted VSI
245  * @fd_data: the flow director data required for the FDir descriptor
246  * @add: true adds a filter, false removes it
247  *
248  * Returns 0 if the filters were successfully added or removed
249  **/
250 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
251                                    struct i40e_fdir_filter *fd_data,
252                                    bool add)
253 {
254         struct i40e_pf *pf = vsi->back;
255         struct tcphdr *tcp;
256         struct iphdr *ip;
257         bool err = false;
258         u8 *raw_packet;
259         int ret;
260         /* Dummy packet */
261         static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
262                 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
263                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
264                 0x0, 0x72, 0, 0, 0, 0};
265
266         raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
267         if (!raw_packet)
268                 return -ENOMEM;
269         memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
270
271         ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
272         tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
273               + sizeof(struct iphdr));
274
275         ip->daddr = fd_data->dst_ip[0];
276         tcp->dest = fd_data->dst_port;
277         ip->saddr = fd_data->src_ip[0];
278         tcp->source = fd_data->src_port;
279
280         if (add) {
281                 pf->fd_tcp_rule++;
282                 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
283                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
284                                 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
285                         pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
286                 }
287         } else {
288                 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
289                                   (pf->fd_tcp_rule - 1) : 0;
290                 if (pf->fd_tcp_rule == 0) {
291                         pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
292                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
293                                 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
294                 }
295         }
296
297         fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
298         ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
299
300         if (ret) {
301                 dev_info(&pf->pdev->dev,
302                          "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
303                          fd_data->pctype, fd_data->fd_id, ret);
304                 err = true;
305         } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
306                 if (add)
307                         dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
308                                  fd_data->pctype, fd_data->fd_id);
309                 else
310                         dev_info(&pf->pdev->dev,
311                                  "Filter deleted for PCTYPE %d loc = %d\n",
312                                  fd_data->pctype, fd_data->fd_id);
313         }
314
315         return err ? -EOPNOTSUPP : 0;
316 }
317
318 /**
319  * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
320  * a specific flow spec
321  * @vsi: pointer to the targeted VSI
322  * @fd_data: the flow director data required for the FDir descriptor
323  * @add: true adds a filter, false removes it
324  *
325  * Always returns -EOPNOTSUPP
326  **/
327 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
328                                     struct i40e_fdir_filter *fd_data,
329                                     bool add)
330 {
331         return -EOPNOTSUPP;
332 }
333
334 #define I40E_IP_DUMMY_PACKET_LEN 34
335 /**
336  * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
337  * a specific flow spec
338  * @vsi: pointer to the targeted VSI
339  * @fd_data: the flow director data required for the FDir descriptor
340  * @add: true adds a filter, false removes it
341  *
342  * Returns 0 if the filters were successfully added or removed
343  **/
344 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
345                                   struct i40e_fdir_filter *fd_data,
346                                   bool add)
347 {
348         struct i40e_pf *pf = vsi->back;
349         struct iphdr *ip;
350         bool err = false;
351         u8 *raw_packet;
352         int ret;
353         int i;
354         static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
355                 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
356                 0, 0, 0, 0};
357
358         for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
359              i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
360                 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
361                 if (!raw_packet)
362                         return -ENOMEM;
363                 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
364                 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
365
366                 ip->saddr = fd_data->src_ip[0];
367                 ip->daddr = fd_data->dst_ip[0];
368                 ip->protocol = 0;
369
370                 fd_data->pctype = i;
371                 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
372
373                 if (ret) {
374                         dev_info(&pf->pdev->dev,
375                                  "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
376                                  fd_data->pctype, fd_data->fd_id, ret);
377                         err = true;
378                 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
379                         if (add)
380                                 dev_info(&pf->pdev->dev,
381                                          "Filter OK for PCTYPE %d loc = %d\n",
382                                          fd_data->pctype, fd_data->fd_id);
383                         else
384                                 dev_info(&pf->pdev->dev,
385                                          "Filter deleted for PCTYPE %d loc = %d\n",
386                                          fd_data->pctype, fd_data->fd_id);
387                 }
388         }
389
390         return err ? -EOPNOTSUPP : 0;
391 }
392
393 /**
394  * i40e_add_del_fdir - Build raw packets to add/del fdir filter
395  * @vsi: pointer to the targeted VSI
396  * @input: the Flow Director filter entry to add or delete
397  * @add: true adds a filter, false removes it
398  *
399  **/
400 int i40e_add_del_fdir(struct i40e_vsi *vsi,
401                       struct i40e_fdir_filter *input, bool add)
402 {
403         struct i40e_pf *pf = vsi->back;
404         int ret;
405
406         switch (input->flow_type & ~FLOW_EXT) {
407         case TCP_V4_FLOW:
408                 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
409                 break;
410         case UDP_V4_FLOW:
411                 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
412                 break;
413         case SCTP_V4_FLOW:
414                 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
415                 break;
416         case IPV4_FLOW:
417                 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
418                 break;
419         case IP_USER_FLOW:
420                 switch (input->ip4_proto) {
421                 case IPPROTO_TCP:
422                         ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
423                         break;
424                 case IPPROTO_UDP:
425                         ret = i40e_add_del_fdir_udpv4(vsi, input, add);
426                         break;
427                 case IPPROTO_SCTP:
428                         ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
429                         break;
430                 default:
431                         ret = i40e_add_del_fdir_ipv4(vsi, input, add);
432                         break;
433                 }
434                 break;
435         default:
436                 dev_info(&pf->pdev->dev, "Unsupported flow type %d\n",
437                          input->flow_type);
438                 ret = -EINVAL;
439         }
440
441         /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
442         return ret;
443 }
444
445 /**
446  * i40e_fd_handle_status - check the Programming Status for FD
447  * @rx_ring: the Rx ring for this descriptor
448  * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
449  * @prog_id: the id originally used for programming
450  *
451  * This is used to verify whether the FD programming or invalidation
452  * requested by SW to the HW succeeded, and to take actions accordingly.
453  **/
454 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
455                                   union i40e_rx_desc *rx_desc, u8 prog_id)
456 {
457         struct i40e_pf *pf = rx_ring->vsi->back;
458         struct pci_dev *pdev = pf->pdev;
459         u32 fcnt_prog, fcnt_avail;
460         u32 error;
461         u64 qw;
462
463         qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
464         error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
465                 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
466
467         if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
468                 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
469                 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
470                     (I40E_DEBUG_FD & pf->hw.debug_mask))
471                         dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
472                                  pf->fd_inv);
473
474                 /* Check if the programming error is for ATR.
475                  * If so, auto disable ATR and set a state for
476                  * flush in progress. Next time we come here if flush is in
477                  * progress do nothing, once flush is complete the state will
478                  * be cleared.
479                  */
480                 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
481                         return;
482
483                 pf->fd_add_err++;
484                 /* store the current atr filter count */
485                 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
486
487                 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
488                     (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
489                         pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
490                         set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
491                 }
492
493                 /* filter programming failed most likely due to table full */
494                 fcnt_prog = i40e_get_global_fd_count(pf);
495                 fcnt_avail = pf->fdir_pf_filter_count;
496                 /* If ATR is running fcnt_prog can quickly change,
497                  * if we are very close to full, it makes sense to disable
498                  * FD ATR/SB and then re-enable it when there is room.
499                  */
500                 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
501                         if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
502                             !(pf->auto_disable_flags &
503                                      I40E_FLAG_FD_SB_ENABLED)) {
504                                 if (I40E_DEBUG_FD & pf->hw.debug_mask)
505                                         dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
506                                 pf->auto_disable_flags |=
507                                                         I40E_FLAG_FD_SB_ENABLED;
508                         }
509                 } else {
510                         dev_info(&pdev->dev,
511                                 "FD filter programming failed due to incorrect filter parameters\n");
512                 }
513         } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
514                 if (I40E_DEBUG_FD & pf->hw.debug_mask)
515                         dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
516                                  rx_desc->wb.qword0.hi_dword.fd_id);
517         }
518 }
519
520 /**
521  * i40e_unmap_and_free_tx_resource - Release a Tx buffer
522  * @ring:      the ring that owns the buffer
523  * @tx_buffer: the buffer to free
524  **/
525 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
526                                             struct i40e_tx_buffer *tx_buffer)
527 {
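        /* Flow Director sideband filters store a raw kmalloc'ed packet
         * buffer in place of an skb (see i40e_program_fdir_filter), so
         * free it with kfree() rather than dev_kfree_skb_any()
         */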
528         if (tx_buffer->skb) {
529                 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
530                         kfree(tx_buffer->raw_buf);
531                 else
532                         dev_kfree_skb_any(tx_buffer->skb);
533
534                 if (dma_unmap_len(tx_buffer, len))
535                         dma_unmap_single(ring->dev,
536                                          dma_unmap_addr(tx_buffer, dma),
537                                          dma_unmap_len(tx_buffer, len),
538                                          DMA_TO_DEVICE);
539         } else if (dma_unmap_len(tx_buffer, len)) {
540                 dma_unmap_page(ring->dev,
541                                dma_unmap_addr(tx_buffer, dma),
542                                dma_unmap_len(tx_buffer, len),
543                                DMA_TO_DEVICE);
544         }
545         tx_buffer->next_to_watch = NULL;
546         tx_buffer->skb = NULL;
547         dma_unmap_len_set(tx_buffer, len, 0);
548         /* tx_buffer must be completely set up in the transmit path */
549 }
550
551 /**
552  * i40e_clean_tx_ring - Free any pending Tx buffers
553  * @tx_ring: ring to be cleaned
554  **/
555 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
556 {
557         unsigned long bi_size;
558         u16 i;
559
560         /* ring already cleared, nothing to do */
561         if (!tx_ring->tx_bi)
562                 return;
563
564         /* Free all the Tx ring sk_buffs */
565         for (i = 0; i < tx_ring->count; i++)
566                 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
567
568         bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
569         memset(tx_ring->tx_bi, 0, bi_size);
570
571         /* Zero out the descriptor ring */
572         memset(tx_ring->desc, 0, tx_ring->size);
573
574         tx_ring->next_to_use = 0;
575         tx_ring->next_to_clean = 0;
576
577         if (!tx_ring->netdev)
578                 return;
579
580         /* cleanup Tx queue statistics */
581         netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
582                                                   tx_ring->queue_index));
583 }
584
585 /**
586  * i40e_free_tx_resources - Free Tx resources per queue
587  * @tx_ring: Tx descriptor ring for a specific queue
588  *
589  * Free all transmit software resources
590  **/
591 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
592 {
593         i40e_clean_tx_ring(tx_ring);
594         kfree(tx_ring->tx_bi);
595         tx_ring->tx_bi = NULL;
596
597         if (tx_ring->desc) {
598                 dma_free_coherent(tx_ring->dev, tx_ring->size,
599                                   tx_ring->desc, tx_ring->dma);
600                 tx_ring->desc = NULL;
601         }
602 }
603
604 /**
605  * i40e_get_tx_pending - how many Tx descriptors are not yet processed
606  * @ring: the ring of descriptors
607  *
608  * Since there is no access to the ring head register
609  * in XL710, we need to use our local copies
610  **/
611 u32 i40e_get_tx_pending(struct i40e_ring *ring)
612 {
613         u32 head, tail;
614
615         head = i40e_get_head(ring);
616         tail = readl(ring->tail);
617
618         if (head != tail)
619                 return (head < tail) ?
620                         tail - head : (tail + ring->count - head);
621
622         return 0;
623 }
624
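/* a descriptor write-back is forced in i40e_clean_tx_irq when no more than
 * WB_STRIDE descriptors are still pending
 */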
625 #define WB_STRIDE 0x3
626
627 /**
628  * i40e_clean_tx_irq - Reclaim resources after transmit completes
629  * @tx_ring:  tx ring to clean
630  * @budget:   how many cleans we're allowed
631  *
632  * Returns true if there's any budget left (i.e. the clean is finished)
633  **/
634 static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
635 {
636         u16 i = tx_ring->next_to_clean;
637         struct i40e_tx_buffer *tx_buf;
638         struct i40e_tx_desc *tx_head;
639         struct i40e_tx_desc *tx_desc;
640         unsigned int total_packets = 0;
641         unsigned int total_bytes = 0;
642
643         tx_buf = &tx_ring->tx_bi[i];
644         tx_desc = I40E_TX_DESC(tx_ring, i);
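        /* bias the index by -count so the wrap check below is a simple
         * test against zero
         */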
645         i -= tx_ring->count;
646
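        /* head write-back: the hardware reports how far it has processed by
         * writing the head index into the extra u32 appended to the ring in
         * i40e_setup_tx_descriptors
         */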
647         tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
648
649         do {
650                 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
651
652                 /* if next_to_watch is not set then there is no work pending */
653                 if (!eop_desc)
654                         break;
655
656                 /* prevent any other reads prior to eop_desc */
657                 read_barrier_depends();
658
659                 /* we have caught up to head, no work left to do */
660                 if (tx_head == tx_desc)
661                         break;
662
663                 /* clear next_to_watch to prevent false hangs */
664                 tx_buf->next_to_watch = NULL;
665
666                 /* update the statistics for this packet */
667                 total_bytes += tx_buf->bytecount;
668                 total_packets += tx_buf->gso_segs;
669
670                 /* free the skb */
671                 dev_consume_skb_any(tx_buf->skb);
672
673                 /* unmap skb header data */
674                 dma_unmap_single(tx_ring->dev,
675                                  dma_unmap_addr(tx_buf, dma),
676                                  dma_unmap_len(tx_buf, len),
677                                  DMA_TO_DEVICE);
678
679                 /* clear tx_buffer data */
680                 tx_buf->skb = NULL;
681                 dma_unmap_len_set(tx_buf, len, 0);
682
683                 /* unmap remaining buffers */
684                 while (tx_desc != eop_desc) {
685
686                         tx_buf++;
687                         tx_desc++;
688                         i++;
689                         if (unlikely(!i)) {
690                                 i -= tx_ring->count;
691                                 tx_buf = tx_ring->tx_bi;
692                                 tx_desc = I40E_TX_DESC(tx_ring, 0);
693                         }
694
695                         /* unmap any remaining paged data */
696                         if (dma_unmap_len(tx_buf, len)) {
697                                 dma_unmap_page(tx_ring->dev,
698                                                dma_unmap_addr(tx_buf, dma),
699                                                dma_unmap_len(tx_buf, len),
700                                                DMA_TO_DEVICE);
701                                 dma_unmap_len_set(tx_buf, len, 0);
702                         }
703                 }
704
705                 /* move us one more past the eop_desc for start of next pkt */
706                 tx_buf++;
707                 tx_desc++;
708                 i++;
709                 if (unlikely(!i)) {
710                         i -= tx_ring->count;
711                         tx_buf = tx_ring->tx_bi;
712                         tx_desc = I40E_TX_DESC(tx_ring, 0);
713                 }
714
715                 prefetch(tx_desc);
716
717                 /* update budget accounting */
718                 budget--;
719         } while (likely(budget));
720
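        /* remove the -count bias applied above before recording next_to_clean */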
721         i += tx_ring->count;
722         tx_ring->next_to_clean = i;
723         u64_stats_update_begin(&tx_ring->syncp);
724         tx_ring->stats.bytes += total_bytes;
725         tx_ring->stats.packets += total_packets;
726         u64_stats_update_end(&tx_ring->syncp);
727         tx_ring->q_vector->tx.total_bytes += total_bytes;
728         tx_ring->q_vector->tx.total_packets += total_packets;
729
730         if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
731                 unsigned int j = 0;
732
733                 /* check to see if there are < 4 descriptors
734                  * waiting to be written back, then kick the hardware to force
735                  * them to be written back in case we stay in NAPI.
736                  * In this mode on X722 we do not enable Interrupt.
737                  */
738                 j = i40e_get_tx_pending(tx_ring);
739
740                 if (budget &&
741                     ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
742                     !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
743                     (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
744                         tx_ring->arm_wb = true;
745         }
746
747         netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
748                                                       tx_ring->queue_index),
749                                   total_packets, total_bytes);
750
751 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
752         if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
753                      (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
754                 /* Make sure that anybody stopping the queue after this
755                  * sees the new next_to_clean.
756                  */
757                 smp_mb();
758                 if (__netif_subqueue_stopped(tx_ring->netdev,
759                                              tx_ring->queue_index) &&
760                    !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
761                         netif_wake_subqueue(tx_ring->netdev,
762                                             tx_ring->queue_index);
763                         ++tx_ring->tx_stats.restart_queue;
764                 }
765         }
766
767         return !!budget;
768 }
769
770 /**
771  * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
772  * @vsi: the VSI we care about
773  * @q_vector: the vector  on which to force writeback
774  *
775  **/
776 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
777 {
778         u16 flags = q_vector->tx.ring[0].flags;
779
780         if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
781                 u32 val;
782
783                 if (q_vector->arm_wb_state)
784                         return;
785
786                 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
787
788                 wr32(&vsi->back->hw,
789                      I40E_PFINT_DYN_CTLN(q_vector->v_idx +
790                                          vsi->base_vector - 1),
791                      val);
792                 q_vector->arm_wb_state = true;
793         } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
794                 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
795                           I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
796                           I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
797                           I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
798                           /* allow 00 to be written to the index */
799
800                 wr32(&vsi->back->hw,
801                      I40E_PFINT_DYN_CTLN(q_vector->v_idx +
802                                          vsi->base_vector - 1), val);
803         } else {
804                 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
805                           I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
806                           I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
807                           I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
808                         /* allow 00 to be written to the index */
809
810                 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
811         }
812 }
813
814 /**
815  * i40e_set_new_dynamic_itr - Find new ITR level
816  * @rc: structure containing ring performance data
817  *
818  * Stores a new ITR value based on packets and byte counts during
819  * the last interrupt.  The advantage of per interrupt computation
820  * is faster updates and more accurate ITR for the current traffic
821  * pattern.  Constants in this function were computed based on
822  * theoretical maximum wire speed and thresholds were set based on
823  * testing data as well as attempting to minimize response time
824  * while increasing bulk throughput.
825  **/
826 static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
827 {
828         enum i40e_latency_range new_latency_range = rc->latency_range;
829         u32 new_itr = rc->itr;
830         int bytes_per_int;
831
832         if (rc->total_packets == 0 || !rc->itr)
833                 return;
834
835         /* simple throttle rate management
836          *   0-10MB/s   lowest (100000 ints/s)
837          *  10-20MB/s   low    (20000 ints/s)
838          *  20-1249MB/s bulk   (8000 ints/s)
839          */
840         bytes_per_int = rc->total_bytes / rc->itr;
841         switch (new_latency_range) {
842         case I40E_LOWEST_LATENCY:
843                 if (bytes_per_int > 10)
844                         new_latency_range = I40E_LOW_LATENCY;
845                 break;
846         case I40E_LOW_LATENCY:
847                 if (bytes_per_int > 20)
848                         new_latency_range = I40E_BULK_LATENCY;
849                 else if (bytes_per_int <= 10)
850                         new_latency_range = I40E_LOWEST_LATENCY;
851                 break;
852         case I40E_BULK_LATENCY:
853                 if (bytes_per_int <= 20)
854                         new_latency_range = I40E_LOW_LATENCY;
855                 break;
856         default:
857                 if (bytes_per_int <= 20)
858                         new_latency_range = I40E_LOW_LATENCY;
859                 break;
860         }
861         rc->latency_range = new_latency_range;
862
863         switch (new_latency_range) {
864         case I40E_LOWEST_LATENCY:
865                 new_itr = I40E_ITR_100K;
866                 break;
867         case I40E_LOW_LATENCY:
868                 new_itr = I40E_ITR_20K;
869                 break;
870         case I40E_BULK_LATENCY:
871                 new_itr = I40E_ITR_8K;
872                 break;
873         default:
874                 break;
875         }
876
877         if (new_itr != rc->itr)
878                 rc->itr = new_itr;
879
880         rc->total_bytes = 0;
881         rc->total_packets = 0;
882 }
883
884 /**
885  * i40e_clean_programming_status - clean the programming status descriptor
886  * @rx_ring: the rx ring that has this descriptor
887  * @rx_desc: the rx descriptor written back by HW
888  *
889  * Flow Director should handle FD_FILTER_STATUS to check whether its filter
890  * programming succeeded and take actions accordingly. FCoE should handle its
891  * context/filter programming/invalidation status and take actions.
892  *
893  **/
894 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
895                                           union i40e_rx_desc *rx_desc)
896 {
897         u64 qw;
898         u8 id;
899
900         qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
901         id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
902                   I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
903
904         if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
905                 i40e_fd_handle_status(rx_ring, rx_desc, id);
906 #ifdef I40E_FCOE
907         else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
908                  (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
909                 i40e_fcoe_handle_status(rx_ring, rx_desc, id);
910 #endif
911 }
912
913 /**
914  * i40e_setup_tx_descriptors - Allocate the Tx descriptors
915  * @tx_ring: the tx ring to set up
916  *
917  * Return 0 on success, negative on error
918  **/
919 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
920 {
921         struct device *dev = tx_ring->dev;
922         int bi_size;
923
924         if (!dev)
925                 return -ENOMEM;
926
927         /* warn if we are about to overwrite the pointer */
928         WARN_ON(tx_ring->tx_bi);
929         bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
930         tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
931         if (!tx_ring->tx_bi)
932                 goto err;
933
934         /* round up to nearest 4K */
935         tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
936         /* add u32 for head writeback, align after this takes care of
937          * guaranteeing this is at least one cache line in size
938          */
939         tx_ring->size += sizeof(u32);
940         tx_ring->size = ALIGN(tx_ring->size, 4096);
941         tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
942                                            &tx_ring->dma, GFP_KERNEL);
943         if (!tx_ring->desc) {
944                 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
945                          tx_ring->size);
946                 goto err;
947         }
948
949         tx_ring->next_to_use = 0;
950         tx_ring->next_to_clean = 0;
951         return 0;
952
953 err:
954         kfree(tx_ring->tx_bi);
955         tx_ring->tx_bi = NULL;
956         return -ENOMEM;
957 }
958
959 /**
960  * i40e_clean_rx_ring - Free Rx buffers
961  * @rx_ring: ring to be cleaned
962  **/
963 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
964 {
965         struct device *dev = rx_ring->dev;
966         struct i40e_rx_buffer *rx_bi;
967         unsigned long bi_size;
968         u16 i;
969
970         /* ring already cleared, nothing to do */
971         if (!rx_ring->rx_bi)
972                 return;
973
974         if (ring_is_ps_enabled(rx_ring)) {
975                 int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
976
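                /* the header buffers were carved from one coherent block in
                 * i40e_alloc_rx_headers, so free the whole block through the
                 * first buffer
                 */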
977                 rx_bi = &rx_ring->rx_bi[0];
978                 if (rx_bi->hdr_buf) {
979                         dma_free_coherent(dev,
980                                           bufsz,
981                                           rx_bi->hdr_buf,
982                                           rx_bi->dma);
983                         for (i = 0; i < rx_ring->count; i++) {
984                                 rx_bi = &rx_ring->rx_bi[i];
985                                 rx_bi->dma = 0;
986                                 rx_bi->hdr_buf = NULL;
987                         }
988                 }
989         }
990         /* Free all the Rx ring sk_buffs */
991         for (i = 0; i < rx_ring->count; i++) {
992                 rx_bi = &rx_ring->rx_bi[i];
993                 if (rx_bi->dma) {
994                         dma_unmap_single(dev,
995                                          rx_bi->dma,
996                                          rx_ring->rx_buf_len,
997                                          DMA_FROM_DEVICE);
998                         rx_bi->dma = 0;
999                 }
1000                 if (rx_bi->skb) {
1001                         dev_kfree_skb(rx_bi->skb);
1002                         rx_bi->skb = NULL;
1003                 }
1004                 if (rx_bi->page) {
1005                         if (rx_bi->page_dma) {
1006                                 dma_unmap_page(dev,
1007                                                rx_bi->page_dma,
1008                                                PAGE_SIZE / 2,
1009                                                DMA_FROM_DEVICE);
1010                                 rx_bi->page_dma = 0;
1011                         }
1012                         __free_page(rx_bi->page);
1013                         rx_bi->page = NULL;
1014                         rx_bi->page_offset = 0;
1015                 }
1016         }
1017
1018         bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1019         memset(rx_ring->rx_bi, 0, bi_size);
1020
1021         /* Zero out the descriptor ring */
1022         memset(rx_ring->desc, 0, rx_ring->size);
1023
1024         rx_ring->next_to_clean = 0;
1025         rx_ring->next_to_use = 0;
1026 }
1027
1028 /**
1029  * i40e_free_rx_resources - Free Rx resources
1030  * @rx_ring: ring to clean the resources from
1031  *
1032  * Free all receive software resources
1033  **/
1034 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1035 {
1036         i40e_clean_rx_ring(rx_ring);
1037         kfree(rx_ring->rx_bi);
1038         rx_ring->rx_bi = NULL;
1039
1040         if (rx_ring->desc) {
1041                 dma_free_coherent(rx_ring->dev, rx_ring->size,
1042                                   rx_ring->desc, rx_ring->dma);
1043                 rx_ring->desc = NULL;
1044         }
1045 }
1046
1047 /**
1048  * i40e_alloc_rx_headers - allocate rx header buffers
1049  * @rx_ring: ring to alloc buffers
1050  *
1051  * Allocate rx header buffers for the entire ring. As these are static,
1052  * this is only called when setting up a new ring.
1053  **/
1054 void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
1055 {
1056         struct device *dev = rx_ring->dev;
1057         struct i40e_rx_buffer *rx_bi;
1058         dma_addr_t dma;
1059         void *buffer;
1060         int buf_size;
1061         int i;
1062
1063         if (rx_ring->rx_bi[0].hdr_buf)
1064                 return;
1065         /* Make sure the buffers don't cross cache line boundaries. */
1066         buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
1067         buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
1068                                     &dma, GFP_KERNEL);
1069         if (!buffer)
1070                 return;
1071         for (i = 0; i < rx_ring->count; i++) {
1072                 rx_bi = &rx_ring->rx_bi[i];
1073                 rx_bi->dma = dma + (i * buf_size);
1074                 rx_bi->hdr_buf = buffer + (i * buf_size);
1075         }
1076 }
1077
1078 /**
1079  * i40e_setup_rx_descriptors - Allocate Rx descriptors
1080  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1081  *
1082  * Returns 0 on success, negative on failure
1083  **/
1084 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1085 {
1086         struct device *dev = rx_ring->dev;
1087         int bi_size;
1088
1089         /* warn if we are about to overwrite the pointer */
1090         WARN_ON(rx_ring->rx_bi);
1091         bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1092         rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1093         if (!rx_ring->rx_bi)
1094                 goto err;
1095
1096         u64_stats_init(&rx_ring->syncp);
1097
1098         /* Round up to nearest 4K */
1099         rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1100                 ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1101                 : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1102         rx_ring->size = ALIGN(rx_ring->size, 4096);
1103         rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1104                                            &rx_ring->dma, GFP_KERNEL);
1105
1106         if (!rx_ring->desc) {
1107                 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1108                          rx_ring->size);
1109                 goto err;
1110         }
1111
1112         rx_ring->next_to_clean = 0;
1113         rx_ring->next_to_use = 0;
1114
1115         return 0;
1116 err:
1117         kfree(rx_ring->rx_bi);
1118         rx_ring->rx_bi = NULL;
1119         return -ENOMEM;
1120 }
1121
1122 /**
1123  * i40e_release_rx_desc - Store the new tail value
1124  * @rx_ring: ring to bump
1125  * @val: new next_to_use value to write to the tail register
1126  **/
1127 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1128 {
1129         rx_ring->next_to_use = val;
1130         /* Force memory writes to complete before letting h/w
1131          * know there are new descriptors to fetch.  (Only
1132          * applicable for weak-ordered memory model archs,
1133          * such as IA-64).
1134          */
1135         wmb();
1136         writel(val, rx_ring->tail);
1137 }
1138
1139 /**
1140  * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
1141  * @rx_ring: ring to place buffers on
1142  * @cleaned_count: number of buffers to replace
1143  **/
1144 void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
1145 {
1146         u16 i = rx_ring->next_to_use;
1147         union i40e_rx_desc *rx_desc;
1148         struct i40e_rx_buffer *bi;
1149
1150         /* do nothing if no valid netdev defined */
1151         if (!rx_ring->netdev || !cleaned_count)
1152                 return;
1153
1154         while (cleaned_count--) {
1155                 rx_desc = I40E_RX_DESC(rx_ring, i);
1156                 bi = &rx_ring->rx_bi[i];
1157
1158                 if (bi->skb) /* desc is in use */
1159                         goto no_buffers;
1160                 if (!bi->page) {
1161                         bi->page = alloc_page(GFP_ATOMIC);
1162                         if (!bi->page) {
1163                                 rx_ring->rx_stats.alloc_page_failed++;
1164                                 goto no_buffers;
1165                         }
1166                 }
1167
1168                 if (!bi->page_dma) {
1169                         /* use a half page if we're re-using */
1170                         bi->page_offset ^= PAGE_SIZE / 2;
1171                         bi->page_dma = dma_map_page(rx_ring->dev,
1172                                                     bi->page,
1173                                                     bi->page_offset,
1174                                                     PAGE_SIZE / 2,
1175                                                     DMA_FROM_DEVICE);
1176                         if (dma_mapping_error(rx_ring->dev,
1177                                               bi->page_dma)) {
1178                                 rx_ring->rx_stats.alloc_page_failed++;
1179                                 bi->page_dma = 0;
1180                                 goto no_buffers;
1181                         }
1182                 }
1183
1184                 dma_sync_single_range_for_device(rx_ring->dev,
1185                                                  bi->dma,
1186                                                  0,
1187                                                  rx_ring->rx_hdr_len,
1188                                                  DMA_FROM_DEVICE);
1189                 /* Refresh the desc even if buffer_addrs didn't change
1190                  * because each write-back erases this info.
1191                  */
1192                 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1193                 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1194                 i++;
1195                 if (i == rx_ring->count)
1196                         i = 0;
1197         }
1198
1199 no_buffers:
1200         if (rx_ring->next_to_use != i)
1201                 i40e_release_rx_desc(rx_ring, i);
1202 }
1203
1204 /**
1205  * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
1206  * @rx_ring: ring to place buffers on
1207  * @cleaned_count: number of buffers to replace
1208  **/
1209 void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
1210 {
1211         u16 i = rx_ring->next_to_use;
1212         union i40e_rx_desc *rx_desc;
1213         struct i40e_rx_buffer *bi;
1214         struct sk_buff *skb;
1215
1216         /* do nothing if no valid netdev defined */
1217         if (!rx_ring->netdev || !cleaned_count)
1218                 return;
1219
1220         while (cleaned_count--) {
1221                 rx_desc = I40E_RX_DESC(rx_ring, i);
1222                 bi = &rx_ring->rx_bi[i];
1223                 skb = bi->skb;
1224
1225                 if (!skb) {
1226                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1227                                                         rx_ring->rx_buf_len);
1228                         if (!skb) {
1229                                 rx_ring->rx_stats.alloc_buff_failed++;
1230                                 goto no_buffers;
1231                         }
1232                         /* initialize queue mapping */
1233                         skb_record_rx_queue(skb, rx_ring->queue_index);
1234                         bi->skb = skb;
1235                 }
1236
1237                 if (!bi->dma) {
1238                         bi->dma = dma_map_single(rx_ring->dev,
1239                                                  skb->data,
1240                                                  rx_ring->rx_buf_len,
1241                                                  DMA_FROM_DEVICE);
1242                         if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1243                                 rx_ring->rx_stats.alloc_buff_failed++;
1244                                 bi->dma = 0;
1245                                 goto no_buffers;
1246                         }
1247                 }
1248
1249                 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1250                 rx_desc->read.hdr_addr = 0;
1251                 i++;
1252                 if (i == rx_ring->count)
1253                         i = 0;
1254         }
1255
1256 no_buffers:
1257         if (rx_ring->next_to_use != i)
1258                 i40e_release_rx_desc(rx_ring, i);
1259 }
1260
1261 /**
1262  * i40e_receive_skb - Send a completed packet up the stack
1263  * @rx_ring:  rx ring in play
1264  * @skb: packet to send up
1265  * @vlan_tag: vlan tag for packet
1266  **/
1267 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1268                              struct sk_buff *skb, u16 vlan_tag)
1269 {
1270         struct i40e_q_vector *q_vector = rx_ring->q_vector;
1271         struct i40e_vsi *vsi = rx_ring->vsi;
1272         u64 flags = vsi->back->flags;
1273
1274         if (vlan_tag & VLAN_VID_MASK)
1275                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1276
1277         if (flags & I40E_FLAG_IN_NETPOLL)
1278                 netif_rx(skb);
1279         else
1280                 napi_gro_receive(&q_vector->napi, skb);
1281 }
1282
1283 /**
1284  * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1285  * @vsi: the VSI we care about
1286  * @skb: skb currently being received and modified
1287  * @rx_status: status value of last descriptor in packet
1288  * @rx_error: error value of last descriptor in packet
1289  * @rx_ptype: ptype value of last descriptor in packet
1290  **/
1291 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1292                                     struct sk_buff *skb,
1293                                     u32 rx_status,
1294                                     u32 rx_error,
1295                                     u16 rx_ptype)
1296 {
1297         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
1298         bool ipv4 = false, ipv6 = false;
1299         bool ipv4_tunnel, ipv6_tunnel;
1300         __wsum rx_udp_csum;
1301         struct iphdr *iph;
1302         __sum16 csum;
1303
1304         ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1305                      (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1306         ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1307                      (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1308
1309         skb->ip_summed = CHECKSUM_NONE;
1310
1311         /* Rx csum enabled and ip headers found? */
1312         if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1313                 return;
1314
1315         /* did the hardware decode the packet and checksum? */
1316         if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1317                 return;
1318
1319         /* both known and outer_ip must be set for the below code to work */
1320         if (!(decoded.known && decoded.outer_ip))
1321                 return;
1322
1323         if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1324             decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
1325                 ipv4 = true;
1326         else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1327                  decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
1328                 ipv6 = true;
1329
1330         if (ipv4 &&
1331             (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1332                          BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1333                 goto checksum_fail;
1334
1335         /* likely incorrect csum if alternate IP extension headers found */
1336         if (ipv6 &&
1337             rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1338                 /* don't increment checksum err here, non-fatal err */
1339                 return;
1340
1341         /* there was some L4 error, count error and punt packet to the stack */
1342         if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1343                 goto checksum_fail;
1344
1345         /* handle packets that were not able to be checksummed due
1346          * to arrival speed, in this case the stack can compute
1347          * the csum.
1348          */
1349         if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1350                 return;
1351
1352         /* If VXLAN traffic has an outer UDPv4 checksum we need to check
1353          * it in the driver, hardware does not do it for us.
1354          * Since L3L4P bit was set we assume a valid IHL value (>=5)
1355          * so the total length of IPv4 header is IHL*4 bytes
1356          * The UDP_0 bit *may* bet set if the *inner* header is UDP
1357          */
1358         if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
1359             (ipv4_tunnel)) {
1360                 skb->transport_header = skb->mac_header +
1361                                         sizeof(struct ethhdr) +
1362                                         (ip_hdr(skb)->ihl * 4);
1363
1364                 /* Add 4 bytes for VLAN tagged packets */
1365                 skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1366                                           skb->protocol == htons(ETH_P_8021AD))
1367                                           ? VLAN_HLEN : 0;
1368
1369                 if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
1370                     (udp_hdr(skb)->check != 0)) {
1371                         rx_udp_csum = udp_csum(skb);
1372                         iph = ip_hdr(skb);
1373                         csum = csum_tcpudp_magic(
1374                                         iph->saddr, iph->daddr,
1375                                         (skb->len - skb_transport_offset(skb)),
1376                                         IPPROTO_UDP, rx_udp_csum);
1377
1378                         if (udp_hdr(skb)->check != csum)
1379                                 goto checksum_fail;
1380
1381                 } /* else it's GRE and so there is no outer UDP header */
1382         }
1383
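        /* csum_level tells the stack how many checksums beyond the first were
         * verified; for a tunneled packet we report one extra level so the
         * inner checksum is treated as validated as well.
         */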
1384         skb->ip_summed = CHECKSUM_UNNECESSARY;
1385         skb->csum_level = ipv4_tunnel || ipv6_tunnel;
1386
1387         return;
1388
1389 checksum_fail:
1390         vsi->back->hw_csum_rx_error++;
1391 }
1392
1393 /**
1394  * i40e_rx_hash - returns the hash value from the Rx descriptor
1395  * @ring: descriptor ring
1396  * @rx_desc: specific descriptor
1397  **/
1398 static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1399                                union i40e_rx_desc *rx_desc)
1400 {
1401         const __le64 rss_mask =
1402                 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1403                             I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1404
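        /* The hash in qword0 is only valid when the two-bit FLTSTAT field
         * reports an RSS hash (it can instead carry e.g. a Flow Director
         * filter ID), hence the mask-and-compare below.
         */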
1405         if ((ring->netdev->features & NETIF_F_RXHASH) &&
1406             (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1407                 return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1408         else
1409                 return 0;
1410 }
1411
1412 /**
1413  * i40e_ptype_to_hash - get a hash type
1414  * @ptype: the ptype value from the descriptor
1415  *
1416  * Returns a hash type to be used by skb_set_hash
1417  **/
1418 static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1419 {
1420         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1421
1422         if (!decoded.known)
1423                 return PKT_HASH_TYPE_NONE;
1424
1425         if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1426             decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1427                 return PKT_HASH_TYPE_L4;
1428         else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1429                  decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1430                 return PKT_HASH_TYPE_L3;
1431         else
1432                 return PKT_HASH_TYPE_L2;
1433 }
1434
1435 /**
1436  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
1437  * @rx_ring:  rx ring to clean
1438  * @budget:   how many cleans we're allowed
1439  *
1440  * Returns number of packets cleaned
1441  **/
1442 static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
1443 {
1444         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1445         u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1446         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1447         const int current_node = numa_mem_id();
1448         struct i40e_vsi *vsi = rx_ring->vsi;
1449         u16 i = rx_ring->next_to_clean;
1450         union i40e_rx_desc *rx_desc;
1451         u32 rx_error, rx_status;
1452         u8 rx_ptype;
1453         u64 qword;
1454
1455         if (budget <= 0)
1456                 return 0;
1457
1458         do {
1459                 struct i40e_rx_buffer *rx_bi;
1460                 struct sk_buff *skb;
1461                 u16 vlan_tag;
1462                 /* return some buffers to hardware, one at a time is too slow */
1463                 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1464                         i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
1465                         cleaned_count = 0;
1466                 }
1467
1468                 i = rx_ring->next_to_clean;
1469                 rx_desc = I40E_RX_DESC(rx_ring, i);
1470                 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1471                 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1472                         I40E_RXD_QW1_STATUS_SHIFT;
1473
1474                 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1475                         break;
1476
1477                 /* This memory barrier is needed to keep us from reading
1478                  * any other fields out of the rx_desc until we know the
1479                  * DD bit is set.
1480                  */
1481                 dma_rmb();
1482                 if (i40e_rx_is_programming_status(qword)) {
1483                         i40e_clean_programming_status(rx_ring, rx_desc);
1484                         I40E_RX_INCREMENT(rx_ring, i);
1485                         continue;
1486                 }
1487                 rx_bi = &rx_ring->rx_bi[i];
1488                 skb = rx_bi->skb;
1489                 if (likely(!skb)) {
1490                         skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1491                                                         rx_ring->rx_hdr_len);
1492                         if (!skb) {
1493                                 rx_ring->rx_stats.alloc_buff_failed++;
1494                                 break;
1495                         }
1496
1497                         /* initialize queue mapping */
1498                         skb_record_rx_queue(skb, rx_ring->queue_index);
1499                         /* we are reusing so sync this buffer for CPU use */
1500                         dma_sync_single_range_for_cpu(rx_ring->dev,
1501                                                       rx_bi->dma,
1502                                                       0,
1503                                                       rx_ring->rx_hdr_len,
1504                                                       DMA_FROM_DEVICE);
1505                 }
1506                 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1507                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1508                 rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1509                                 I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1510                 rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1511                          I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1512
1513                 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1514                            I40E_RXD_QW1_ERROR_SHIFT;
1515                 rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1516                 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
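                /* SPH means the hardware split the headers into the separate
                 * header buffer; HBO means the headers overflowed that buffer,
                 * in which case a full I40E_RX_HDR_SIZE worth of header bytes
                 * is copied below.  HBO is cleared from rx_error so it is not
                 * treated as a fatal receive error.
                 */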
1517
1518                 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1519                            I40E_RXD_QW1_PTYPE_SHIFT;
1520                 prefetch(rx_bi->page);
1521                 rx_bi->skb = NULL;
1522                 cleaned_count++;
1523                 if (rx_hbo || rx_sph) {
1524                         int len;
1525
1526                         if (rx_hbo)
1527                                 len = I40E_RX_HDR_SIZE;
1528                         else
1529                                 len = rx_header_len;
1530                         memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
1531                 } else if (skb->len == 0) {
1532                         int len;
1533
1534                         len = (rx_packet_len > skb_headlen(skb) ?
1535                                 skb_headlen(skb) : rx_packet_len);
1536                         memcpy(__skb_put(skb, len),
1537                                rx_bi->page + rx_bi->page_offset,
1538                                len);
1539                         rx_bi->page_offset += len;
1540                         rx_packet_len -= len;
1541                 }
1542
1543                 /* Get the rest of the data if this was a header split */
1544                 if (rx_packet_len) {
1545                         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1546                                            rx_bi->page,
1547                                            rx_bi->page_offset,
1548                                            rx_packet_len);
1549
1550                         skb->len += rx_packet_len;
1551                         skb->data_len += rx_packet_len;
1552                         skb->truesize += rx_packet_len;
1553
1554                         if ((page_count(rx_bi->page) == 1) &&
1555                             (page_to_nid(rx_bi->page) == current_node))
1556                                 get_page(rx_bi->page);
1557                         else
1558                                 rx_bi->page = NULL;
1559
1560                         dma_unmap_page(rx_ring->dev,
1561                                        rx_bi->page_dma,
1562                                        PAGE_SIZE / 2,
1563                                        DMA_FROM_DEVICE);
1564                         rx_bi->page_dma = 0;
1565                 }
1566                 I40E_RX_INCREMENT(rx_ring, i);
1567
1568                 if (unlikely(
1569                     !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1570                         struct i40e_rx_buffer *next_buffer;
1571
1572                         next_buffer = &rx_ring->rx_bi[i];
1573                         next_buffer->skb = skb;
1574                         rx_ring->rx_stats.non_eop_descs++;
1575                         continue;
1576                 }
1577
1578                 /* ERR_MASK will only have valid bits if EOP set */
1579                 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1580                         dev_kfree_skb_any(skb);
1581                         continue;
1582                 }
1583
1584                 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1585                              i40e_ptype_to_hash(rx_ptype));
1586                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1587                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1588                                            I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1589                                            I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1590                         rx_ring->last_rx_timestamp = jiffies;
1591                 }
1592
1593                 /* probably a little skewed due to removing CRC */
1594                 total_rx_bytes += skb->len;
1595                 total_rx_packets++;
1596
1597                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1598
1599                 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1600
1601                 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1602                          ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1603                          : 0;
1604 #ifdef I40E_FCOE
1605                 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1606                         dev_kfree_skb_any(skb);
1607                         continue;
1608                 }
1609 #endif
1610                 skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
1611                 i40e_receive_skb(rx_ring, skb, vlan_tag);
1612
1613                 rx_desc->wb.qword1.status_error_len = 0;
1614
1615         } while (likely(total_rx_packets < budget));
1616
1617         u64_stats_update_begin(&rx_ring->syncp);
1618         rx_ring->stats.packets += total_rx_packets;
1619         rx_ring->stats.bytes += total_rx_bytes;
1620         u64_stats_update_end(&rx_ring->syncp);
1621         rx_ring->q_vector->rx.total_packets += total_rx_packets;
1622         rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1623
1624         return total_rx_packets;
1625 }
1626
1627 /**
1628  * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
1629  * @rx_ring:  rx ring to clean
1630  * @budget:   how many cleans we're allowed
1631  *
1632  * Returns number of packets cleaned
1633  **/
1634 static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
1635 {
1636         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1637         u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1638         struct i40e_vsi *vsi = rx_ring->vsi;
1639         union i40e_rx_desc *rx_desc;
1640         u32 rx_error, rx_status;
1641         u16 rx_packet_len;
1642         u8 rx_ptype;
1643         u64 qword;
1644         u16 i;
1645
1646         do {
1647                 struct i40e_rx_buffer *rx_bi;
1648                 struct sk_buff *skb;
1649                 u16 vlan_tag;
1650                 /* return some buffers to hardware, one at a time is too slow */
1651                 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1652                         i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
1653                         cleaned_count = 0;
1654                 }
1655
1656                 i = rx_ring->next_to_clean;
1657                 rx_desc = I40E_RX_DESC(rx_ring, i);
1658                 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1659                 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1660                         I40E_RXD_QW1_STATUS_SHIFT;
1661
1662                 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
1663                         break;
1664
1665                 /* This memory barrier is needed to keep us from reading
1666                  * any other fields out of the rx_desc until we know the
1667                  * DD bit is set.
1668                  */
1669                 dma_rmb();
1670
1671                 if (i40e_rx_is_programming_status(qword)) {
1672                         i40e_clean_programming_status(rx_ring, rx_desc);
1673                         I40E_RX_INCREMENT(rx_ring, i);
1674                         continue;
1675                 }
1676                 rx_bi = &rx_ring->rx_bi[i];
1677                 skb = rx_bi->skb;
1678                 prefetch(skb->data);
1679
1680                 rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1681                                 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1682
1683                 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1684                            I40E_RXD_QW1_ERROR_SHIFT;
1685                 rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
1686
1687                 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1688                            I40E_RXD_QW1_PTYPE_SHIFT;
1689                 rx_bi->skb = NULL;
1690                 cleaned_count++;
1691
1692                 /* Get the header and possibly the whole packet.
1693                  * If this is an skb from a previous receive, dma will be 0.
1694                  */
1695                 skb_put(skb, rx_packet_len);
1696                 dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
1697                                  DMA_FROM_DEVICE);
1698                 rx_bi->dma = 0;
1699
1700                 I40E_RX_INCREMENT(rx_ring, i);
1701
1702                 if (unlikely(
1703                     !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1704                         rx_ring->rx_stats.non_eop_descs++;
1705                         continue;
1706                 }
1707
1708                 /* ERR_MASK will only have valid bits if EOP set */
1709                 if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1710                         dev_kfree_skb_any(skb);
1711                         continue;
1712                 }
1713
1714                 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1715                              i40e_ptype_to_hash(rx_ptype));
1716                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1717                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1718                                            I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1719                                            I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1720                         rx_ring->last_rx_timestamp = jiffies;
1721                 }
1722
1723                 /* probably a little skewed due to removing CRC */
1724                 total_rx_bytes += skb->len;
1725                 total_rx_packets++;
1726
1727                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1728
1729                 i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1730
1731                 vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1732                          ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1733                          : 0;
1734 #ifdef I40E_FCOE
1735                 if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
1736                         dev_kfree_skb_any(skb);
1737                         continue;
1738                 }
1739 #endif
1740                 i40e_receive_skb(rx_ring, skb, vlan_tag);
1741
1742                 rx_desc->wb.qword1.status_error_len = 0;
1743         } while (likely(total_rx_packets < budget));
1744
1745         u64_stats_update_begin(&rx_ring->syncp);
1746         rx_ring->stats.packets += total_rx_packets;
1747         rx_ring->stats.bytes += total_rx_bytes;
1748         u64_stats_update_end(&rx_ring->syncp);
1749         rx_ring->q_vector->rx.total_packets += total_rx_packets;
1750         rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1751
1752         return total_rx_packets;
1753 }
1754
1755 /**
1756  * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
1757  * @vsi: the VSI we care about
1758  * @q_vector: q_vector for which itr is being updated and interrupt enabled
1759  *
1760  **/
1761 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
1762                                           struct i40e_q_vector *q_vector)
1763 {
1764         struct i40e_hw *hw = &vsi->back->hw;
1765         u16 old_itr;
1766         int vector;
1767         u32 val;
1768
1769         vector = (q_vector->v_idx + vsi->base_vector);
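        /* When the ITR setting is dynamic, derive a fresh interval from the
         * recent Rx/Tx load and write it to this vector's DYN_CTLN register
         * while re-enabling the interrupt; otherwise simply re-enable the
         * vector with its static setting.
         */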
1770         if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
1771                 old_itr = q_vector->rx.itr;
1772                 i40e_set_new_dynamic_itr(&q_vector->rx);
1773                 if (old_itr != q_vector->rx.itr) {
1774                         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1775                         I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1776                         (I40E_RX_ITR <<
1777                                 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1778                         (q_vector->rx.itr <<
1779                                 I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1780                 } else {
1781                         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1782                         I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1783                         (I40E_ITR_NONE <<
1784                                 I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1785                 }
1786                 if (!test_bit(__I40E_DOWN, &vsi->state))
1787                         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
1788         } else {
1789                 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
1790         }
1791         if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
1792                 old_itr = q_vector->tx.itr;
1793                 i40e_set_new_dynamic_itr(&q_vector->tx);
1794                 if (old_itr != q_vector->tx.itr) {
1795                         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1796                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1797                                 (I40E_TX_ITR <<
1798                                    I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
1799                                 (q_vector->tx.itr <<
1800                                    I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
1801                 } else {
1802                         val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1803                                 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1804                                 (I40E_ITR_NONE <<
1805                                    I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1806                 }
1807                 if (!test_bit(__I40E_DOWN, &vsi->state))
1808                         wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
1809                               vsi->base_vector - 1), val);
1810         } else {
1811                 i40e_irq_dynamic_enable(vsi, q_vector->v_idx);
1812         }
1813 }
1814
1815 /**
1816  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1817  * @napi: napi struct with our devices info in it
1818  * @budget: amount of work driver is allowed to do this pass, in packets
1819  *
1820  * This function will clean all queues associated with a q_vector.
1821  *
1822  * Returns the amount of work done
1823  **/
1824 int i40e_napi_poll(struct napi_struct *napi, int budget)
1825 {
1826         struct i40e_q_vector *q_vector =
1827                                container_of(napi, struct i40e_q_vector, napi);
1828         struct i40e_vsi *vsi = q_vector->vsi;
1829         struct i40e_ring *ring;
1830         bool clean_complete = true;
1831         bool arm_wb = false;
1832         int budget_per_ring;
1833         int cleaned;
1834
1835         if (test_bit(__I40E_DOWN, &vsi->state)) {
1836                 napi_complete(napi);
1837                 return 0;
1838         }
1839
1840         /* Since the actual Tx work is minimal, we can give the Tx a larger
1841          * budget and be more aggressive about cleaning up the Tx descriptors.
1842          */
1843         i40e_for_each_ring(ring, q_vector->tx) {
1844                 clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1845                 arm_wb |= ring->arm_wb;
1846                 ring->arm_wb = false;
1847         }
1848
1849         /* Handle case where we are called by netpoll with a budget of 0 */
1850         if (budget <= 0)
1851                 goto tx_only;
1852
1853         /* We attempt to distribute budget to each Rx queue fairly, but don't
1854          * allow the budget to go below 1 because that would exit polling early.
1855          */
1856         budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
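        /* e.g. with the default NAPI budget of 64 and 4 ring pairs on this
         * vector, each Rx ring gets a budget of 16 descriptors per poll.
         */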
1857
1858         i40e_for_each_ring(ring, q_vector->rx) {
1859                 if (ring_is_ps_enabled(ring))
1860                         cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
1861                 else
1862                         cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
1863                 /* if we didn't clean as many as budgeted, we must be done */
1864                 clean_complete &= (budget_per_ring != cleaned);
1865         }
1866
1867         /* If work not completed, return budget and polling will return */
1868         if (!clean_complete) {
1869 tx_only:
1870                 if (arm_wb)
1871                         i40e_force_wb(vsi, q_vector);
1872                 return budget;
1873         }
1874
1875         if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
1876                 q_vector->arm_wb_state = false;
1877
1878         /* Work is done so exit the polling mode and re-enable the interrupt */
1879         napi_complete(napi);
1880         if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1881                 i40e_update_enable_itr(vsi, q_vector);
1882         } else { /* Legacy mode */
1883                 struct i40e_hw *hw = &vsi->back->hw;
1884                 /* We re-enable the queue 0 cause, but
1885                  * don't worry about dynamic_enable
1886                  * because we left it on for the other
1887                  * possible interrupts during napi
1888                  */
1889                 u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
1890                            I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1891
1892                 wr32(hw, I40E_QINT_RQCTL(0), qval);
1893                 qval = rd32(hw, I40E_QINT_TQCTL(0)) |
1894                        I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1895                 wr32(hw, I40E_QINT_TQCTL(0), qval);
1896                 i40e_irq_dynamic_enable_icr0(vsi->back);
1897         }
1898         return 0;
1899 }
1900
1901 /**
1902  * i40e_atr - Add a Flow Director ATR filter
1903  * @tx_ring:  ring to add programming descriptor to
1904  * @skb:      send buffer
1905  * @tx_flags: send tx flags
1906  * @protocol: wire protocol
1907  **/
1908 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1909                      u32 tx_flags, __be16 protocol)
1910 {
1911         struct i40e_filter_program_desc *fdir_desc;
1912         struct i40e_pf *pf = tx_ring->vsi->back;
1913         union {
1914                 unsigned char *network;
1915                 struct iphdr *ipv4;
1916                 struct ipv6hdr *ipv6;
1917         } hdr;
1918         struct tcphdr *th;
1919         unsigned int hlen;
1920         u32 flex_ptype, dtype_cmd;
1921         u16 i;
1922
1923         /* make sure ATR is enabled */
1924         if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1925                 return;
1926
1927         if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1928                 return;
1929
1930         /* if sampling is disabled do nothing */
1931         if (!tx_ring->atr_sample_rate)
1932                 return;
1933
1934         if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
1935                 return;
1936
1937         if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
1938                 /* snag network header to get L4 type and address */
1939                 hdr.network = skb_network_header(skb);
1940
1941                 /* Currently only IPv4/IPv6 with TCP is supported
1942                  * access ihl as u8 to avoid unaligned access on ia64
1943                  */
1944                 if (tx_flags & I40E_TX_FLAGS_IPV4)
1945                         hlen = (hdr.network[0] & 0x0F) << 2;
1946                 else if (protocol == htons(ETH_P_IPV6))
1947                         hlen = sizeof(struct ipv6hdr);
1948                 else
1949                         return;
1950         } else {
1951                 hdr.network = skb_inner_network_header(skb);
1952                 hlen = skb_inner_network_header_len(skb);
1953         }
1954
1955         /* Currently only IPv4/IPv6 with TCP is supported
1956          * Note: tx_flags gets modified to reflect inner protocols in
1957          * tx_enable_csum function if encap is enabled.
1958          */
1959         if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
1960             (hdr.ipv4->protocol != IPPROTO_TCP))
1961                 return;
1962         else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
1963                  (hdr.ipv6->nexthdr != IPPROTO_TCP))
1964                 return;
1965
1966         th = (struct tcphdr *)(hdr.network + hlen);
1967
1968         /* Due to lack of space, no more new filters can be programmed */
1969         if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1970                 return;
1971         if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
1972                 /* HW ATR eviction will take care of removing filters on FIN
1973                  * and RST packets.
1974                  */
1975                 if (th->fin || th->rst)
1976                         return;
1977         }
1978
1979         tx_ring->atr_count++;
1980
1981         /* sample on all syn/fin/rst packets or once every atr sample rate */
1982         if (!th->fin &&
1983             !th->syn &&
1984             !th->rst &&
1985             (tx_ring->atr_count < tx_ring->atr_sample_rate))
1986                 return;
1987
1988         tx_ring->atr_count = 0;
1989
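        /* Program an ATR filter: ADD on normal data packets so the hardware
         * steers the return traffic of this flow to the Rx queue paired with
         * this Tx queue, REMOVE on FIN/RST since the flow is terminating.
         */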
1990         /* grab the next descriptor */
1991         i = tx_ring->next_to_use;
1992         fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1993
1994         i++;
1995         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1996
1997         flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1998                       I40E_TXD_FLTR_QW0_QINDEX_MASK;
1999         flex_ptype |= (protocol == htons(ETH_P_IP)) ?
2000                       (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2001                        I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2002                       (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2003                        I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2004
2005         flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2006
2007         dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2008
2009         dtype_cmd |= (th->fin || th->rst) ?
2010                      (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2011                       I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2012                      (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2013                       I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2014
2015         dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2016                      I40E_TXD_FLTR_QW1_DEST_SHIFT;
2017
2018         dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2019                      I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2020
2021         dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2022         if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
2023                 dtype_cmd |=
2024                         ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2025                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2026                         I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2027         else
2028                 dtype_cmd |=
2029                         ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2030                         I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2031                         I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2032
2033         if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2034                 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2035
2036         fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2037         fdir_desc->rsvd = cpu_to_le32(0);
2038         fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2039         fdir_desc->fd_id = cpu_to_le32(0);
2040 }
2041
2042 /**
2043  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2044  * @skb:     send buffer
2045  * @tx_ring: ring to send buffer on
2046  * @flags:   the tx flags to be set
2047  *
2048  * Checks the skb and set up correspondingly several generic transmit flags
2049  * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2050  *
2051  * Returns an error code to indicate the frame should be dropped upon error,
2052  * otherwise returns 0 to indicate the flags have been set properly.
2053  **/
2054 #ifdef I40E_FCOE
2055 inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2056                                       struct i40e_ring *tx_ring,
2057                                       u32 *flags)
2058 #else
2059 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2060                                              struct i40e_ring *tx_ring,
2061                                              u32 *flags)
2062 #endif
2063 {
2064         __be16 protocol = skb->protocol;
2065         u32  tx_flags = 0;
2066
2067         if (protocol == htons(ETH_P_8021Q) &&
2068             !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2069                 /* When HW VLAN acceleration is turned off by the user the
2070                  * stack sets the protocol to 8021q so that the driver
2071                  * can take any steps required to support the SW only
2072                  * VLAN handling.  In our case the driver doesn't need
2073                  * to take any further steps so just set the protocol
2074                  * to the encapsulated ethertype.
2075                  */
2076                 skb->protocol = vlan_get_protocol(skb);
2077                 goto out;
2078         }
2079
2080         /* if we have a HW VLAN tag being added, default to the HW one */
2081         if (skb_vlan_tag_present(skb)) {
2082                 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2083                 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2084         /* else if it is a SW VLAN, check the next protocol and store the tag */
2085         } else if (protocol == htons(ETH_P_8021Q)) {
2086                 struct vlan_hdr *vhdr, _vhdr;
2087
2088                 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2089                 if (!vhdr)
2090                         return -EINVAL;
2091
2092                 protocol = vhdr->h_vlan_encapsulated_proto;
2093                 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2094                 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2095         }
2096
2097         if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2098                 goto out;
2099
2100         /* Insert 802.1p priority into VLAN header */
2101         if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2102             (skb->priority != TC_PRIO_CONTROL)) {
2103                 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2104                 tx_flags |= (skb->priority & 0x7) <<
2105                                 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2106                 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2107                         struct vlan_ethhdr *vhdr;
2108                         int rc;
2109
2110                         rc = skb_cow_head(skb, 0);
2111                         if (rc < 0)
2112                                 return rc;
2113                         vhdr = (struct vlan_ethhdr *)skb->data;
2114                         vhdr->h_vlan_TCI = htons(tx_flags >>
2115                                                  I40E_TX_FLAGS_VLAN_SHIFT);
2116                 } else {
2117                         tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2118                 }
2119         }
2120
2121 out:
2122         *flags = tx_flags;
2123         return 0;
2124 }
2125
2126 /**
2127  * i40e_tso - set up the tso context descriptor
2128  * @tx_ring:  ptr to the ring to send
2129  * @skb:      ptr to the skb we're sending
2130  * @hdr_len:  ptr to the size of the packet header
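 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor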
2131  * @cd_tunneling: ptr to context descriptor bits
2132  *
2133  * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error code
2134  **/
2135 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
2136                     u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
2137                     u32 *cd_tunneling)
2138 {
2139         u32 cd_cmd, cd_tso_len, cd_mss;
2140         struct ipv6hdr *ipv6h;
2141         struct tcphdr *tcph;
2142         struct iphdr *iph;
2143         u32 l4len;
2144         int err;
2145
2146         if (!skb_is_gso(skb))
2147                 return 0;
2148
2149         err = skb_cow_head(skb, 0);
2150         if (err < 0)
2151                 return err;
2152
2153         iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
2154         ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
2155
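        /* For TSO the hardware rebuilds the IP length/checksum and the TCP
         * checksum for every segment, so zero the length fields here and seed
         * the TCP checksum with the pseudo-header sum (computed with a zero
         * length) for the hardware to build on.
         */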
2156         if (iph->version == 4) {
2157                 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2158                 iph->tot_len = 0;
2159                 iph->check = 0;
2160                 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2161                                                  0, IPPROTO_TCP, 0);
2162         } else if (ipv6h->version == 6) {
2163                 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
2164                 ipv6h->payload_len = 0;
2165                 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
2166                                                0, IPPROTO_TCP, 0);
2167         }
2168
2169         l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
2170         *hdr_len = (skb->encapsulation
2171                     ? (skb_inner_transport_header(skb) - skb->data)
2172                     : skb_transport_offset(skb)) + l4len;
2173
2174         /* find the field values */
2175         cd_cmd = I40E_TX_CTX_DESC_TSO;
2176         cd_tso_len = skb->len - *hdr_len;
2177         cd_mss = skb_shinfo(skb)->gso_size;
2178         *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2179                                 ((u64)cd_tso_len <<
2180                                  I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2181                                 ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2182         return 1;
2183 }
2184
2185 /**
2186  * i40e_tsyn - set up the tsyn context descriptor
2187  * @tx_ring:  ptr to the ring to send
2188  * @skb:      ptr to the skb we're sending
2189  * @tx_flags: the collected send information
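 * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor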
2190  *
2191  * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2192  **/
2193 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2194                      u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2195 {
2196         struct i40e_pf *pf;
2197
2198         if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2199                 return 0;
2200
2201         /* Tx timestamps cannot be sampled when doing TSO */
2202         if (tx_flags & I40E_TX_FLAGS_TSO)
2203                 return 0;
2204
2205         /* only timestamp the outbound packet if the user has requested it and
2206          * we are not already transmitting a packet to be timestamped
2207          */
2208         pf = i40e_netdev_to_pf(tx_ring->netdev);
2209         if (!(pf->flags & I40E_FLAG_PTP))
2210                 return 0;
2211
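        /* Only one outbound packet can be timestamped at a time: the
         * __I40E_PTP_TX_IN_PROGRESS bit acts as a lock, and the skb reference
         * taken below is held until the hardware timestamp is retrieved.
         */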
2212         if (pf->ptp_tx &&
2213             !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2214                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2215                 pf->ptp_tx_skb = skb_get(skb);
2216         } else {
2217                 return 0;
2218         }
2219
2220         *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2221                                 I40E_TXD_CTX_QW1_CMD_SHIFT;
2222
2223         return 1;
2224 }
2225
2226 /**
2227  * i40e_tx_enable_csum - Enable Tx checksum offloads
2228  * @skb: send buffer
2229  * @tx_flags: pointer to Tx flags currently set
2230  * @td_cmd: Tx descriptor command bits to set
2231  * @td_offset: Tx descriptor header offsets to set
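 * @tx_ring: Tx ring being used to send the buffer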
2232  * @cd_tunneling: ptr to context desc bits
2233  **/
2234 static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2235                                 u32 *td_cmd, u32 *td_offset,
2236                                 struct i40e_ring *tx_ring,
2237                                 u32 *cd_tunneling)
2238 {
2239         struct ipv6hdr *this_ipv6_hdr;
2240         unsigned int this_tcp_hdrlen;
2241         struct iphdr *this_ip_hdr;
2242         u32 network_hdr_len;
2243         u8 l4_hdr = 0;
2244         struct udphdr *oudph;
2245         struct iphdr *oiph;
2246         u32 l4_tunnel = 0;
2247
2248         if (skb->encapsulation) {
2249                 switch (ip_hdr(skb)->protocol) {
2250                 case IPPROTO_UDP:
2251                         oudph = udp_hdr(skb);
2252                         oiph = ip_hdr(skb);
2253                         l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
2254                         *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
2255                         break;
2256                 case IPPROTO_GRE:
2257                         l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING;
2258                         break;
2259                 default:
2260                         return;
2261                 }
2262                 network_hdr_len = skb_inner_network_header_len(skb);
2263                 this_ip_hdr = inner_ip_hdr(skb);
2264                 this_ipv6_hdr = inner_ipv6_hdr(skb);
2265                 this_tcp_hdrlen = inner_tcp_hdrlen(skb);
2266
2267                 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2268                         if (*tx_flags & I40E_TX_FLAGS_TSO) {
2269                                 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
2270                                 ip_hdr(skb)->check = 0;
2271                         } else {
2272                                 *cd_tunneling |=
2273                                          I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2274                         }
2275                 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2276                         *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
2277                         if (*tx_flags & I40E_TX_FLAGS_TSO)
2278                                 ip_hdr(skb)->check = 0;
2279                 }
2280
2281                 /* Now set the ctx descriptor fields */
2282                 *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
2283                                    I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT      |
2284                                    l4_tunnel                             |
2285                                    ((skb_inner_network_offset(skb) -
2286                                         skb_transport_offset(skb)) >> 1) <<
2287                                    I40E_TXD_CTX_QW0_NATLEN_SHIFT;
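                /* Per the context descriptor layout, EXT_IPLEN above is the
                 * outer IP header length in 4-byte words (hence >> 2) and
                 * NATLEN spans from the outer transport header to the inner
                 * IP header in 2-byte words (hence >> 1).
                 */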
2288                 if (this_ip_hdr->version == 6) {
2289                         *tx_flags &= ~I40E_TX_FLAGS_IPV4;
2290                         *tx_flags |= I40E_TX_FLAGS_IPV6;
2291                 }
2292                 if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
2293                     (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
2294                     (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
2295                         oudph->check = ~csum_tcpudp_magic(oiph->saddr,
2296                                         oiph->daddr,
2297                                         (skb->len - skb_transport_offset(skb)),
2298                                         IPPROTO_UDP, 0);
2299                         *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2300                 }
2301         } else {
2302                 network_hdr_len = skb_network_header_len(skb);
2303                 this_ip_hdr = ip_hdr(skb);
2304                 this_ipv6_hdr = ipv6_hdr(skb);
2305                 this_tcp_hdrlen = tcp_hdrlen(skb);
2306         }
2307
2308         /* Enable IP checksum offloads */
2309         if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2310                 l4_hdr = this_ip_hdr->protocol;
2311                 /* the stack computes the IP header already, the only time we
2312                  * need the hardware to recompute it is in the case of TSO.
2313                  */
2314                 if (*tx_flags & I40E_TX_FLAGS_TSO) {
2315                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
2316                         this_ip_hdr->check = 0;
2317                 } else {
2318                         *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
2319                 }
2320                 /* Now set the td_offset for IP header length */
2321                 *td_offset = (network_hdr_len >> 2) <<
2322                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2323         } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2324                 l4_hdr = this_ipv6_hdr->nexthdr;
2325                 *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2326                 /* Now set the td_offset for IP header length */
2327                 *td_offset = (network_hdr_len >> 2) <<
2328                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2329         }
2330         /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
2331         *td_offset |= (skb_network_offset(skb) >> 1) <<
2332                        I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2333
2334         /* Enable L4 checksum offloads */
2335         switch (l4_hdr) {
2336         case IPPROTO_TCP:
2337                 /* enable checksum offloads */
2338                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2339                 *td_offset |= (this_tcp_hdrlen >> 2) <<
2340                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2341                 break;
2342         case IPPROTO_SCTP:
2343                 /* enable SCTP checksum offload */
2344                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2345                 *td_offset |= (sizeof(struct sctphdr) >> 2) <<
2346                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2347                 break;
2348         case IPPROTO_UDP:
2349                 /* enable UDP checksum offload */
2350                 *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2351                 *td_offset |= (sizeof(struct udphdr) >> 2) <<
2352                                I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2353                 break;
2354         default:
2355                 break;
2356         }
2357 }
2358
2359 /**
2360  * i40e_create_tx_ctx - Build the Tx context descriptor
2361  * @tx_ring:  ring to create the descriptor on
2362  * @cd_type_cmd_tso_mss: Quad Word 1
2363  * @cd_tunneling: Quad Word 0 - bits 0-31
2364  * @cd_l2tag2: Quad Word 0 - bits 32-63
2365  **/
2366 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2367                                const u64 cd_type_cmd_tso_mss,
2368                                const u32 cd_tunneling, const u32 cd_l2tag2)
2369 {
2370         struct i40e_tx_context_desc *context_desc;
2371         int i = tx_ring->next_to_use;
2372
2373         if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2374             !cd_tunneling && !cd_l2tag2)
2375                 return;
2376
2377         /* grab the next descriptor */
2378         context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2379
2380         i++;
2381         tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2382
2383         /* cpu_to_le32 and assign to struct fields */
2384         context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2385         context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2386         context_desc->rsvd = cpu_to_le16(0);
2387         context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2388 }
2389
2390 /**
2391  * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2392  * @tx_ring: the ring to be checked
2393  * @size:    the size buffer we want to assure is available
2394  *
2395  * Returns -EBUSY if a stop is needed, else 0
2396  **/
2397 static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2398 {
2399         netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2400         /* Memory barrier before checking head and tail */
2401         smp_mb();
2402
2403         /* Check again in case another CPU has just made room available. */
2404         if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2405                 return -EBUSY;
2406
2407         /* A reprieve! - use start_queue because it doesn't call schedule */
2408         netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2409         ++tx_ring->tx_stats.restart_queue;
2410         return 0;
2411 }
2412
2413 /**
2414  * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2415  * @tx_ring: the ring to be checked
2416  * @size:    the size buffer we want to assure is available
2417  *
2418  * Returns 0 if stop is not needed
2419  **/
2420 #ifdef I40E_FCOE
2421 inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2422 #else
2423 static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2424 #endif
2425 {
2426         if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2427                 return 0;
2428         return __i40e_maybe_stop_tx(tx_ring, size);
2429 }
2430
2431 /**
2432  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2433  * @skb:      send buffer
2434  * @tx_flags: collected send information
2435  *
2436  * Note: Our HW can't scatter-gather more than 8 fragments to build
2437  * a packet on the wire and so we need to figure out the cases where we
2438  * need to linearize the skb.
2439  **/
2440 static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
2441 {
2442         struct skb_frag_struct *frag;
2443         bool linearize = false;
2444         unsigned int size = 0;
2445         u16 num_frags;
2446         u16 gso_segs;
2447
2448         num_frags = skb_shinfo(skb)->nr_frags;
2449         gso_segs = skb_shinfo(skb)->gso_segs;
2450
2451         if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2452                 u16 j = 0;
2453
2454                 if (num_frags < (I40E_MAX_BUFFER_TXD))
2455                         goto linearize_chk_done;
2456                 /* try the simple math to see if we have too many frags per segment */
2457                 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2458                     I40E_MAX_BUFFER_TXD) {
2459                         linearize = true;
2460                         goto linearize_chk_done;
2461                 }
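                /* The check above catches e.g. 16 frags spread over 2 GSO
                 * segments: DIV_ROUND_UP(18, 2) = 9 exceeds the limit of 8
                 * fragments per wire packet, so linearize without walking the
                 * individual fragments.
                 */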
2462                 frag = &skb_shinfo(skb)->frags[0];
2463                 /* we might still have more fragments per segment */
2464                 do {
2465                         size += skb_frag_size(frag);
2466                         frag++; j++;
2467                         if ((size >= skb_shinfo(skb)->gso_size) &&
2468                             (j < I40E_MAX_BUFFER_TXD)) {
2469                                 size = (size % skb_shinfo(skb)->gso_size);
2470                                 j = (size) ? 1 : 0;
2471                         }
2472                         if (j == I40E_MAX_BUFFER_TXD) {
2473                                 linearize = true;
2474                                 break;
2475                         }
2476                         num_frags--;
2477                 } while (num_frags);
2478         } else {
2479                 if (num_frags >= I40E_MAX_BUFFER_TXD)
2480                         linearize = true;
2481         }
2482
2483 linearize_chk_done:
2484         return linearize;
2485 }
2486
2487 /**
2488  * i40e_tx_map - Build the Tx descriptor
2489  * @tx_ring:  ring to send buffer on
2490  * @skb:      send buffer
2491  * @first:    first buffer info buffer to use
2492  * @tx_flags: collected send information
2493  * @hdr_len:  size of the packet header
2494  * @td_cmd:   the command field in the descriptor
2495  * @td_offset: offset for checksum or crc
2496  **/
2497 #ifdef I40E_FCOE
2498 inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2499                         struct i40e_tx_buffer *first, u32 tx_flags,
2500                         const u8 hdr_len, u32 td_cmd, u32 td_offset)
2501 #else
2502 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2503                                struct i40e_tx_buffer *first, u32 tx_flags,
2504                                const u8 hdr_len, u32 td_cmd, u32 td_offset)
2505 #endif
2506 {
2507         unsigned int data_len = skb->data_len;
2508         unsigned int size = skb_headlen(skb);
2509         struct skb_frag_struct *frag;
2510         struct i40e_tx_buffer *tx_bi;
2511         struct i40e_tx_desc *tx_desc;
2512         u16 i = tx_ring->next_to_use;
2513         u32 td_tag = 0;
2514         dma_addr_t dma;
2515         u16 gso_segs;
2516         u16 desc_count = 0;
2517         bool tail_bump = true;
2518         bool do_rs = false;
2519
2520         if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2521                 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2522                 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2523                          I40E_TX_FLAGS_VLAN_SHIFT;
2524         }
2525
2526         if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2527                 gso_segs = skb_shinfo(skb)->gso_segs;
2528         else
2529                 gso_segs = 1;
2530
2531         /* count the header once per segment; gso_segs is 1 for non-TSO frames */
2532         first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2533         first->gso_segs = gso_segs;
2534         first->skb = skb;
2535         first->tx_flags = tx_flags;
2536
2537         dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2538
2539         tx_desc = I40E_TX_DESC(tx_ring, i);
2540         tx_bi = first;
2541
2542         for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2543                 if (dma_mapping_error(tx_ring->dev, dma))
2544                         goto dma_error;
2545
2546                 /* record length, and DMA address */
2547                 dma_unmap_len_set(tx_bi, len, size);
2548                 dma_unmap_addr_set(tx_bi, dma, dma);
2549
2550                 tx_desc->buffer_addr = cpu_to_le64(dma);
2551
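                /* A buffer larger than I40E_MAX_DATA_PER_TXD is spread across
                 * several data descriptors; each pass of this loop emits one
                 * maximum-sized descriptor and the remainder is carried by the
                 * descriptor written after the loop.
                 */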
2552                 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2553                         tx_desc->cmd_type_offset_bsz =
2554                                 build_ctob(td_cmd, td_offset,
2555                                            I40E_MAX_DATA_PER_TXD, td_tag);
2556
2557                         tx_desc++;
2558                         i++;
2559                         desc_count++;
2560
2561                         if (i == tx_ring->count) {
2562                                 tx_desc = I40E_TX_DESC(tx_ring, 0);
2563                                 i = 0;
2564                         }
2565
2566                         dma += I40E_MAX_DATA_PER_TXD;
2567                         size -= I40E_MAX_DATA_PER_TXD;
2568
2569                         tx_desc->buffer_addr = cpu_to_le64(dma);
2570                 }
2571
2572                 if (likely(!data_len))
2573                         break;
2574
2575                 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2576                                                           size, td_tag);
2577
2578                 tx_desc++;
2579                 i++;
2580                 desc_count++;
2581
2582                 if (i == tx_ring->count) {
2583                         tx_desc = I40E_TX_DESC(tx_ring, 0);
2584                         i = 0;
2585                 }
2586
2587                 size = skb_frag_size(frag);
2588                 data_len -= size;
2589
2590                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2591                                        DMA_TO_DEVICE);
2592
2593                 tx_bi = &tx_ring->tx_bi[i];
2594         }
2595
2596         /* set next_to_watch value indicating a packet is present */
2597         first->next_to_watch = tx_desc;
2598
2599         i++;
2600         if (i == tx_ring->count)
2601                 i = 0;
2602
2603         tx_ring->next_to_use = i;
2604
2605         netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2606                                                  tx_ring->queue_index),
2607                                                  first->bytecount);
2608         i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2609
2610         /* Algorithm to optimize tail and RS bit setting:
2611          * if xmit_more is supported
2612          *      if xmit_more is true
2613          *              do not update tail and do not mark RS bit.
2614          *      if xmit_more is false and last xmit_more was false
2615          *              if every packet spanned less than 4 desc
2616          *                      then set RS bit on 4th packet and update tail
2617          *                      on every packet
2618          *              else
2619          *                      update tail and set RS bit on every packet.
2620          *      if xmit_more is false and last_xmit_more was true
2621          *              update tail and set RS bit.
2622          *
2623          * Optimization: wmb to be issued only in case of tail update.
2624          * Also optimize the Descriptor WB path for RS bit with the same
2625          * algorithm.
2626          *
2627          * Note: If there are fewer than 4 packets
2628          * pending and interrupts were disabled, the service task will
2629          * trigger a force WB.
2630          */
2631         if (skb->xmit_more  &&
2632             !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2633                                                     tx_ring->queue_index))) {
2634                 tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2635                 tail_bump = false;
2636         } else if (!skb->xmit_more &&
2637                    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
2638                                                        tx_ring->queue_index)) &&
2639                    (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
2640                    (tx_ring->packet_stride < WB_STRIDE) &&
2641                    (desc_count < WB_STRIDE)) {
2642                 tx_ring->packet_stride++;
2643         } else {
2644                 tx_ring->packet_stride = 0;
2645                 tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
2646                 do_rs = true;
2647         }
2648         if (do_rs)
2649                 tx_ring->packet_stride = 0;
2650
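             /* always set EOP on the last descriptor; add RS only when do_rs */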
2651         tx_desc->cmd_type_offset_bsz =
2652                         build_ctob(td_cmd, td_offset, size, td_tag) |
2653                         cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
2654                                                   I40E_TX_DESC_CMD_EOP) <<
2655                                                   I40E_TXD_QW1_CMD_SHIFT);
2656
2657         /* not notifying HW yet? just prefetch the next descriptor */
2658         if (!tail_bump)
2659                 prefetchw(tx_desc + 1);
2660
2661         if (tail_bump) {
2662                 /* Force memory writes to complete before letting h/w
2663                  * know there are new descriptors to fetch.  (Only
2664                  * applicable for weak-ordered memory model archs,
2665                  * such as IA-64).
2666                  */
2667                 wmb();
2668                 writel(i, tx_ring->tail);
2669         }
2670
2671         return;
2672
2673 dma_error:
2674         dev_info(tx_ring->dev, "TX DMA map failed\n");
2675
2676         /* clear dma mappings for failed tx_bi map */
2677         for (;;) {
2678                 tx_bi = &tx_ring->tx_bi[i];
2679                 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2680                 if (tx_bi == first)
2681                         break;
2682                 if (i == 0)
2683                         i = tx_ring->count;
2684                 i--;
2685         }
2686
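             /* rewind next_to_use so the unwound descriptors get reused */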
2687         tx_ring->next_to_use = i;
2688 }
2689
2690 /**
2691  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2692  * @skb:     send buffer
2693  * @tx_ring: ring to send buffer on
2694  *
2695  * Returns the number of data descriptors needed for this skb. Returns 0 to
2696  * indicate there are not enough descriptors available in this ring, since we
2697  * need at least one descriptor.
2698  **/
2699 #ifdef I40E_FCOE
2700 inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2701                                       struct i40e_ring *tx_ring)
2702 #else
2703 static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
2704                                              struct i40e_ring *tx_ring)
2705 #endif
2706 {
2707         unsigned int f;
2708         int count = 0;
2709
2710         /* need: 1 descriptor per I40E_MAX_DATA_PER_TXD chunk of each frag,
2711          *       + 1 desc per I40E_MAX_DATA_PER_TXD chunk of skb_headlen(),
2712          *       + 4 desc gap to avoid the cache line where head is,
2713          *       + 1 desc for the context descriptor;
2714          * otherwise stop the queue and try again next time
2715          */
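             /* TXD_USE_COUNT() rounds a length up to whole descriptors */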
2716         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2717                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2718
2719         count += TXD_USE_COUNT(skb_headlen(skb));
2720         if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2721                 tx_ring->tx_stats.tx_busy++;
2722                 return 0;
2723         }
2724         return count;
2725 }
2726
2727 /**
2728  * i40e_xmit_frame_ring - Sends buffer on Tx ring
2729  * @skb:     send buffer
2730  * @tx_ring: ring to send buffer on
2731  *
2732  * Returns NETDEV_TX_OK if sent, else an error code
2733  **/
2734 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2735                                         struct i40e_ring *tx_ring)
2736 {
2737         u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2738         u32 cd_tunneling = 0, cd_l2tag2 = 0;
2739         struct i40e_tx_buffer *first;
2740         u32 td_offset = 0;
2741         u32 tx_flags = 0;
2742         __be16 protocol;
2743         u32 td_cmd = 0;
2744         u8 hdr_len = 0;
2745         int tsyn;
2746         int tso;
2747
2748         if (i40e_xmit_descriptor_count(skb, tx_ring) == 0)
2749                 return NETDEV_TX_BUSY;
2750
2751         /* prepare the xmit flags */
2752         if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2753                 goto out_drop;
2754
2755         /* obtain protocol of skb */
2756         protocol = vlan_get_protocol(skb);
2757
2758         /* record the location of the first descriptor for this packet */
2759         first = &tx_ring->tx_bi[tx_ring->next_to_use];
2760
2761         /* setup IPv4/IPv6 offloads */
2762         if (protocol == htons(ETH_P_IP))
2763                 tx_flags |= I40E_TX_FLAGS_IPV4;
2764         else if (protocol == htons(ETH_P_IPV6))
2765                 tx_flags |= I40E_TX_FLAGS_IPV6;
2766
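             /* set up TSO; a negative return means the skb must be dropped */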
2767         tso = i40e_tso(tx_ring, skb, &hdr_len,
2768                        &cd_type_cmd_tso_mss, &cd_tunneling);
2769
2770         if (tso < 0)
2771                 goto out_drop;
2772         else if (tso)
2773                 tx_flags |= I40E_TX_FLAGS_TSO;
2774
2775         tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2776
2777         if (tsyn)
2778                 tx_flags |= I40E_TX_FLAGS_TSYN;
2779
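             /* fall back to a linear skb when its fragment layout exceeds
              * what the hardware can chain for a single packet
              */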
2780         if (i40e_chk_linearize(skb, tx_flags)) {
2781                 if (skb_linearize(skb))
2782                         goto out_drop;
2783                 tx_ring->tx_stats.tx_linearize++;
2784         }
2785         skb_tx_timestamp(skb);
2786
2787         /* always enable CRC insertion offload */
2788         td_cmd |= I40E_TX_DESC_CMD_ICRC;
2789
2790         /* Always offload the checksum, since it's in the data descriptor */
2791         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2792                 tx_flags |= I40E_TX_FLAGS_CSUM;
2793
2794                 i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
2795                                     tx_ring, &cd_tunneling);
2796         }
2797
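             /* write a context descriptor when the offloads above need one */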
2798         i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2799                            cd_tunneling, cd_l2tag2);
2800
2801         /* Add Flow Director ATR if it's enabled.
2802          *
2803          * NOTE: this must always be directly before the data descriptor.
2804          */
2805         i40e_atr(tx_ring, skb, tx_flags, protocol);
2806
2807         i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2808                     td_cmd, td_offset);
2809
2810         return NETDEV_TX_OK;
2811
2812 out_drop:
2813         dev_kfree_skb_any(skb);
2814         return NETDEV_TX_OK;
2815 }
2816
2817 /**
2818  * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2819  * @skb:    send buffer
2820  * @netdev: network interface device structure
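      *
      * This is the LAN netdev's ndo_start_xmit entry point.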
2821  *
2822  * Returns NETDEV_TX_OK if sent, else an error code
2823  **/
2824 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2825 {
2826         struct i40e_netdev_priv *np = netdev_priv(netdev);
2827         struct i40e_vsi *vsi = np->vsi;
2828         struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2829
2830         /* hardware can't handle really short frames; pad to the minimum
2831          * length here, beyond which hardware padding works
2832          */
2833         if (skb_put_padto(skb, I40E_MIN_TX_LEN))
2834                 return NETDEV_TX_OK;
2835
2836         return i40e_xmit_frame_ring(skb, tx_ring);
2837 }