/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include "i40e.h"
#include "i40e_prototype.h"
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
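/* Illustrative sketch (not driver code): build_ctob() packs the second quad
 * word of a data descriptor.  For a final 1514-byte fragment with EOP and RS
 * set and no offloads or VLAN tag, a caller would write something like:
 *
 *	desc->cmd_type_offset_bsz = build_ctob(I40E_TXD_CMD, 0, 1514, 0);
 *
 * i.e. the data DTYPE, command bits, header offsets, buffer size and L2 tag
 * merged into a single little-endian u64.
 */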
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
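/* Illustrative sketch (assumptions noted, not part of the driver): a caller
 * typically fills a struct i40e_fdir_filter before handing it to
 * i40e_program_fdir_filter() below, e.g. to steer TCP/IPv4 port-80 traffic
 * to Rx queue 3:
 *
 *	struct i40e_fdir_filter f = {};
 *
 *	f.fd_id = 1;			// software filter location
 *	f.q_index = 3;			// target Rx queue
 *	f.flow_type = TCP_V4_FLOW;	// ethtool flow type
 *	f.dst_port = htons(80);		// ports/addresses kept in network order
 *	f.dst_ip = daddr;		// 'daddr' is a hypothetical __be32
 *
 * pctype, dest_ctl, fd_status and cnt_index are filled in by the per-flow
 * add/del helpers; only fields consumed by the code in this file are shown.
 */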
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
121 struct i40e_tx_buffer *tx_buf, *first;
122 struct i40e_tx_desc *tx_desc;
123 struct i40e_ring *tx_ring;
124 struct i40e_vsi *vsi;
130 /* find existing FDIR VSI */
131 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
135 tx_ring = vsi->tx_rings[0];
138 /* we need two descriptors to add/del a filter and we can wait */
139 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
142 msleep_interruptible(1);
145 dma = dma_map_single(dev, raw_packet,
146 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
147 if (dma_mapping_error(dev, dma))
150 /* grab the next descriptor */
151 i = tx_ring->next_to_use;
152 first = &tx_ring->tx_bi[i];
153 i40e_fdir(tx_ring, fdir_data, add);
155 /* Now program a dummy descriptor */
156 i = tx_ring->next_to_use;
157 tx_desc = I40E_TX_DESC(tx_ring, i);
158 tx_buf = &tx_ring->tx_bi[i];
160 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
162 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
164 /* record length, and DMA address */
165 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
166 dma_unmap_addr_set(tx_buf, dma, dma);
168 tx_desc->buffer_addr = cpu_to_le64(dma);
169 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
171 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
172 tx_buf->raw_buf = (void *)raw_packet;
174 tx_desc->cmd_type_offset_bsz =
175 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
177 /* Force memory writes to complete before letting h/w
178 * know there are new descriptors to fetch.
182 /* Mark the data descriptor to be watched */
183 first->next_to_watch = tx_desc;
185 writel(tx_ring->next_to_use, tx_ring->tail);
192 #define IP_HEADER_OFFSET 14
193 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
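/* The dummy-packet lengths are just Ethernet + IPv4 + L4 header sizes:
 * 14 + 20 + 8 = 42 bytes for UDP.  The TCP (14 + 20 + 20 = 54) and SCTP
 * (14 + 20 + 12 = 46) variants below follow the same arithmetic, and
 * IP_HEADER_OFFSET is simply the Ethernet header length.
 */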
195 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
196 * @vsi: pointer to the targeted VSI
197 * @fd_data: the flow director data required for the FDir descriptor
198 * @add: true adds a filter, false removes it
200 * Returns 0 if the filters were successfully added or removed
202 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
203 struct i40e_fdir_filter *fd_data,
206 struct i40e_pf *pf = vsi->back;
211 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
212 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
213 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
215 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
218 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
220 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
221 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
222 + sizeof(struct iphdr));
224 ip->daddr = fd_data->dst_ip;
225 udp->dest = fd_data->dst_port;
226 ip->saddr = fd_data->src_ip;
227 udp->source = fd_data->src_port;
229 if (fd_data->flex_filter) {
230 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
231 __be16 pattern = fd_data->flex_word;
232 u16 off = fd_data->flex_offset;
234 *((__force __be16 *)(payload + off)) = pattern;
237 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
238 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
240 dev_info(&pf->pdev->dev,
241 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
242 fd_data->pctype, fd_data->fd_id, ret);
243 /* Free the packet buffer since it wasn't added to the ring */
246 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
248 dev_info(&pf->pdev->dev,
249 "Filter OK for PCTYPE %d loc = %d\n",
250 fd_data->pctype, fd_data->fd_id);
252 dev_info(&pf->pdev->dev,
253 "Filter deleted for PCTYPE %d loc = %d\n",
254 fd_data->pctype, fd_data->fd_id);
258 pf->fd_udp4_filter_cnt++;
260 pf->fd_udp4_filter_cnt--;
265 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
267 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
268 * @vsi: pointer to the targeted VSI
269 * @fd_data: the flow director data required for the FDir descriptor
270 * @add: true adds a filter, false removes it
272 * Returns 0 if the filters were successfully added or removed
274 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
275 struct i40e_fdir_filter *fd_data,
278 struct i40e_pf *pf = vsi->back;
284 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
285 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
286 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
287 0x0, 0x72, 0, 0, 0, 0};
289 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
292 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
294 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
295 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
296 + sizeof(struct iphdr));
298 ip->daddr = fd_data->dst_ip;
299 tcp->dest = fd_data->dst_port;
300 ip->saddr = fd_data->src_ip;
301 tcp->source = fd_data->src_port;
303 if (fd_data->flex_filter) {
304 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
305 __be16 pattern = fd_data->flex_word;
306 u16 off = fd_data->flex_offset;
308 *((__force __be16 *)(payload + off)) = pattern;
311 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
312 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
314 dev_info(&pf->pdev->dev,
315 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
316 fd_data->pctype, fd_data->fd_id, ret);
317 /* Free the packet buffer since it wasn't added to the ring */
320 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
322 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
323 fd_data->pctype, fd_data->fd_id);
325 dev_info(&pf->pdev->dev,
326 "Filter deleted for PCTYPE %d loc = %d\n",
327 fd_data->pctype, fd_data->fd_id);
331 pf->fd_tcp4_filter_cnt++;
332 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
333 I40E_DEBUG_FD & pf->hw.debug_mask)
334 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
335 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
337 pf->fd_tcp4_filter_cnt--;
338 if (pf->fd_tcp4_filter_cnt == 0) {
339 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
340 I40E_DEBUG_FD & pf->hw.debug_mask)
341 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
342 pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
349 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
351 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
352 * a specific flow spec
353 * @vsi: pointer to the targeted VSI
354 * @fd_data: the flow director data required for the FDir descriptor
355 * @add: true adds a filter, false removes it
357 * Returns 0 if the filters were successfully added or removed
359 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
360 struct i40e_fdir_filter *fd_data,
363 struct i40e_pf *pf = vsi->back;
364 struct sctphdr *sctp;
369 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
370 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
371 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
373 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
376 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
378 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
379 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
380 + sizeof(struct iphdr));
382 ip->daddr = fd_data->dst_ip;
383 sctp->dest = fd_data->dst_port;
384 ip->saddr = fd_data->src_ip;
385 sctp->source = fd_data->src_port;
387 if (fd_data->flex_filter) {
388 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
389 __be16 pattern = fd_data->flex_word;
390 u16 off = fd_data->flex_offset;
392 *((__force __be16 *)(payload + off)) = pattern;
395 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
396 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
398 dev_info(&pf->pdev->dev,
399 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
400 fd_data->pctype, fd_data->fd_id, ret);
401 /* Free the packet buffer since it wasn't added to the ring */
404 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
406 dev_info(&pf->pdev->dev,
407 "Filter OK for PCTYPE %d loc = %d\n",
408 fd_data->pctype, fd_data->fd_id);
410 dev_info(&pf->pdev->dev,
411 "Filter deleted for PCTYPE %d loc = %d\n",
412 fd_data->pctype, fd_data->fd_id);
416 pf->fd_sctp4_filter_cnt++;
418 pf->fd_sctp4_filter_cnt--;
423 #define I40E_IP_DUMMY_PACKET_LEN 34
425 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
426 * a specific flow spec
427 * @vsi: pointer to the targeted VSI
428 * @fd_data: the flow director data required for the FDir descriptor
429 * @add: true adds a filter, false removes it
431 * Returns 0 if the filters were successfully added or removed
433 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
434 struct i40e_fdir_filter *fd_data,
437 struct i40e_pf *pf = vsi->back;
442 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
443 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
446 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
447 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
448 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
451 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
452 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
454 ip->saddr = fd_data->src_ip;
455 ip->daddr = fd_data->dst_ip;
458 if (fd_data->flex_filter) {
459 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
460 __be16 pattern = fd_data->flex_word;
461 u16 off = fd_data->flex_offset;
463 *((__force __be16 *)(payload + off)) = pattern;
467 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
469 dev_info(&pf->pdev->dev,
470 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
471 fd_data->pctype, fd_data->fd_id, ret);
472 /* The packet buffer wasn't added to the ring so we
473 * need to free it now.
477 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
479 dev_info(&pf->pdev->dev,
480 "Filter OK for PCTYPE %d loc = %d\n",
481 fd_data->pctype, fd_data->fd_id);
483 dev_info(&pf->pdev->dev,
484 "Filter deleted for PCTYPE %d loc = %d\n",
485 fd_data->pctype, fd_data->fd_id);
490 pf->fd_ip4_filter_cnt++;
492 pf->fd_ip4_filter_cnt--;
498 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
499 * @vsi: pointer to the targeted VSI
500 * @cmd: command to get or set RX flow classification rules
501 * @add: true adds a filter, false removes it
504 int i40e_add_del_fdir(struct i40e_vsi *vsi,
505 struct i40e_fdir_filter *input, bool add)
507 struct i40e_pf *pf = vsi->back;
510 switch (input->flow_type & ~FLOW_EXT) {
512 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
515 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
518 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
521 switch (input->ip4_proto) {
523 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
526 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
529 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
532 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
535 /* We cannot support masking based on protocol */
536 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
542 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
	/* The buffer allocated here will normally be freed by
548 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
549 * completion. In the event of an error adding the buffer to the FDIR
550 * ring, it will immediately be freed. It may also be freed by
551 * i40e_clean_tx_ring() when closing the VSI.
557 * i40e_fd_handle_status - check the Programming Status for FD
558 * @rx_ring: the Rx ring for this descriptor
559 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
560 * @prog_id: the id originally used for programming
562 * This is used to verify if the FD programming or invalidation
563 * requested by SW to the HW is successful or not and take actions accordingly.
565 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
566 union i40e_rx_desc *rx_desc, u8 prog_id)
568 struct i40e_pf *pf = rx_ring->vsi->back;
569 struct pci_dev *pdev = pf->pdev;
570 u32 fcnt_prog, fcnt_avail;
574 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
575 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
576 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
578 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
579 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
580 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
581 (I40E_DEBUG_FD & pf->hw.debug_mask))
582 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
585 /* Check if the programming error is for ATR.
586 * If so, auto disable ATR and set a state for
587 * flush in progress. Next time we come here if flush is in
588 * progress do nothing, once flush is complete the state will
591 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
595 /* store the current atr filter count */
596 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
598 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
599 (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
600 pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
601 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
604 /* filter programming failed most likely due to table full */
605 fcnt_prog = i40e_get_global_fd_count(pf);
606 fcnt_avail = pf->fdir_pf_filter_count;
607 /* If ATR is running fcnt_prog can quickly change,
608 * if we are very close to full, it makes sense to disable
609 * FD ATR/SB and then re-enable it when there is room.
611 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
612 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
613 !(pf->hw_disabled_flags &
614 I40E_FLAG_FD_SB_ENABLED)) {
615 if (I40E_DEBUG_FD & pf->hw.debug_mask)
616 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
617 pf->hw_disabled_flags |=
618 I40E_FLAG_FD_SB_ENABLED;
621 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
622 if (I40E_DEBUG_FD & pf->hw.debug_mask)
623 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
624 rx_desc->wb.qword0.hi_dword.fd_id);
629 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
630 * @ring: the ring that owns the buffer
631 * @tx_buffer: the buffer to free
633 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
634 struct i40e_tx_buffer *tx_buffer)
636 if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
641 if (dma_unmap_len(tx_buffer, len))
642 dma_unmap_single(ring->dev,
643 dma_unmap_addr(tx_buffer, dma),
644 dma_unmap_len(tx_buffer, len),
646 } else if (dma_unmap_len(tx_buffer, len)) {
647 dma_unmap_page(ring->dev,
648 dma_unmap_addr(tx_buffer, dma),
649 dma_unmap_len(tx_buffer, len),
653 tx_buffer->next_to_watch = NULL;
654 tx_buffer->skb = NULL;
655 dma_unmap_len_set(tx_buffer, len, 0);
656 /* tx_buffer must be completely set up in the transmit path */
660 * i40e_clean_tx_ring - Free any empty Tx buffers
661 * @tx_ring: ring to be cleaned
663 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
665 unsigned long bi_size;
668 /* ring already cleared, nothing to do */
672 /* Free all the Tx ring sk_buffs */
673 for (i = 0; i < tx_ring->count; i++)
674 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
676 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
677 memset(tx_ring->tx_bi, 0, bi_size);
679 /* Zero out the descriptor ring */
680 memset(tx_ring->desc, 0, tx_ring->size);
682 tx_ring->next_to_use = 0;
683 tx_ring->next_to_clean = 0;
685 if (!tx_ring->netdev)
688 /* cleanup Tx queue statistics */
689 netdev_tx_reset_queue(txring_txq(tx_ring));
693 * i40e_free_tx_resources - Free Tx resources per queue
694 * @tx_ring: Tx descriptor ring for a specific queue
696 * Free all transmit software resources
698 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
700 i40e_clean_tx_ring(tx_ring);
701 kfree(tx_ring->tx_bi);
702 tx_ring->tx_bi = NULL;
705 dma_free_coherent(tx_ring->dev, tx_ring->size,
706 tx_ring->desc, tx_ring->dma);
707 tx_ring->desc = NULL;
/**
 * i40e_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
718 u32 i40e_get_tx_pending(struct i40e_ring *ring)
722 head = i40e_get_head(ring);
723 tail = readl(ring->tail);
726 return (head < tail) ?
727 tail - head : (tail + ring->count - head);
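/* Example of the wraparound math above (illustrative): on a 512-entry ring
 * with head = 500 and tail = 20, the pending count is 20 + 512 - 500 = 32
 * descriptors; with head = 10 and tail = 42 it is simply 42 - 10 = 32.
 */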
735 * i40e_clean_tx_irq - Reclaim resources after transmit completes
736 * @vsi: the VSI we care about
737 * @tx_ring: Tx ring to clean
738 * @napi_budget: Used to determine if we are in netpoll
740 * Returns true if there's any budget left (e.g. the clean is finished)
742 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
743 struct i40e_ring *tx_ring, int napi_budget)
745 u16 i = tx_ring->next_to_clean;
746 struct i40e_tx_buffer *tx_buf;
747 struct i40e_tx_desc *tx_head;
748 struct i40e_tx_desc *tx_desc;
749 unsigned int total_bytes = 0, total_packets = 0;
750 unsigned int budget = vsi->work_limit;
752 tx_buf = &tx_ring->tx_bi[i];
753 tx_desc = I40E_TX_DESC(tx_ring, i);
756 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
759 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
761 /* if next_to_watch is not set then there is no work pending */
765 /* prevent any other reads prior to eop_desc */
766 read_barrier_depends();
768 /* we have caught up to head, no work left to do */
769 if (tx_head == tx_desc)
772 /* clear next_to_watch to prevent false hangs */
773 tx_buf->next_to_watch = NULL;
775 /* update the statistics for this packet */
776 total_bytes += tx_buf->bytecount;
777 total_packets += tx_buf->gso_segs;
780 napi_consume_skb(tx_buf->skb, napi_budget);
782 /* unmap skb header data */
783 dma_unmap_single(tx_ring->dev,
784 dma_unmap_addr(tx_buf, dma),
785 dma_unmap_len(tx_buf, len),
788 /* clear tx_buffer data */
790 dma_unmap_len_set(tx_buf, len, 0);
792 /* unmap remaining buffers */
793 while (tx_desc != eop_desc) {
800 tx_buf = tx_ring->tx_bi;
801 tx_desc = I40E_TX_DESC(tx_ring, 0);
804 /* unmap any remaining paged data */
805 if (dma_unmap_len(tx_buf, len)) {
806 dma_unmap_page(tx_ring->dev,
807 dma_unmap_addr(tx_buf, dma),
808 dma_unmap_len(tx_buf, len),
810 dma_unmap_len_set(tx_buf, len, 0);
814 /* move us one more past the eop_desc for start of next pkt */
820 tx_buf = tx_ring->tx_bi;
821 tx_desc = I40E_TX_DESC(tx_ring, 0);
826 /* update budget accounting */
828 } while (likely(budget));
831 tx_ring->next_to_clean = i;
832 u64_stats_update_begin(&tx_ring->syncp);
833 tx_ring->stats.bytes += total_bytes;
834 tx_ring->stats.packets += total_packets;
835 u64_stats_update_end(&tx_ring->syncp);
836 tx_ring->q_vector->tx.total_bytes += total_bytes;
837 tx_ring->q_vector->tx.total_packets += total_packets;
839 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
840 /* check to see if there are < 4 descriptors
841 * waiting to be written back, then kick the hardware to force
842 * them to be written back in case we stay in NAPI.
843 * In this mode on X722 we do not enable Interrupt.
845 unsigned int j = i40e_get_tx_pending(tx_ring);
848 ((j / WB_STRIDE) == 0) && (j > 0) &&
849 !test_bit(__I40E_DOWN, &vsi->state) &&
850 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
851 tx_ring->arm_wb = true;
854 /* notify netdev of completed buffers */
855 netdev_tx_completed_queue(txring_txq(tx_ring),
856 total_packets, total_bytes);
858 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
859 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
860 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
861 /* Make sure that anybody stopping the queue after this
862 * sees the new next_to_clean.
865 if (__netif_subqueue_stopped(tx_ring->netdev,
866 tx_ring->queue_index) &&
867 !test_bit(__I40E_DOWN, &vsi->state)) {
868 netif_wake_subqueue(tx_ring->netdev,
869 tx_ring->queue_index);
870 ++tx_ring->tx_stats.restart_queue;
878 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
879 * @vsi: the VSI we care about
880 * @q_vector: the vector on which to enable writeback
883 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
884 struct i40e_q_vector *q_vector)
886 u16 flags = q_vector->tx.ring[0].flags;
889 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
892 if (q_vector->arm_wb_state)
895 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
896 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
897 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
900 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
903 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
904 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
906 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
908 q_vector->arm_wb_state = true;
912 * i40e_force_wb - Issue SW Interrupt so HW does a wb
913 * @vsi: the VSI we care about
914 * @q_vector: the vector on which to force writeback
917 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
919 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
920 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
921 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
922 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
923 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
924 /* allow 00 to be written to the index */
927 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
928 vsi->base_vector - 1), val);
930 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
931 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
932 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
933 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
934 /* allow 00 to be written to the index */
936 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
941 * i40e_set_new_dynamic_itr - Find new ITR level
942 * @rc: structure containing ring performance data
944 * Returns true if ITR changed, false if not
946 * Stores a new ITR value based on packets and byte counts during
947 * the last interrupt. The advantage of per interrupt computation
948 * is faster updates and more accurate ITR for the current traffic
949 * pattern. Constants in this function were computed based on
950 * theoretical maximum wire speed and thresholds were set based on
951 * testing data as well as attempting to minimize response time
952 * while increasing bulk throughput.
954 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
956 enum i40e_latency_range new_latency_range = rc->latency_range;
957 struct i40e_q_vector *qv = rc->ring->q_vector;
958 u32 new_itr = rc->itr;
962 if (rc->total_packets == 0 || !rc->itr)
965 /* simple throttlerate management
966 * 0-10MB/s lowest (50000 ints/s)
967 * 10-20MB/s low (20000 ints/s)
968 * 20-1249MB/s bulk (18000 ints/s)
969 * > 40000 Rx packets per second (8000 ints/s)
971 * The math works out because the divisor is in 10^(-6) which
972 * turns the bytes/us input value into MB/s values, but
973 * make sure to use usecs, as the register values written
974 * are in 2 usec increments in the ITR registers, and make sure
975 * to use the smoothed values that the countdown timer gives us.
977 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
978 bytes_per_int = rc->total_bytes / usecs;
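	/* Worked example of the heuristic below (illustrative; assumes
	 * I40E_ITR_20K encodes 25 two-usec ticks and ITR_COUNTDOWN_START is
	 * 100): with rc->itr at the 20K setting, usecs = (25 << 1) * 100 =
	 * 5000, so 150000 bytes since the last update give bytes_per_int =
	 * 30, roughly 30 MB/s, which lands the vector in the bulk range.
	 */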
980 switch (new_latency_range) {
981 case I40E_LOWEST_LATENCY:
982 if (bytes_per_int > 10)
983 new_latency_range = I40E_LOW_LATENCY;
985 case I40E_LOW_LATENCY:
986 if (bytes_per_int > 20)
987 new_latency_range = I40E_BULK_LATENCY;
988 else if (bytes_per_int <= 10)
989 new_latency_range = I40E_LOWEST_LATENCY;
991 case I40E_BULK_LATENCY:
992 case I40E_ULTRA_LATENCY:
994 if (bytes_per_int <= 20)
995 new_latency_range = I40E_LOW_LATENCY;
999 /* this is to adjust RX more aggressively when streaming small
1000 * packets. The value of 40000 was picked as it is just beyond
1001 * what the hardware can receive per second if in low latency
1004 #define RX_ULTRA_PACKET_RATE 40000
1006 if ((((rc->total_packets * 1000000) / usecs) > RX_ULTRA_PACKET_RATE) &&
1008 new_latency_range = I40E_ULTRA_LATENCY;
1010 rc->latency_range = new_latency_range;
1012 switch (new_latency_range) {
1013 case I40E_LOWEST_LATENCY:
1014 new_itr = I40E_ITR_50K;
1016 case I40E_LOW_LATENCY:
1017 new_itr = I40E_ITR_20K;
1019 case I40E_BULK_LATENCY:
1020 new_itr = I40E_ITR_18K;
1022 case I40E_ULTRA_LATENCY:
1023 new_itr = I40E_ITR_8K;
1029 rc->total_bytes = 0;
1030 rc->total_packets = 0;
1032 if (new_itr != rc->itr) {
1041 * i40e_clean_programming_status - clean the programming status descriptor
1042 * @rx_ring: the rx ring that has this descriptor
1043 * @rx_desc: the rx descriptor written back by HW
1045 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1046 * status being successful or not and take actions accordingly. FCoE should
1047 * handle its context/filter programming/invalidation status and take actions.
1050 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1051 union i40e_rx_desc *rx_desc)
1056 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1057 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1058 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1060 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1061 i40e_fd_handle_status(rx_ring, rx_desc, id);
1065 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1066 * @tx_ring: the tx ring to set up
1068 * Return 0 on success, negative on error
1070 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1072 struct device *dev = tx_ring->dev;
1078 /* warn if we are about to overwrite the pointer */
1079 WARN_ON(tx_ring->tx_bi);
1080 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1081 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1082 if (!tx_ring->tx_bi)
1085 /* round up to nearest 4K */
1086 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1087 /* add u32 for head writeback, align after this takes care of
1088 * guaranteeing this is at least one cache line in size
1090 tx_ring->size += sizeof(u32);
1091 tx_ring->size = ALIGN(tx_ring->size, 4096);
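	/* Sizing example (illustrative): with a 512-descriptor ring at
	 * 16 bytes per descriptor this is 8192 bytes, plus 4 bytes for the
	 * head write-back word, which ALIGN() then rounds up to the next
	 * 4 KiB multiple (12288).
	 */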
1092 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1093 &tx_ring->dma, GFP_KERNEL);
1094 if (!tx_ring->desc) {
1095 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1100 tx_ring->next_to_use = 0;
1101 tx_ring->next_to_clean = 0;
1105 kfree(tx_ring->tx_bi);
1106 tx_ring->tx_bi = NULL;
1111 * i40e_clean_rx_ring - Free Rx buffers
1112 * @rx_ring: ring to be cleaned
1114 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1116 unsigned long bi_size;
1119 /* ring already cleared, nothing to do */
1120 if (!rx_ring->rx_bi)
1124 dev_kfree_skb(rx_ring->skb);
1125 rx_ring->skb = NULL;
1128 /* Free all the Rx ring sk_buffs */
1129 for (i = 0; i < rx_ring->count; i++) {
1130 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1135 /* Invalidate cache lines that may have been written to by
1136 * device so that we avoid corrupting memory.
1138 dma_sync_single_range_for_cpu(rx_ring->dev,
1144 /* free resources associated with mapping */
1145 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1149 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1152 rx_bi->page_offset = 0;
1155 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1156 memset(rx_ring->rx_bi, 0, bi_size);
1158 /* Zero out the descriptor ring */
1159 memset(rx_ring->desc, 0, rx_ring->size);
1161 rx_ring->next_to_alloc = 0;
1162 rx_ring->next_to_clean = 0;
1163 rx_ring->next_to_use = 0;
1167 * i40e_free_rx_resources - Free Rx resources
1168 * @rx_ring: ring to clean the resources from
1170 * Free all receive software resources
1172 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1174 i40e_clean_rx_ring(rx_ring);
1175 kfree(rx_ring->rx_bi);
1176 rx_ring->rx_bi = NULL;
1178 if (rx_ring->desc) {
1179 dma_free_coherent(rx_ring->dev, rx_ring->size,
1180 rx_ring->desc, rx_ring->dma);
1181 rx_ring->desc = NULL;
1186 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1187 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1189 * Returns 0 on success, negative on failure
1191 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1193 struct device *dev = rx_ring->dev;
1196 /* warn if we are about to overwrite the pointer */
1197 WARN_ON(rx_ring->rx_bi);
1198 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1199 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1200 if (!rx_ring->rx_bi)
1203 u64_stats_init(&rx_ring->syncp);
1205 /* Round up to nearest 4K */
1206 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1207 rx_ring->size = ALIGN(rx_ring->size, 4096);
1208 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1209 &rx_ring->dma, GFP_KERNEL);
1211 if (!rx_ring->desc) {
1212 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1217 rx_ring->next_to_alloc = 0;
1218 rx_ring->next_to_clean = 0;
1219 rx_ring->next_to_use = 0;
1223 kfree(rx_ring->rx_bi);
1224 rx_ring->rx_bi = NULL;
1229 * i40e_release_rx_desc - Store the new tail and head values
1230 * @rx_ring: ring to bump
1231 * @val: new head index
1233 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1235 rx_ring->next_to_use = val;
1237 /* update next to alloc since we have filled the ring */
1238 rx_ring->next_to_alloc = val;
1240 /* Force memory writes to complete before letting h/w
1241 * know there are new descriptors to fetch. (Only
1242 * applicable for weak-ordered memory model archs,
1246 writel(val, rx_ring->tail);
1250 * i40e_alloc_mapped_page - recycle or make a new page
1251 * @rx_ring: ring to use
1252 * @bi: rx_buffer struct to modify
1254 * Returns true if the page was successfully allocated or
1257 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1258 struct i40e_rx_buffer *bi)
1260 struct page *page = bi->page;
1263 /* since we are recycling buffers we should seldom need to alloc */
1265 rx_ring->rx_stats.page_reuse_count++;
1269 /* alloc new page for storage */
1270 page = dev_alloc_page();
1271 if (unlikely(!page)) {
1272 rx_ring->rx_stats.alloc_page_failed++;
1276 /* map page for use */
1277 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1282 /* if mapping failed free memory back to system since
1283 * there isn't much point in holding memory we can't use
1285 if (dma_mapping_error(rx_ring->dev, dma)) {
1286 __free_pages(page, 0);
1287 rx_ring->rx_stats.alloc_page_failed++;
1293 bi->page_offset = 0;
1295 /* initialize pagecnt_bias to 1 representing we fully own page */
1296 bi->pagecnt_bias = 1;
1302 * i40e_receive_skb - Send a completed packet up the stack
1303 * @rx_ring: rx ring in play
1304 * @skb: packet to send up
1305 * @vlan_tag: vlan tag for packet
1307 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1308 struct sk_buff *skb, u16 vlan_tag)
1310 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1312 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1313 (vlan_tag & VLAN_VID_MASK))
1314 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1316 napi_gro_receive(&q_vector->napi, skb);
1320 * i40e_alloc_rx_buffers - Replace used receive buffers
1321 * @rx_ring: ring to place buffers on
1322 * @cleaned_count: number of buffers to replace
1324 * Returns false if all allocations were successful, true if any fail
1326 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1328 u16 ntu = rx_ring->next_to_use;
1329 union i40e_rx_desc *rx_desc;
1330 struct i40e_rx_buffer *bi;
1332 /* do nothing if no valid netdev defined */
1333 if (!rx_ring->netdev || !cleaned_count)
1336 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1337 bi = &rx_ring->rx_bi[ntu];
1340 if (!i40e_alloc_mapped_page(rx_ring, bi))
1343 /* sync the buffer for use by the device */
1344 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1349 /* Refresh the desc even if buffer_addrs didn't change
1350 * because each write-back erases this info.
1352 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1357 if (unlikely(ntu == rx_ring->count)) {
1358 rx_desc = I40E_RX_DESC(rx_ring, 0);
1359 bi = rx_ring->rx_bi;
1363 /* clear the status bits for the next_to_use descriptor */
1364 rx_desc->wb.qword1.status_error_len = 0;
1367 } while (cleaned_count);
1369 if (rx_ring->next_to_use != ntu)
1370 i40e_release_rx_desc(rx_ring, ntu);
1375 if (rx_ring->next_to_use != ntu)
1376 i40e_release_rx_desc(rx_ring, ntu);
1378 /* make sure to come back via polling to try again after
1379 * allocation failure
1385 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1386 * @vsi: the VSI we care about
1387 * @skb: skb currently being received and modified
1388 * @rx_desc: the receive descriptor
1390 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1391 struct sk_buff *skb,
1392 union i40e_rx_desc *rx_desc)
1394 struct i40e_rx_ptype_decoded decoded;
1395 u32 rx_error, rx_status;
1400 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1401 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1402 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1403 I40E_RXD_QW1_ERROR_SHIFT;
1404 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1405 I40E_RXD_QW1_STATUS_SHIFT;
1406 decoded = decode_rx_desc_ptype(ptype);
1408 skb->ip_summed = CHECKSUM_NONE;
1410 skb_checksum_none_assert(skb);
1412 /* Rx csum enabled and ip headers found? */
1413 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1416 /* did the hardware decode the packet and checksum? */
1417 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1420 /* both known and outer_ip must be set for the below code to work */
1421 if (!(decoded.known && decoded.outer_ip))
1424 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1425 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1426 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1427 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1430 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1431 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1434 /* likely incorrect csum if alternate IP extension headers found */
1436 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1437 /* don't increment checksum err here, non-fatal err */
1440 /* there was some L4 error, count error and punt packet to the stack */
1441 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1444 /* handle packets that were not able to be checksummed due
1445 * to arrival speed, in this case the stack can compute
1448 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1451 /* If there is an outer header present that might contain a checksum
1452 * we need to bump the checksum level by 1 to reflect the fact that
1453 * we are indicating we validated the inner checksum.
1455 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1456 skb->csum_level = 1;
1458 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1459 switch (decoded.inner_prot) {
1460 case I40E_RX_PTYPE_INNER_PROT_TCP:
1461 case I40E_RX_PTYPE_INNER_PROT_UDP:
1462 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1463 skb->ip_summed = CHECKSUM_UNNECESSARY;
1472 vsi->back->hw_csum_rx_error++;
1476 * i40e_ptype_to_htype - get a hash type
1477 * @ptype: the ptype value from the descriptor
1479 * Returns a hash type to be used by skb_set_hash
1481 static inline int i40e_ptype_to_htype(u8 ptype)
1483 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1486 return PKT_HASH_TYPE_NONE;
1488 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1489 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1490 return PKT_HASH_TYPE_L4;
1491 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1492 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1493 return PKT_HASH_TYPE_L3;
1495 return PKT_HASH_TYPE_L2;
1499 * i40e_rx_hash - set the hash value in the skb
1500 * @ring: descriptor ring
1501 * @rx_desc: specific descriptor
1503 static inline void i40e_rx_hash(struct i40e_ring *ring,
1504 union i40e_rx_desc *rx_desc,
1505 struct sk_buff *skb,
1509 const __le64 rss_mask =
1510 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1511 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1513 if (!(ring->netdev->features & NETIF_F_RXHASH))
1516 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1517 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1518 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1523 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1524 * @rx_ring: rx descriptor ring packet is being transacted on
1525 * @rx_desc: pointer to the EOP Rx descriptor
1526 * @skb: pointer to current skb being populated
1527 * @rx_ptype: the packet type decoded by hardware
1529 * This function checks the ring, descriptor, and packet information in
1530 * order to populate the hash, checksum, VLAN, protocol, and
1531 * other fields within the skb.
1534 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1535 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1538 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1539 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1540 I40E_RXD_QW1_STATUS_SHIFT;
1541 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1542 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1543 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1545 if (unlikely(tsynvalid))
1546 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1548 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1550 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1552 skb_record_rx_queue(skb, rx_ring->queue_index);
1554 /* modifies the skb - consumes the enet header */
1555 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1559 * i40e_cleanup_headers - Correct empty headers
1560 * @rx_ring: rx descriptor ring packet is being transacted on
1561 * @skb: pointer to current skb being fixed
1563 * Also address the case where we are pulling data in on pages only
1564 * and as such no data is present in the skb header.
1566 * In addition if skb is not at least 60 bytes we need to pad it so that
1567 * it is large enough to qualify as a valid Ethernet frame.
1569 * Returns true if an error was encountered and skb was freed.
1571 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
1573 /* if eth_skb_pad returns an error the skb was freed */
1574 if (eth_skb_pad(skb))
1581 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1582 * @rx_ring: rx descriptor ring to store buffers on
1583 * @old_buff: donor buffer to have page reused
1585 * Synchronizes page for reuse by the adapter
1587 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1588 struct i40e_rx_buffer *old_buff)
1590 struct i40e_rx_buffer *new_buff;
1591 u16 nta = rx_ring->next_to_alloc;
1593 new_buff = &rx_ring->rx_bi[nta];
1595 /* update, and store next to alloc */
1597 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1599 /* transfer page from old buffer to new buffer */
1600 new_buff->dma = old_buff->dma;
1601 new_buff->page = old_buff->page;
1602 new_buff->page_offset = old_buff->page_offset;
1603 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1607 * i40e_page_is_reusable - check if any reuse is possible
1608 * @page: page struct to check
1610 * A page is not reusable if it was allocated under low memory
1611 * conditions, or it's not in the same NUMA node as this CPU.
1613 static inline bool i40e_page_is_reusable(struct page *page)
1615 return (page_to_nid(page) == numa_mem_id()) &&
1616 !page_is_pfmemalloc(page);
1620 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1621 * the adapter for another receive
1623 * @rx_buffer: buffer containing the page
1625 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1626 * an unused region in the page.
1628 * For small pages, @truesize will be a constant value, half the size
1629 * of the memory at page. We'll attempt to alternate between high and
1630 * low halves of the page, with one half ready for use by the hardware
1631 * and the other half being consumed by the stack. We use the page
1632 * ref count to determine whether the stack has finished consuming the
1633 * portion of this page that was passed up with a previous packet. If
1634 * the page ref count is >1, we'll assume the "other" half page is
1635 * still busy, and this page cannot be reused.
1637 * For larger pages, @truesize will be the actual space used by the
1638 * received packet (adjusted upward to an even multiple of the cache
1639 * line size). This will advance through the page by the amount
1640 * actually consumed by the received packets while there is still
1641 * space for a buffer. Each region of larger pages will be used at
1642 * most once, after which the page will not be reused.
1644 * In either case, if the page is reusable its refcount is increased.
1646 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1648 #if (PAGE_SIZE >= 8192)
1649 unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
1651 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1652 struct page *page = rx_buffer->page;
1654 /* Is any reuse possible? */
1655 if (unlikely(!i40e_page_is_reusable(page)))
1658 #if (PAGE_SIZE < 8192)
1659 /* if we are only owner of page we can reuse it */
1660 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1663 if (rx_buffer->page_offset > last_offset)
1667 /* If we have drained the page fragment pool we need to update
1668 * the pagecnt_bias and page count so that we fully restock the
1669 * number of references the driver holds.
1671 if (unlikely(!pagecnt_bias)) {
1672 page_ref_add(page, USHRT_MAX);
1673 rx_buffer->pagecnt_bias = USHRT_MAX;
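/* Illustrative numbers for the reuse scheme implemented above: with 4 KiB
 * pages and 2 KiB receive buffers, page_offset simply flips between 0 and
 * 2048 (the XOR of truesize further down), so one half of the page can sit
 * with the stack while the other half is posted to hardware; pagecnt_bias
 * tracks how many of the page references the driver still owns.
 */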
1680 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1681 * @rx_ring: rx descriptor ring to transact packets on
1682 * @rx_buffer: buffer containing page to add
1683 * @skb: sk_buff to place the data into
1684 * @size: packet length from rx_desc
1686 * This function will add the data contained in rx_buffer->page to the skb.
1687 * It will just attach the page as a frag to the skb.
1689 * The function will then update the page offset.
1691 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1692 struct i40e_rx_buffer *rx_buffer,
1693 struct sk_buff *skb,
1696 #if (PAGE_SIZE < 8192)
1697 unsigned int truesize = I40E_RXBUFFER_2048;
1699 unsigned int truesize = SKB_DATA_ALIGN(size);
1702 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1703 rx_buffer->page_offset, size, truesize);
1705 /* page is being used so we must update the page offset */
1706 #if (PAGE_SIZE < 8192)
1707 rx_buffer->page_offset ^= truesize;
1709 rx_buffer->page_offset += truesize;
1714 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1715 * @rx_ring: rx descriptor ring to transact packets on
1716 * @size: size of buffer to add to skb
1718 * This function will pull an Rx buffer from the ring and synchronize it
1719 * for use by the CPU.
1721 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1722 const unsigned int size)
1724 struct i40e_rx_buffer *rx_buffer;
1726 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1727 prefetchw(rx_buffer->page);
1729 /* we are reusing so sync this buffer for CPU use */
1730 dma_sync_single_range_for_cpu(rx_ring->dev,
1732 rx_buffer->page_offset,
1736 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1737 rx_buffer->pagecnt_bias--;
1743 * i40e_construct_skb - Allocate skb and populate it
1744 * @rx_ring: rx descriptor ring to transact packets on
1745 * @rx_buffer: rx buffer to pull data from
1746 * @size: size of buffer to add to skb
1748 * This function allocates an skb. It then populates it with the page
1749 * data from the current receive descriptor, taking care to set up the
1752 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1753 struct i40e_rx_buffer *rx_buffer,
1756 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1757 #if (PAGE_SIZE < 8192)
1758 unsigned int truesize = I40E_RXBUFFER_2048;
1760 unsigned int truesize = SKB_DATA_ALIGN(size);
1762 unsigned int headlen;
1763 struct sk_buff *skb;
1765 /* prefetch first cache line of first page */
1767 #if L1_CACHE_BYTES < 128
1768 prefetch(va + L1_CACHE_BYTES);
1771 /* allocate a skb to store the frags */
1772 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1774 GFP_ATOMIC | __GFP_NOWARN);
1778 /* Determine available headroom for copy */
1780 if (headlen > I40E_RX_HDR_SIZE)
1781 headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
1783 /* align pull length to size of long to optimize memcpy performance */
1784 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1786 /* update all of the pointers */
1789 skb_add_rx_frag(skb, 0, rx_buffer->page,
1790 rx_buffer->page_offset + headlen,
1793 /* buffer is used by skb, update page_offset */
1794 #if (PAGE_SIZE < 8192)
1795 rx_buffer->page_offset ^= truesize;
1797 rx_buffer->page_offset += truesize;
1800 /* buffer is unused, reset bias back to rx_buffer */
1801 rx_buffer->pagecnt_bias++;
1808 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
1809 * @rx_ring: rx descriptor ring to transact packets on
1810 * @rx_buffer: rx buffer to pull data from
 * This function will clean up the contents of the rx_buffer. It will
 * either recycle the buffer or unmap it and free the associated resources.
1815 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
1816 struct i40e_rx_buffer *rx_buffer)
1818 if (i40e_can_reuse_rx_page(rx_buffer)) {
1819 /* hand second half of page back to the ring */
1820 i40e_reuse_rx_page(rx_ring, rx_buffer);
1821 rx_ring->rx_stats.page_reuse_count++;
1823 /* we are not reusing the buffer so unmap it */
1824 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
1825 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
1826 __page_frag_cache_drain(rx_buffer->page,
1827 rx_buffer->pagecnt_bias);
1830 /* clear contents of buffer_info */
1831 rx_buffer->page = NULL;
1835 * i40e_is_non_eop - process handling of non-EOP buffers
1836 * @rx_ring: Rx ring being processed
1837 * @rx_desc: Rx descriptor for current buffer
1838 * @skb: Current socket buffer containing buffer in progress
1840 * This function updates next to clean. If the buffer is an EOP buffer
1841 * this function exits returning false, otherwise it will place the
1842 * sk_buff in the next buffer to be chained and return true indicating
1843 * that this is in fact a non-EOP buffer.
1845 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1846 union i40e_rx_desc *rx_desc,
1847 struct sk_buff *skb)
1849 u32 ntc = rx_ring->next_to_clean + 1;
1851 /* fetch, update, and store next to clean */
1852 ntc = (ntc < rx_ring->count) ? ntc : 0;
1853 rx_ring->next_to_clean = ntc;
1855 prefetch(I40E_RX_DESC(rx_ring, ntc));
1857 #define staterrlen rx_desc->wb.qword1.status_error_len
1858 if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
1859 i40e_clean_programming_status(rx_ring, rx_desc);
1862 /* if we are the last buffer then there is nothing else to do */
1863 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1864 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1867 rx_ring->rx_stats.non_eop_descs++;
1873 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1874 * @rx_ring: rx descriptor ring to transact packets on
1875 * @budget: Total limit on number of packets to process
1877 * This function provides a "bounce buffer" approach to Rx interrupt
1878 * processing. The advantage to this is that on systems that have
1879 * expensive overhead for IOMMU access this provides a means of avoiding
1880 * it by maintaining the mapping of the page to the system.
1882 * Returns amount of work completed
1884 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1886 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1887 struct sk_buff *skb = rx_ring->skb;
1888 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1889 bool failure = false;
1891 while (likely(total_rx_packets < budget)) {
1892 struct i40e_rx_buffer *rx_buffer;
1893 union i40e_rx_desc *rx_desc;
1899 /* return some buffers to hardware, one at a time is too slow */
1900 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1901 failure = failure ||
1902 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1906 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
1908 /* status_error_len will always be zero for unused descriptors
1909 * because it's cleared in cleanup, and overlaps with hdr_addr
1910 * which is always zero because packet split isn't used, if the
1911 * hardware wrote DD then the length will be non-zero
1913 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1914 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1915 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1919 /* This memory barrier is needed to keep us from reading
1920 * any other fields out of the rx_desc until we have
1921 * verified the descriptor has been written back.
1925 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
1927 /* retrieve a buffer from the ring */
1929 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
1931 skb = i40e_construct_skb(rx_ring, rx_buffer, size);
1933 /* exit if we failed to retrieve a buffer */
1935 rx_ring->rx_stats.alloc_buff_failed++;
1936 rx_buffer->pagecnt_bias++;
1940 i40e_put_rx_buffer(rx_ring, rx_buffer);
1943 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
1946 /* ERR_MASK will only have valid bits if EOP set, and
1947 * what we are doing here is actually checking
1948 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1951 if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1952 dev_kfree_skb_any(skb);
1957 if (i40e_cleanup_headers(rx_ring, skb)) {
1962 /* probably a little skewed due to removing CRC */
1963 total_rx_bytes += skb->len;
1965 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1966 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1967 I40E_RXD_QW1_PTYPE_SHIFT;
1969 /* populate checksum, VLAN, and protocol */
1970 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1972 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
1973 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
1975 i40e_receive_skb(rx_ring, skb, vlan_tag);
1978 /* update budget accounting */
1984 u64_stats_update_begin(&rx_ring->syncp);
1985 rx_ring->stats.packets += total_rx_packets;
1986 rx_ring->stats.bytes += total_rx_bytes;
1987 u64_stats_update_end(&rx_ring->syncp);
1988 rx_ring->q_vector->rx.total_packets += total_rx_packets;
1989 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1991 /* guarantee a trip back through this routine if there was a failure */
1992 return failure ? budget : total_rx_packets;
1995 static u32 i40e_buildreg_itr(const int type, const u16 itr)
1999 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2000 /* Don't clear PBA because that can cause lost interrupts that
2001 * came in while we were cleaning/polling
2003 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2004 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
2009 /* a small macro to shorten up some long lines */
2010 #define INTREG I40E_PFINT_DYN_CTLN
2011 static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
2013 return vsi->rx_rings[idx]->rx_itr_setting;
2016 static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
2018 return vsi->tx_rings[idx]->tx_itr_setting;
2022 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2023 * @vsi: the VSI we care about
2024 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2027 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2028 struct i40e_q_vector *q_vector)
2030 struct i40e_hw *hw = &vsi->back->hw;
2031 bool rx = false, tx = false;
2034 int idx = q_vector->v_idx;
2035 int rx_itr_setting, tx_itr_setting;
2037 vector = (q_vector->v_idx + vsi->base_vector);
2039 /* avoid dynamic calculation if in countdown mode OR if
2040 * all dynamic is disabled
2042 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2044 rx_itr_setting = get_rx_itr(vsi, idx);
2045 tx_itr_setting = get_tx_itr(vsi, idx);
2047 if (q_vector->itr_countdown > 0 ||
2048 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2049 !ITR_IS_DYNAMIC(tx_itr_setting))) {
	if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2054 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2055 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2058 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2059 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2060 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
2064 /* get the higher of the two ITR adjustments and
2065 * use the same value for both ITR registers
2066 * when in adaptive mode (Rx and/or Tx)
2068 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2070 q_vector->tx.itr = q_vector->rx.itr = itr;
2071 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2073 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2077 /* only need to enable the interrupt once, but need
2078 * to possibly update both ITR values
2081 /* set the INTENA_MSK_MASK so that this first write
2082 * won't actually enable the interrupt, instead just
2083 * updating the ITR (it's bit 31 PF and VF)
2086 /* don't check _DOWN because interrupt isn't being enabled */
2087 wr32(hw, INTREG(vector - 1), rxval);
2091 if (!test_bit(__I40E_DOWN, &vsi->state))
2092 wr32(hw, INTREG(vector - 1), txval);
2094 if (q_vector->itr_countdown)
2095 q_vector->itr_countdown--;
2097 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2101 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2102 * @napi: napi struct with our device's info in it
2103 * @budget: amount of work driver is allowed to do this pass, in packets
2105 * This function will clean all queues associated with a q_vector.
2107 * Returns the amount of work done
2109 int i40e_napi_poll(struct napi_struct *napi, int budget)
2111 struct i40e_q_vector *q_vector =
2112 container_of(napi, struct i40e_q_vector, napi);
2113 struct i40e_vsi *vsi = q_vector->vsi;
2114 struct i40e_ring *ring;
2115 bool clean_complete = true;
2116 bool arm_wb = false;
2117 int budget_per_ring;
2120 if (test_bit(__I40E_DOWN, &vsi->state)) {
2121 napi_complete(napi);
2125 /* Since the actual Tx work is minimal, we can give the Tx a larger
2126 * budget and be more aggressive about cleaning up the Tx descriptors.
2128 i40e_for_each_ring(ring, q_vector->tx) {
2129 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2130 clean_complete = false;
2133 arm_wb |= ring->arm_wb;
2134 ring->arm_wb = false;
2137 /* Handle case where we are called by netpoll with a budget of 0 */
2141 /* We attempt to distribute budget to each Rx queue fairly, but don't
2142 * allow the budget to go below 1 because that would exit polling early.
2144 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
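/* Worked example (illustrative): with the default NAPI budget of 64 and
 * three Rx rings on this vector, budget_per_ring = max(64 / 3, 1) = 21, so
 * each ring may clean at most 21 packets this poll; the max() keeps a
 * vector with many rings from ending up with a budget of 0.
 */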
2146 i40e_for_each_ring(ring, q_vector->rx) {
2147 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2149 work_done += cleaned;
2150 /* if we clean as many as budgeted, we must not be done */
2151 if (cleaned >= budget_per_ring)
2152 clean_complete = false;
2155 /* If work not completed, return budget and polling will return */
2156 if (!clean_complete) {
2157 const cpumask_t *aff_mask = &q_vector->affinity_mask;
2158 int cpu_id = smp_processor_id();
2160 /* It is possible that the interrupt affinity has changed but,
2161 * if the cpu is pegged at 100%, polling will never exit while
2162 * traffic continues and the interrupt will be stuck on this
2163 * cpu. We check to make sure affinity is correct before we
2164 * continue to poll, otherwise we must stop polling so the
2165 * interrupt can move to the correct cpu.
2167 if (likely(cpumask_test_cpu(cpu_id, aff_mask) ||
2168 !(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))) {
2171 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2172 i40e_enable_wb_on_itr(vsi, q_vector);
2178 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2179 q_vector->arm_wb_state = false;
2181 /* Work is done so exit the polling mode and re-enable the interrupt */
2182 napi_complete_done(napi, work_done);
2184 /* If we're prematurely stopping polling to fix the interrupt
2185 * affinity, we want to make sure polling starts back up, so we
2186 * issue a call to i40e_force_wb which triggers a SW interrupt.
2188 if (!clean_complete)
2189 i40e_force_wb(vsi, q_vector);
2190 else if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED))
2191 i40e_irq_dynamic_enable_icr0(vsi->back, false);
2193 i40e_update_enable_itr(vsi, q_vector);
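/* Editorial note: work_done is clamped to budget - 1 because the NAPI
 * contract requires a poll routine that has called napi_complete_done() to
 * report strictly less than the full budget; returning the full budget
 * would tell the core that more work remains.
 */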
2195 return min(work_done, budget - 1);
2199 * i40e_atr - Add a Flow Director ATR filter
2200 * @tx_ring: ring to add programming descriptor to
2202 * @tx_flags: send tx flags
2204 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2207 struct i40e_filter_program_desc *fdir_desc;
2208 struct i40e_pf *pf = tx_ring->vsi->back;
2210 unsigned char *network;
2212 struct ipv6hdr *ipv6;
2216 u32 flex_ptype, dtype_cmd;
2220 /* make sure ATR is enabled */
2221 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2224 if ((pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2227 /* if sampling is disabled do nothing */
2228 if (!tx_ring->atr_sample_rate)
2231 /* Currently only IPv4/IPv6 with TCP is supported */
2232 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2235 /* snag network header to get L4 type and address */
2236 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2237 skb_inner_network_header(skb) : skb_network_header(skb);
2239 /* Note: tx_flags gets modified to reflect inner protocols in
2240 * tx_enable_csum function if encap is enabled.
2242 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2243 /* access ihl as u8 to avoid unaligned access on ia64 */
2244 hlen = (hdr.network[0] & 0x0F) << 2;
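/* Example (illustrative): for a plain IPv4 header the first byte is 0x45
 * (version 4, IHL 5), so (0x45 & 0x0F) << 2 yields 5 * 4 = 20 bytes of
 * IP header.
 */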
2245 l4_proto = hdr.ipv4->protocol;
2247 hlen = hdr.network - skb->data;
2248 l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
2249 hlen -= hdr.network - skb->data;
2252 if (l4_proto != IPPROTO_TCP)
2255 th = (struct tcphdr *)(hdr.network + hlen);
2257 /* Due to lack of space, no more new filters can be programmed */
2258 if (th->syn && (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
2260 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
2261 /* HW ATR eviction will take care of removing filters on FIN
2264 if (th->fin || th->rst)
2268 tx_ring->atr_count++;
2270 /* sample on all syn/fin/rst packets or once every atr sample rate */
2274 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2277 tx_ring->atr_count = 0;
2279 /* grab the next descriptor */
2280 i = tx_ring->next_to_use;
2281 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2284 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2286 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2287 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2288 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2289 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2290 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2291 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2292 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2294 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2296 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2298 dtype_cmd |= (th->fin || th->rst) ?
2299 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2300 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2301 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2302 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2304 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2305 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2307 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2308 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2310 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2311 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2313 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2314 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2315 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2318 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2319 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2320 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2322 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
2323 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2325 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2326 fdir_desc->rsvd = cpu_to_le32(0);
2327 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2328 fdir_desc->fd_id = cpu_to_le32(0);
2332 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2334 * @tx_ring: ring to send buffer on
2335 * @flags: the tx flags to be set
2337 * Checks the skb and sets up the corresponding generic transmit flags
2338 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2340 * Returns a negative error code if the frame should be dropped,
2341 * otherwise returns 0 to indicate the flags have been set properly.
2343 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2344 struct i40e_ring *tx_ring,
2347 __be16 protocol = skb->protocol;
2350 if (protocol == htons(ETH_P_8021Q) &&
2351 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2352 /* When HW VLAN acceleration is turned off by the user the
2353 * stack sets the protocol to 8021q so that the driver
2354 * can take any steps required to support the SW only
2355 * VLAN handling. In our case the driver doesn't need
2356 * to take any further steps so just set the protocol
2357 * to the encapsulated ethertype.
2359 skb->protocol = vlan_get_protocol(skb);
2363 /* if we have a HW VLAN tag being added, default to the HW one */
2364 if (skb_vlan_tag_present(skb)) {
2365 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2366 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2367 /* else if it is a SW VLAN, check the next protocol and store the tag */
2368 } else if (protocol == htons(ETH_P_8021Q)) {
2369 struct vlan_hdr *vhdr, _vhdr;
2371 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2375 protocol = vhdr->h_vlan_encapsulated_proto;
2376 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2377 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2380 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2383 /* Insert 802.1p priority into VLAN header */
2384 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2385 (skb->priority != TC_PRIO_CONTROL)) {
2386 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2387 tx_flags |= (skb->priority & 0x7) <<
2388 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
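/* Example (illustrative): the 802.1p PCP field occupies the top three bits
 * of the 16-bit TCI, so a packet with skb->priority 5 has 5 shifted into
 * the VLAN portion of tx_flags here and later lands in TCI bits 15:13 of
 * the tag written to the header or descriptor.
 */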
2389 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2390 struct vlan_ethhdr *vhdr;
2393 rc = skb_cow_head(skb, 0);
2396 vhdr = (struct vlan_ethhdr *)skb->data;
2397 vhdr->h_vlan_TCI = htons(tx_flags >>
2398 I40E_TX_FLAGS_VLAN_SHIFT);
2400 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2410 * i40e_tso - set up the tso context descriptor
2411 * @first: pointer to first Tx buffer for xmit
2412 * @hdr_len: ptr to the size of the packet header
2413 * @cd_type_cmd_tso_mss: Quad Word 1
2415 * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
2417 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2418 u64 *cd_type_cmd_tso_mss)
2420 struct sk_buff *skb = first->skb;
2421 u64 cd_cmd, cd_tso_len, cd_mss;
2432 u32 paylen, l4_offset;
2433 u16 gso_segs, gso_size;
2436 if (skb->ip_summed != CHECKSUM_PARTIAL)
2439 if (!skb_is_gso(skb))
2442 err = skb_cow_head(skb, 0);
2446 ip.hdr = skb_network_header(skb);
2447 l4.hdr = skb_transport_header(skb);
2449 /* initialize outer IP header fields */
2450 if (ip.v4->version == 4) {
2454 ip.v6->payload_len = 0;
2457 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2461 SKB_GSO_UDP_TUNNEL |
2462 SKB_GSO_UDP_TUNNEL_CSUM)) {
2463 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2464 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2467 /* determine offset of outer transport header */
2468 l4_offset = l4.hdr - skb->data;
2470 /* remove payload length from outer checksum */
2471 paylen = skb->len - l4_offset;
2472 csum_replace_by_diff(&l4.udp->check,
2473 (__force __wsum)htonl(paylen));
2476 /* reset pointers to inner headers */
2477 ip.hdr = skb_inner_network_header(skb);
2478 l4.hdr = skb_inner_transport_header(skb);
2480 /* initialize inner IP header fields */
2481 if (ip.v4->version == 4) {
2485 ip.v6->payload_len = 0;
2489 /* determine offset of inner transport header */
2490 l4_offset = l4.hdr - skb->data;
2492 /* remove payload length from inner checksum */
2493 paylen = skb->len - l4_offset;
2494 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2496 /* compute length of segmentation header */
2497 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
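/* Example (illustrative): for an untunneled TCP segment with a 14-byte
 * Ethernet header, a 20-byte IPv4 header and doff = 5 (20-byte TCP header,
 * no options), l4_offset is 34 and *hdr_len becomes 54 bytes.
 */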
2499 /* pull values out of skb_shinfo */
2500 gso_size = skb_shinfo(skb)->gso_size;
2501 gso_segs = skb_shinfo(skb)->gso_segs;
2503 /* update GSO size and bytecount with header size */
2504 first->gso_segs = gso_segs;
2505 first->bytecount += (first->gso_segs - 1) * *hdr_len;
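/* Example (illustrative, continuing the 54-byte header case above): a TSO
 * skb that segments into gso_segs = 5 frames adds 4 * 54 = 216 bytes to
 * bytecount, since the header is replicated in every segment the hardware
 * emits after the first.
 */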
2507 /* find the field values */
2508 cd_cmd = I40E_TX_CTX_DESC_TSO;
2509 cd_tso_len = skb->len - *hdr_len;
2511 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2512 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2513 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
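/* Example (illustrative): cd_tso_len carries the payload length excluding
 * the header, so a 7294-byte skb with *hdr_len = 54 and an MSS of 1448
 * programs TSO_LEN = 7240 (exactly five 1448-byte segments) and MSS = 1448
 * into quad word 1 of the context descriptor.
 */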
2518 * i40e_tsyn - set up the tsyn context descriptor
2519 * @tx_ring: ptr to the ring to send
2520 * @skb: ptr to the skb we're sending
2521 * @tx_flags: the collected send information
2522 * @cd_type_cmd_tso_mss: Quad Word 1
2524 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2526 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2527 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2531 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2534 /* Tx timestamps cannot be sampled when doing TSO */
2535 if (tx_flags & I40E_TX_FLAGS_TSO)
2538 /* only timestamp the outbound packet if the user has requested it and
2539 * we are not already transmitting a packet to be timestamped
2541 pf = i40e_netdev_to_pf(tx_ring->netdev);
2542 if (!(pf->flags & I40E_FLAG_PTP))
2546 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
2547 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2548 pf->ptp_tx_skb = skb_get(skb);
2553 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2554 I40E_TXD_CTX_QW1_CMD_SHIFT;
2560 * i40e_tx_enable_csum - Enable Tx checksum offloads
2562 * @tx_flags: pointer to Tx flags currently set
2563 * @td_cmd: Tx descriptor command bits to set
2564 * @td_offset: Tx descriptor header offsets to set
2565 * @tx_ring: Tx descriptor ring
2566 * @cd_tunneling: ptr to context desc bits
2568 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2569 u32 *td_cmd, u32 *td_offset,
2570 struct i40e_ring *tx_ring,
2583 unsigned char *exthdr;
2584 u32 offset, cmd = 0;
2588 if (skb->ip_summed != CHECKSUM_PARTIAL)
2591 ip.hdr = skb_network_header(skb);
2592 l4.hdr = skb_transport_header(skb);
2594 /* compute outer L2 header size */
2595 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
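/* Example (illustrative): MACLEN is expressed in 2-byte words, so a
 * standard 14-byte Ethernet header programs 14 / 2 = 7 into the MACLEN
 * field, and a header with a VLAN tag in place programs 9.
 */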
2597 if (skb->encapsulation) {
2599 /* define outer network header type */
2600 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2601 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2602 I40E_TX_CTX_EXT_IP_IPV4 :
2603 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2605 l4_proto = ip.v4->protocol;
2606 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2607 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2609 exthdr = ip.hdr + sizeof(*ip.v6);
2610 l4_proto = ip.v6->nexthdr;
2611 if (l4.hdr != exthdr)
2612 ipv6_skip_exthdr(skb, exthdr - skb->data,
2613 &l4_proto, &frag_off);
2616 /* define outer transport */
2619 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2620 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2623 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2624 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2628 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2629 l4.hdr = skb_inner_network_header(skb);
2632 if (*tx_flags & I40E_TX_FLAGS_TSO)
2635 skb_checksum_help(skb);
2639 /* compute outer L3 header size */
2640 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2641 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2643 /* switch IP header pointer from outer to inner header */
2644 ip.hdr = skb_inner_network_header(skb);
2646 /* compute tunnel header size */
2647 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2648 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2650 /* indicate if we need to offload outer UDP header */
2651 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2652 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2653 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2654 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2656 /* record tunnel offload values */
2657 *cd_tunneling |= tunnel;
2659 /* switch L4 header pointer from outer to inner */
2660 l4.hdr = skb_inner_transport_header(skb);
2663 /* reset type as we transition from outer to inner headers */
2664 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2665 if (ip.v4->version == 4)
2666 *tx_flags |= I40E_TX_FLAGS_IPV4;
2667 if (ip.v6->version == 6)
2668 *tx_flags |= I40E_TX_FLAGS_IPV6;
2671 /* Enable IP checksum offloads */
2672 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2673 l4_proto = ip.v4->protocol;
2674 /* the stack computes the IP header already, the only time we
2675 * need the hardware to recompute it is in the case of TSO.
2677 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2678 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2679 I40E_TX_DESC_CMD_IIPT_IPV4;
2680 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2681 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2683 exthdr = ip.hdr + sizeof(*ip.v6);
2684 l4_proto = ip.v6->nexthdr;
2685 if (l4.hdr != exthdr)
2686 ipv6_skip_exthdr(skb, exthdr - skb->data,
2687 &l4_proto, &frag_off);
2690 /* compute inner L3 header size */
2691 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
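/* Example (illustrative): IPLEN is expressed in 4-byte words, so a 20-byte
 * IPv4 header without options programs 20 / 4 = 5, while a 40-byte IPv6
 * header programs 10.
 */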
2693 /* Enable L4 checksum offloads */
2696 /* enable checksum offloads */
2697 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2698 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2701 /* enable SCTP checksum offload */
2702 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2703 offset |= (sizeof(struct sctphdr) >> 2) <<
2704 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2707 /* enable UDP checksum offload */
2708 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2709 offset |= (sizeof(struct udphdr) >> 2) <<
2710 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2713 if (*tx_flags & I40E_TX_FLAGS_TSO)
2715 skb_checksum_help(skb);
2720 *td_offset |= offset;
2726 * i40e_create_tx_ctx - Build the Tx context descriptor
2727 * @tx_ring: ring to create the descriptor on
2728 * @cd_type_cmd_tso_mss: Quad Word 1
2729 * @cd_tunneling: Quad Word 0 - bits 0-31
2730 * @cd_l2tag2: Quad Word 0 - bits 32-63
2732 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2733 const u64 cd_type_cmd_tso_mss,
2734 const u32 cd_tunneling, const u32 cd_l2tag2)
2736 struct i40e_tx_context_desc *context_desc;
2737 int i = tx_ring->next_to_use;
2739 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2740 !cd_tunneling && !cd_l2tag2)
2743 /* grab the next descriptor */
2744 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2747 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2749 /* cpu_to_le32 and assign to struct fields */
2750 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2751 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2752 context_desc->rsvd = cpu_to_le16(0);
2753 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2757 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2758 * @tx_ring: the ring to be checked
2759 * @size: number of free descriptors we want to assure are available
2761 * Returns -EBUSY if a stop is needed, else 0
2763 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2765 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2766 /* Memory barrier before checking head and tail */
2769 /* Check again in a case another CPU has just made room available. */
2770 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2773 /* A reprieve! - use start_queue because it doesn't call schedule */
2774 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2775 ++tx_ring->tx_stats.restart_queue;
2780 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2783 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2784 * and so we need to figure out the cases where we need to linearize the skb.
2786 * For TSO we need to count the TSO header and segment payload separately.
2787 * As such we need to check cases where we have 7 fragments or more as we
2788 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2789 * the segment payload in the first descriptor, and another 7 for the
2792 bool __i40e_chk_linearize(struct sk_buff *skb)
2794 const struct skb_frag_struct *frag, *stale;
2797 /* no need to check if number of frags is less than 7 */
2798 nr_frags = skb_shinfo(skb)->nr_frags;
2799 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
2802 /* We need to walk through the list and validate that each group
2803 * of 6 fragments totals at least gso_size.
2805 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
2806 frag = &skb_shinfo(skb)->frags[0];
2808 /* Initialize size to the negative value of gso_size minus 1. We
2809 * use this as the worst case scenario in which the frag ahead
2810 * of us only provides one byte which is why we are limited to 6
2811 * descriptors for a single transmit as the header and previous
2812 * fragment are already consuming 2 descriptors.
2814 sum = 1 - skb_shinfo(skb)->gso_size;
2816 /* Add size of frags 0 through 4 to create our initial sum */
2817 sum += skb_frag_size(frag++);
2818 sum += skb_frag_size(frag++);
2819 sum += skb_frag_size(frag++);
2820 sum += skb_frag_size(frag++);
2821 sum += skb_frag_size(frag++);
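/* Worked example (illustrative): with gso_size = 9000 and 1024-byte frags,
 * sum starts at 1 - 9000 = -8999 and the five adds above only raise it to
 * -3879; the first pass of the loop below adds one more 1024-byte frag,
 * sum is still negative, and we return true because no six frags can cover
 * a full segment.
 */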
2823 /* Walk through fragments adding latest fragment, testing it, and
2824 * then removing stale fragments from the sum.
2826 stale = &skb_shinfo(skb)->frags[0];
2828 sum += skb_frag_size(frag++);
2830 /* if sum is negative we failed to make sufficient progress */
2837 sum -= skb_frag_size(stale++);
2844 * i40e_tx_map - Build the Tx descriptor
2845 * @tx_ring: ring to send buffer on
2847 * @first: first buffer info struct to use
2848 * @tx_flags: collected send information
2849 * @hdr_len: size of the packet header
2850 * @td_cmd: the command field in the descriptor
2851 * @td_offset: offset for checksum or crc
2853 static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
2854 struct i40e_tx_buffer *first, u32 tx_flags,
2855 const u8 hdr_len, u32 td_cmd, u32 td_offset)
2857 unsigned int data_len = skb->data_len;
2858 unsigned int size = skb_headlen(skb);
2859 struct skb_frag_struct *frag;
2860 struct i40e_tx_buffer *tx_bi;
2861 struct i40e_tx_desc *tx_desc;
2862 u16 i = tx_ring->next_to_use;
2867 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
2868 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2869 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2870 I40E_TX_FLAGS_VLAN_SHIFT;
2873 first->tx_flags = tx_flags;
2875 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2877 tx_desc = I40E_TX_DESC(tx_ring, i);
2880 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2881 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2883 if (dma_mapping_error(tx_ring->dev, dma))
2886 /* record length, and DMA address */
2887 dma_unmap_len_set(tx_bi, len, size);
2888 dma_unmap_addr_set(tx_bi, dma, dma);
2890 /* align size to end of page */
2891 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
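/* Illustrative note (assumption: I40E_MAX_DATA_PER_TXD_ALIGNED is a multiple
 * of I40E_MAX_READ_REQ_SIZE, as its name suggests): "-dma & (size - 1)" is
 * the distance from this DMA address up to the next read-request boundary,
 * so the first chunk of an oversized buffer is stretched just enough that
 * every following chunk starts on a boundary.  E.g. with a 4 KB read request
 * size and dma ending in 0x0f80, max_data grows by 0x080 so the next chunk
 * begins at a ...0x1000-aligned address.
 */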
2892 tx_desc->buffer_addr = cpu_to_le64(dma);
2894 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2895 tx_desc->cmd_type_offset_bsz =
2896 build_ctob(td_cmd, td_offset,
2903 if (i == tx_ring->count) {
2904 tx_desc = I40E_TX_DESC(tx_ring, 0);
2911 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
2912 tx_desc->buffer_addr = cpu_to_le64(dma);
2915 if (likely(!data_len))
2918 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2925 if (i == tx_ring->count) {
2926 tx_desc = I40E_TX_DESC(tx_ring, 0);
2930 size = skb_frag_size(frag);
2933 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2936 tx_bi = &tx_ring->tx_bi[i];
2939 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
2942 if (i == tx_ring->count)
2945 tx_ring->next_to_use = i;
2947 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2949 /* write last descriptor with EOP bit */
2950 td_cmd |= I40E_TX_DESC_CMD_EOP;
2952 /* We can OR these values together as they both are checked against
2953 * 4 below and at this point desc_count will be used as a boolean value
2954 * after this if/else block.
2956 desc_count |= ++tx_ring->packet_stride;
2958 /* Algorithm to optimize tail and RS bit setting:
2959 * if queue is stopped
2961 * reset packet counter
2962 * else if xmit_more is supported and is true
2963 * advance packet counter to 4
2964 * reset desc_count to 0
2966 * if desc_count >= 4
2968 * reset packet counter
2972 * Note: If there are fewer than 4 descriptors
2973 * pending and interrupts were disabled, the service task will
2974 * trigger a force WB.
2976 if (netif_xmit_stopped(txring_txq(tx_ring))) {
2978 } else if (skb->xmit_more) {
2979 /* set stride to arm on next packet and reset desc_count */
2980 tx_ring->packet_stride = WB_STRIDE;
2982 } else if (desc_count >= WB_STRIDE) {
2984 /* write last descriptor with RS bit set */
2985 td_cmd |= I40E_TX_DESC_CMD_RS;
2986 tx_ring->packet_stride = 0;
2989 tx_desc->cmd_type_offset_bsz =
2990 build_ctob(td_cmd, td_offset, size, td_tag);
2992 /* Force memory writes to complete before letting h/w know there
2993 * are new descriptors to fetch.
2995 * We also use this memory barrier to make certain all of the
2996 * status bits have been updated before next_to_watch is written.
3000 /* set next_to_watch value indicating a packet is present */
3001 first->next_to_watch = tx_desc;
3003 /* notify HW of packet */
3005 writel(i, tx_ring->tail);
3007 /* we need this if more than one processor can write to our tail
3008 * at a time; it synchronizes IO on IA64/Altix systems
3016 dev_info(tx_ring->dev, "TX DMA map failed\n");
3018 /* clear dma mappings for failed tx_bi map */
3020 tx_bi = &tx_ring->tx_bi[i];
3021 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3029 tx_ring->next_to_use = i;
3033 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3035 * @tx_ring: ring to send buffer on
3037 * Returns NETDEV_TX_OK if sent, else an error code
3039 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3040 struct i40e_ring *tx_ring)
3042 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3043 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3044 struct i40e_tx_buffer *first;
3053 /* prefetch the data, we'll need it later */
3054 prefetch(skb->data);
3056 count = i40e_xmit_descriptor_count(skb);
3057 if (i40e_chk_linearize(skb, count)) {
3058 if (__skb_linearize(skb)) {
3059 dev_kfree_skb_any(skb);
3060 return NETDEV_TX_OK;
3062 count = i40e_txd_use_count(skb->len);
3063 tx_ring->tx_stats.tx_linearize++;
3066 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3067 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3068 * + 4 desc gap to avoid the cache line where head is,
3069 * + 1 desc for context descriptor,
3070 * otherwise try next time
3072 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3073 tx_ring->tx_stats.tx_busy++;
3074 return NETDEV_TX_BUSY;
3077 /* record the location of the first descriptor for this packet */
3078 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3080 first->bytecount = skb->len;
3081 first->gso_segs = 1;
3083 /* prepare the xmit flags */
3084 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3087 /* obtain protocol of skb */
3088 protocol = vlan_get_protocol(skb);
3090 /* setup IPv4/IPv6 offloads */
3091 if (protocol == htons(ETH_P_IP))
3092 tx_flags |= I40E_TX_FLAGS_IPV4;
3093 else if (protocol == htons(ETH_P_IPV6))
3094 tx_flags |= I40E_TX_FLAGS_IPV6;
3096 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3101 tx_flags |= I40E_TX_FLAGS_TSO;
3103 /* Always offload the checksum, since it's in the data descriptor */
3104 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3105 tx_ring, &cd_tunneling);
3109 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3112 tx_flags |= I40E_TX_FLAGS_TSYN;
3114 skb_tx_timestamp(skb);
3116 /* always enable CRC insertion offload */
3117 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3119 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3120 cd_tunneling, cd_l2tag2);
3122 /* Add Flow Director ATR if it's enabled.
3124 * NOTE: this must always be directly before the data descriptor.
3126 i40e_atr(tx_ring, skb, tx_flags);
3128 i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3131 return NETDEV_TX_OK;
3134 dev_kfree_skb_any(first->skb);
3136 return NETDEV_TX_OK;
3140 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3142 * @netdev: network interface device structure
3144 * Returns NETDEV_TX_OK if sent, else an error code
3146 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3148 struct i40e_netdev_priv *np = netdev_priv(netdev);
3149 struct i40e_vsi *vsi = np->vsi;
3150 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3152 /* hardware can't handle really short frames, hardware padding works
3155 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3156 return NETDEV_TX_OK;
3158 return i40e_xmit_frame_ring(skb, tx_ring);