drivers/net/ethernet/sfc/tx.c
1 /****************************************************************************
2  * Driver for Solarflare network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2013 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10
11 #include <linux/pci.h>
12 #include <linux/tcp.h>
13 #include <linux/ip.h>
14 #include <linux/in.h>
15 #include <linux/ipv6.h>
16 #include <linux/slab.h>
17 #include <net/ipv6.h>
18 #include <linux/if_ether.h>
19 #include <linux/highmem.h>
20 #include <linux/cache.h>
21 #include "net_driver.h"
22 #include "efx.h"
23 #include "io.h"
24 #include "nic.h"
25 #include "workarounds.h"
26 #include "ef10_regs.h"
27
28 #ifdef EFX_USE_PIO
29
30 #define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
31 #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
32 unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
33
34 #endif /* EFX_USE_PIO */
35
36 static inline unsigned int
37 efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
38 {
39         return tx_queue->insert_count & tx_queue->ptr_mask;
40 }
41
42 static inline struct efx_tx_buffer *
43 __efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
44 {
45         return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
46 }
47
48 static inline struct efx_tx_buffer *
49 efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
50 {
51         struct efx_tx_buffer *buffer =
52                 __efx_tx_queue_get_insert_buffer(tx_queue);
53
54         EFX_BUG_ON_PARANOID(buffer->len);
55         EFX_BUG_ON_PARANOID(buffer->flags);
56         EFX_BUG_ON_PARANOID(buffer->unmap_len);
57
58         return buffer;
59 }
60
61 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
62                                struct efx_tx_buffer *buffer,
63                                unsigned int *pkts_compl,
64                                unsigned int *bytes_compl)
65 {
66         if (buffer->unmap_len) {
67                 struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
68                 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
69                 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
70                         dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
71                                          DMA_TO_DEVICE);
72                 else
73                         dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
74                                        DMA_TO_DEVICE);
75                 buffer->unmap_len = 0;
76         }
77
78         if (buffer->flags & EFX_TX_BUF_SKB) {
79                 (*pkts_compl)++;
80                 (*bytes_compl) += buffer->skb->len;
81                 dev_consume_skb_any((struct sk_buff *)buffer->skb);
82                 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
83                            "TX queue %d transmission id %x complete\n",
84                            tx_queue->queue, tx_queue->read_count);
85         } else if (buffer->flags & EFX_TX_BUF_HEAP) {
86                 kfree(buffer->heap_buf);
87         }
88
89         buffer->len = 0;
90         buffer->flags = 0;
91 }
92
93 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
94                                struct sk_buff *skb);
95
96 static inline unsigned
97 efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
98 {
99         /* Depending on the NIC revision, we can use descriptor
100          * lengths up to 8K or 8K-1.  However, since PCI Express
101          * devices must split read requests at 4K boundaries, there is
102          * little benefit from using descriptors that cross those
103          * boundaries and we keep things simple by not doing so.
104          */
105         unsigned len = (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1;
106
107         /* Work around hardware bug for unaligned buffers. */
108         if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
109                 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
110
111         return len;
112 }
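/* Editorial note, not part of the original source: assuming EFX_PAGE_SIZE is
 * 4096, (~dma_addr & (EFX_PAGE_SIZE - 1)) + 1 is simply the number of bytes
 * from dma_addr up to the next 4K boundary.  For example, dma_addr = 0x1f40
 * gives ~dma_addr & 0xfff = 0x0bf, so len = 0x0c0 = 192 bytes, ending exactly
 * at 0x2000.
 */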
113
114 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
115 {
116         /* Header and payload descriptor for each output segment, plus
117          * one for every input fragment boundary within a segment
118          */
119         unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
120
121         /* Possibly one more per segment for the alignment workaround,
122          * or for option descriptors
123          */
124         if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
125                 max_descs += EFX_TSO_MAX_SEGS;
126
127         /* Possibly more for PCIe page boundaries within input fragments */
128         if (PAGE_SIZE > EFX_PAGE_SIZE)
129                 max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
130                                    DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
131
132         return max_descs;
133 }
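/* Worked example (editorial sketch; the macro values are assumptions, not
 * taken from this file): with EFX_TSO_MAX_SEGS = 100 and MAX_SKB_FRAGS = 17,
 * the base estimate is 100 * 2 + 17 = 217 descriptors.  The workaround /
 * option-descriptor case adds another 100, and where PAGE_SIZE exceeds
 * EFX_PAGE_SIZE a further allowance covers fragments that straddle 4K
 * boundaries.
 */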
134
135 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
136 {
137         /* We need to consider both queues that the net core sees as one */
138         struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
139         struct efx_nic *efx = txq1->efx;
140         unsigned int fill_level;
141
142         fill_level = max(txq1->insert_count - txq1->old_read_count,
143                          txq2->insert_count - txq2->old_read_count);
144         if (likely(fill_level < efx->txq_stop_thresh))
145                 return;
146
147         /* We used the stale old_read_count above, which gives us a
148          * pessimistic estimate of the fill level (which may even
149          * validly be >= efx->txq_entries).  Now try again using
150          * read_count (more likely to be a cache miss).
151          *
152          * If we read read_count and then conditionally stop the
153          * queue, it is possible for the completion path to race with
154          * us and complete all outstanding descriptors in the middle,
155          * after which there will be no more completions to wake it.
156          * Therefore we stop the queue first, then read read_count
157          * (with a memory barrier to ensure the ordering), then
158          * restart the queue if the fill level turns out to be low
159          * enough.
160          */
161         netif_tx_stop_queue(txq1->core_txq);
162         smp_mb();
163         txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
164         txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
165
166         fill_level = max(txq1->insert_count - txq1->old_read_count,
167                          txq2->insert_count - txq2->old_read_count);
168         EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
169         if (likely(fill_level < efx->txq_stop_thresh)) {
170                 smp_mb();
171                 if (likely(!efx->loopback_selftest))
172                         netif_tx_start_queue(txq1->core_txq);
173         }
174 }
175
176 #ifdef EFX_USE_PIO
177
178 struct efx_short_copy_buffer {
179         int used;
180         u8 buf[L1_CACHE_BYTES];
181 };
182
183 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
184  * Advances piobuf pointer. Leaves additional data in the copy buffer.
185  */
186 static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
187                                     u8 *data, int len,
188                                     struct efx_short_copy_buffer *copy_buf)
189 {
190         int block_len = len & ~(sizeof(copy_buf->buf) - 1);
191
192         __iowrite64_copy(*piobuf, data, block_len >> 3);
193         *piobuf += block_len;
194         len -= block_len;
195
196         if (len) {
197                 data += block_len;
198                 BUG_ON(copy_buf->used);
199                 BUG_ON(len > sizeof(copy_buf->buf));
200                 memcpy(copy_buf->buf, data, len);
201                 copy_buf->used = len;
202         }
203 }
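/* Editorial example (assuming L1_CACHE_BYTES is 64): for a 100-byte source,
 * block_len = 100 & ~63 = 64 bytes, written as eight 64-bit words, while the
 * remaining 36 bytes are parked in copy_buf for the next call (or for
 * efx_flush_copy_buffer()) so that every write to the PIO aperture stays
 * aligned.
 */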
204
205 /* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
206  * Advances piobuf pointer. Leaves additional data in the copy buffer.
207  */
208 static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
209                                        u8 *data, int len,
210                                        struct efx_short_copy_buffer *copy_buf)
211 {
212         if (copy_buf->used) {
213                 /* if the copy buffer is partially full, fill it up and write */
214                 int copy_to_buf =
215                         min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
216
217                 memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
218                 copy_buf->used += copy_to_buf;
219
220                 /* if we didn't fill it up then we're done for now */
221                 if (copy_buf->used < sizeof(copy_buf->buf))
222                         return;
223
224                 __iowrite64_copy(*piobuf, copy_buf->buf,
225                                  sizeof(copy_buf->buf) >> 3);
226                 *piobuf += sizeof(copy_buf->buf);
227                 data += copy_to_buf;
228                 len -= copy_to_buf;
229                 copy_buf->used = 0;
230         }
231
232         efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
233 }
234
235 static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
236                                   struct efx_short_copy_buffer *copy_buf)
237 {
238         /* if there's anything in it, write the whole buffer, including junk */
239         if (copy_buf->used)
240                 __iowrite64_copy(piobuf, copy_buf->buf,
241                                  sizeof(copy_buf->buf) >> 3);
242 }
243
244 /* Traverse skb structure and copy fragments into the PIO buffer.
245  * Advances piobuf pointer.
246  */
247 static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
248                                      u8 __iomem **piobuf,
249                                      struct efx_short_copy_buffer *copy_buf)
250 {
251         int i;
252
253         efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
254                                 copy_buf);
255
256         for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
257                 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
258                 u8 *vaddr;
259
260                 vaddr = kmap_atomic(skb_frag_page(f));
261
262                 efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
263                                            skb_frag_size(f), copy_buf);
264                 kunmap_atomic(vaddr);
265         }
266
267         EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
268 }
269
270 static struct efx_tx_buffer *
271 efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
272 {
273         struct efx_tx_buffer *buffer =
274                 efx_tx_queue_get_insert_buffer(tx_queue);
275         u8 __iomem *piobuf = tx_queue->piobuf;
276
277         /* Copy to PIO buffer. Ensure the writes are padded to the end
278          * of a cache line, as this is required for write-combining to be
279          * effective on at least x86.
280          */
281
282         if (skb_shinfo(skb)->nr_frags) {
283                 /* The size of the copy buffer will ensure all writes
284                  * are the size of a cache line.
285                  */
286                 struct efx_short_copy_buffer copy_buf;
287
288                 copy_buf.used = 0;
289
290                 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
291                                          &piobuf, &copy_buf);
292                 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
293         } else {
294                 /* Pad the write to the size of a cache line.
295          * We can do this because we know the skb_shared_info struct is
296                  * after the source, and the destination buffer is big enough.
297                  */
298                 BUILD_BUG_ON(L1_CACHE_BYTES >
299                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
300                 __iowrite64_copy(tx_queue->piobuf, skb->data,
301                                  ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
302         }
303
304         EFX_POPULATE_QWORD_5(buffer->option,
305                              ESF_DZ_TX_DESC_IS_OPT, 1,
306                              ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
307                              ESF_DZ_TX_PIO_CONT, 0,
308                              ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
309                              ESF_DZ_TX_PIO_BUF_ADDR,
310                              tx_queue->piobuf_offset);
311         ++tx_queue->pio_packets;
312         ++tx_queue->insert_count;
313         return buffer;
314 }
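/* Editorial example (assuming L1_CACHE_BYTES is 64): for a linear skb with
 * skb->len = 100, ALIGN(100, 64) >> 3 = 16 qwords (128 bytes) are written.
 * The 28 bytes beyond the packet are read from the memory following the
 * packet data; the BUILD_BUG_ON() above guarantees at least a cache line of
 * skb_shared_info follows, so the over-read stays within the skb allocation.
 */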
315 #endif /* EFX_USE_PIO */
316
317 /*
318  * Add a socket buffer to a TX queue
319  *
320  * This maps all fragments of a socket buffer for DMA and adds them to
321  * the TX queue.  The queue's insert pointer will be incremented by
322  * the number of fragments in the socket buffer.
323  *
324  * If any DMA mapping fails, any mapped fragments will be unmapped,
325  * and the queue's insert pointer will be restored to its original value.
326  *
327  * This function is split out from efx_hard_start_xmit to allow the
328  * loopback test to direct packets via specific TX queues.
329  *
330  * Returns NETDEV_TX_OK.
331  * You must hold netif_tx_lock() to call this function.
332  */
333 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
334 {
335         struct efx_nic *efx = tx_queue->efx;
336         struct device *dma_dev = &efx->pci_dev->dev;
337         struct efx_tx_buffer *buffer;
338         unsigned int old_insert_count = tx_queue->insert_count;
339         skb_frag_t *fragment;
340         unsigned int len, unmap_len = 0;
341         dma_addr_t dma_addr, unmap_addr = 0;
342         unsigned int dma_len;
343         unsigned short dma_flags;
344         int i = 0;
345
346         if (skb_shinfo(skb)->gso_size)
347                 return efx_enqueue_skb_tso(tx_queue, skb);
348
349         /* Get size of the initial fragment */
350         len = skb_headlen(skb);
351
352         /* Pad if necessary */
353         if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) {
354                 EFX_BUG_ON_PARANOID(skb->data_len);
355                 len = 32 + 1;
356                 if (skb_pad(skb, len - skb->len))
357                         return NETDEV_TX_OK;
358         }
359
360         /* Consider using PIO for short packets */
361 #ifdef EFX_USE_PIO
362         if (skb->len <= efx_piobuf_size && !skb->xmit_more &&
363             efx_nic_may_tx_pio(tx_queue)) {
364                 buffer = efx_enqueue_skb_pio(tx_queue, skb);
365                 dma_flags = EFX_TX_BUF_OPTION;
366                 goto finish_packet;
367         }
368 #endif
369
370         /* Map for DMA.  Use dma_map_single rather than dma_map_page
371          * since this is more efficient on machines with sparse
372          * memory.
373          */
374         dma_flags = EFX_TX_BUF_MAP_SINGLE;
375         dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
376
377         /* Process all fragments */
378         while (1) {
379                 if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
380                         goto dma_err;
381
382                 /* Store fields for marking in the per-fragment final
383                  * descriptor */
384                 unmap_len = len;
385                 unmap_addr = dma_addr;
386
387                 /* Add to TX queue, splitting across DMA boundaries */
388                 do {
389                         buffer = efx_tx_queue_get_insert_buffer(tx_queue);
390
391                         dma_len = efx_max_tx_len(efx, dma_addr);
392                         if (likely(dma_len >= len))
393                                 dma_len = len;
394
395                         /* Fill out per descriptor fields */
396                         buffer->len = dma_len;
397                         buffer->dma_addr = dma_addr;
398                         buffer->flags = EFX_TX_BUF_CONT;
399                         len -= dma_len;
400                         dma_addr += dma_len;
401                         ++tx_queue->insert_count;
402                 } while (len);
403
404                 /* Transfer ownership of the unmapping to the final buffer */
405                 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
406                 buffer->unmap_len = unmap_len;
407                 buffer->dma_offset = buffer->dma_addr - unmap_addr;
408                 unmap_len = 0;
409
410                 /* Get address and size of next fragment */
411                 if (i >= skb_shinfo(skb)->nr_frags)
412                         break;
413                 fragment = &skb_shinfo(skb)->frags[i];
414                 len = skb_frag_size(fragment);
415                 i++;
416                 /* Map for DMA */
417                 dma_flags = 0;
418                 dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
419                                             DMA_TO_DEVICE);
420         }
421
422         /* Transfer ownership of the skb to the final buffer */
423 #ifdef EFX_USE_PIO
424 finish_packet:
425 #endif
426         buffer->skb = skb;
427         buffer->flags = EFX_TX_BUF_SKB | dma_flags;
428
429         netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
430
431         efx_tx_maybe_stop_queue(tx_queue);
432
433         /* Pass off to hardware */
434         if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
435                 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
436
437                 /* There could be packets left on the partner queue if those
438                  * SKBs had skb->xmit_more set. If we do not push those they
439                  * could be left for a long time and cause a netdev watchdog.
440                  */
441                 if (txq2->xmit_more_available)
442                         efx_nic_push_buffers(txq2);
443
444                 efx_nic_push_buffers(tx_queue);
445         } else {
446                 tx_queue->xmit_more_available = skb->xmit_more;
447         }
448
449         tx_queue->tx_packets++;
450
451         return NETDEV_TX_OK;
452
453  dma_err:
454         netif_err(efx, tx_err, efx->net_dev,
455                   " TX queue %d could not map skb with %d bytes %d "
456                   "fragments for DMA\n", tx_queue->queue, skb->len,
457                   skb_shinfo(skb)->nr_frags + 1);
458
459         /* Mark the packet as transmitted, and free the SKB ourselves */
460         dev_kfree_skb_any(skb);
461
462         /* Work backwards until we hit the original insert pointer value */
463         while (tx_queue->insert_count != old_insert_count) {
464                 unsigned int pkts_compl = 0, bytes_compl = 0;
465                 --tx_queue->insert_count;
466                 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
467                 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
468         }
469
470         /* Free the fragment we were mid-way through pushing */
471         if (unmap_len) {
472                 if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
473                         dma_unmap_single(dma_dev, unmap_addr, unmap_len,
474                                          DMA_TO_DEVICE);
475                 else
476                         dma_unmap_page(dma_dev, unmap_addr, unmap_len,
477                                        DMA_TO_DEVICE);
478         }
479
480         return NETDEV_TX_OK;
481 }
482
483 /* Remove packets from the TX queue
484  *
485  * This removes packets from the TX queue, up to and including the
486  * specified index.
487  */
488 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
489                                 unsigned int index,
490                                 unsigned int *pkts_compl,
491                                 unsigned int *bytes_compl)
492 {
493         struct efx_nic *efx = tx_queue->efx;
494         unsigned int stop_index, read_ptr;
495
496         stop_index = (index + 1) & tx_queue->ptr_mask;
497         read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
498
499         while (read_ptr != stop_index) {
500                 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
501
502                 if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
503                     unlikely(buffer->len == 0)) {
504                         netif_err(efx, tx_err, efx->net_dev,
505                                   "TX queue %d spurious TX completion id %x\n",
506                                   tx_queue->queue, read_ptr);
507                         efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
508                         return;
509                 }
510
511                 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
512
513                 ++tx_queue->read_count;
514                 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
515         }
516 }
517
518 /* Initiate a packet transmission.  We use one channel per CPU
519  * (sharing when we have more CPUs than channels).  On Falcon, the TX
520  * completion events will be directed back to the CPU that transmitted
521  * the packet, which should be cache-efficient.
522  *
523  * Context: non-blocking.
524  * Note that returning anything other than NETDEV_TX_OK will cause the
525  * OS to free the skb.
526  */
527 netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
528                                 struct net_device *net_dev)
529 {
530         struct efx_nic *efx = netdev_priv(net_dev);
531         struct efx_tx_queue *tx_queue;
532         unsigned index, type;
533
534         EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
535
536         /* PTP "event" packet */
537         if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
538             unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
539                 return efx_ptp_tx(efx, skb);
540         }
541
542         index = skb_get_queue_mapping(skb);
543         type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
544         if (index >= efx->n_tx_channels) {
545                 index -= efx->n_tx_channels;
546                 type |= EFX_TXQ_TYPE_HIGHPRI;
547         }
548         tx_queue = efx_get_tx_queue(efx, index, type);
549
550         return efx_enqueue_skb(tx_queue, skb);
551 }
552
553 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
554 {
555         struct efx_nic *efx = tx_queue->efx;
556
557         /* Must be inverse of queue lookup in efx_hard_start_xmit() */
558         tx_queue->core_txq =
559                 netdev_get_tx_queue(efx->net_dev,
560                                     tx_queue->queue / EFX_TXQ_TYPES +
561                                     ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
562                                      efx->n_tx_channels : 0));
563 }
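/* Editorial note (assuming EFX_TXQ_TYPES is 4 and that tx_queue->queue is
 * laid out as TX-channel index * EFX_TXQ_TYPES + type): both checksum
 * variants of channel N's normal-priority queues map to core queue N, while
 * the high-priority pair maps to core queue N + n_tx_channels, mirroring the
 * index/type split in efx_hard_start_xmit().
 */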
564
565 int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
566 {
567         struct efx_nic *efx = netdev_priv(net_dev);
568         struct efx_channel *channel;
569         struct efx_tx_queue *tx_queue;
570         unsigned tc;
571         int rc;
572
573         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
574                 return -EINVAL;
575
576         if (num_tc == net_dev->num_tc)
577                 return 0;
578
579         for (tc = 0; tc < num_tc; tc++) {
580                 net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
581                 net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
582         }
583
584         if (num_tc > net_dev->num_tc) {
585                 /* Initialise high-priority queues as necessary */
586                 efx_for_each_channel(channel, efx) {
587                         efx_for_each_possible_channel_tx_queue(tx_queue,
588                                                                channel) {
589                                 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
590                                         continue;
591                                 if (!tx_queue->buffer) {
592                                         rc = efx_probe_tx_queue(tx_queue);
593                                         if (rc)
594                                                 return rc;
595                                 }
596                                 if (!tx_queue->initialised)
597                                         efx_init_tx_queue(tx_queue);
598                                 efx_init_tx_queue_core_txq(tx_queue);
599                         }
600                 }
601         } else {
602                 /* Reduce number of classes before number of queues */
603                 net_dev->num_tc = num_tc;
604         }
605
606         rc = netif_set_real_num_tx_queues(net_dev,
607                                           max_t(int, num_tc, 1) *
608                                           efx->n_tx_channels);
609         if (rc)
610                 return rc;
611
612         /* Do not destroy high-priority queues when they become
613          * unused.  We would have to flush them first, and it is
614          * fairly difficult to flush a subset of TX queues.  Leave
615          * it to efx_fini_channels().
616          */
617
618         net_dev->num_tc = num_tc;
619         return 0;
620 }
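/* Illustrative mapping (editorial note): with efx->n_tx_channels = 4 and
 * num_tc = 2, the loop above sets tc_to_txq[0] = { .offset = 0, .count = 4 }
 * and tc_to_txq[1] = { .offset = 4, .count = 4 }, and the stack is told to
 * use 2 * 4 = 8 real TX queues, the upper four being the high-priority ones.
 */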
621
622 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
623 {
624         unsigned fill_level;
625         struct efx_nic *efx = tx_queue->efx;
626         struct efx_tx_queue *txq2;
627         unsigned int pkts_compl = 0, bytes_compl = 0;
628
629         EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
630
631         efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
632         tx_queue->pkts_compl += pkts_compl;
633         tx_queue->bytes_compl += bytes_compl;
634
635         if (pkts_compl > 1)
636                 ++tx_queue->merge_events;
637
638         /* See if we need to restart the netif queue.  This memory
639          * barrier ensures that we write read_count (inside
640          * efx_dequeue_buffers()) before reading the queue status.
641          */
642         smp_mb();
643         if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
644             likely(efx->port_enabled) &&
645             likely(netif_device_present(efx->net_dev))) {
646                 txq2 = efx_tx_queue_partner(tx_queue);
647                 fill_level = max(tx_queue->insert_count - tx_queue->read_count,
648                                  txq2->insert_count - txq2->read_count);
649                 if (fill_level <= efx->txq_wake_thresh)
650                         netif_tx_wake_queue(tx_queue->core_txq);
651         }
652
653         /* Check whether the hardware queue is now empty */
654         if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
655                 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
656                 if (tx_queue->read_count == tx_queue->old_write_count) {
657                         smp_mb();
658                         tx_queue->empty_read_count =
659                                 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
660                 }
661         }
662 }
663
664 /* Size of page-based TSO header buffers.  Larger blocks must be
665  * allocated from the heap.
666  */
667 #define TSOH_STD_SIZE   128
668 #define TSOH_PER_PAGE   (PAGE_SIZE / TSOH_STD_SIZE)
669
670 /* At most half the descriptors in the queue at any time will refer to
671  * a TSO header buffer, since they must always be followed by a
672  * payload descriptor referring to an skb.
673  */
674 static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
675 {
676         return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
677 }
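/* Sizing example (editorial note, assuming PAGE_SIZE is 4096): each page
 * holds TSOH_PER_PAGE = 4096 / 128 = 32 standard header buffers, so a ring
 * with ptr_mask + 1 = 1024 entries needs DIV_ROUND_UP(1024, 2 * 32) = 16
 * pages, consistent with the rule that at most half the descriptors can be
 * TSO headers at any time.
 */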
678
679 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
680 {
681         struct efx_nic *efx = tx_queue->efx;
682         unsigned int entries;
683         int rc;
684
685         /* Create the smallest power-of-two aligned ring */
686         entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
687         EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
688         tx_queue->ptr_mask = entries - 1;
689
690         netif_dbg(efx, probe, efx->net_dev,
691                   "creating TX queue %d size %#x mask %#x\n",
692                   tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
693
694         /* Allocate software ring */
695         tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
696                                    GFP_KERNEL);
697         if (!tx_queue->buffer)
698                 return -ENOMEM;
699
700         if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
701                 tx_queue->tsoh_page =
702                         kcalloc(efx_tsoh_page_count(tx_queue),
703                                 sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
704                 if (!tx_queue->tsoh_page) {
705                         rc = -ENOMEM;
706                         goto fail1;
707                 }
708         }
709
710         /* Allocate hardware ring */
711         rc = efx_nic_probe_tx(tx_queue);
712         if (rc)
713                 goto fail2;
714
715         return 0;
716
717 fail2:
718         kfree(tx_queue->tsoh_page);
719         tx_queue->tsoh_page = NULL;
720 fail1:
721         kfree(tx_queue->buffer);
722         tx_queue->buffer = NULL;
723         return rc;
724 }
725
726 void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
727 {
728         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
729                   "initialising TX queue %d\n", tx_queue->queue);
730
731         tx_queue->insert_count = 0;
732         tx_queue->write_count = 0;
733         tx_queue->old_write_count = 0;
734         tx_queue->read_count = 0;
735         tx_queue->old_read_count = 0;
736         tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
737         tx_queue->xmit_more_available = false;
738
739         /* Set up TX descriptor ring */
740         efx_nic_init_tx(tx_queue);
741
742         tx_queue->initialised = true;
743 }
744
745 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
746 {
747         struct efx_tx_buffer *buffer;
748
749         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
750                   "shutting down TX queue %d\n", tx_queue->queue);
751
752         if (!tx_queue->buffer)
753                 return;
754
755         /* Free any buffers left in the ring */
756         while (tx_queue->read_count != tx_queue->write_count) {
757                 unsigned int pkts_compl = 0, bytes_compl = 0;
758                 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
759                 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
760
761                 ++tx_queue->read_count;
762         }
763         tx_queue->xmit_more_available = false;
764         netdev_tx_reset_queue(tx_queue->core_txq);
765 }
766
767 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
768 {
769         int i;
770
771         if (!tx_queue->buffer)
772                 return;
773
774         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
775                   "destroying TX queue %d\n", tx_queue->queue);
776         efx_nic_remove_tx(tx_queue);
777
778         if (tx_queue->tsoh_page) {
779                 for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
780                         efx_nic_free_buffer(tx_queue->efx,
781                                             &tx_queue->tsoh_page[i]);
782                 kfree(tx_queue->tsoh_page);
783                 tx_queue->tsoh_page = NULL;
784         }
785
786         kfree(tx_queue->buffer);
787         tx_queue->buffer = NULL;
788 }
789
790
791 /* Efx TCP segmentation acceleration.
792  *
793  * Why?  Because by doing it here in the driver we can go significantly
794  * faster than the generic GSO path.
795  *
796  * Requires TX checksum offload support.
797  */
798
799 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
800
801 /**
802  * struct tso_state - TSO state for an SKB
803  * @out_len: Remaining length in current segment
804  * @seqnum: Current sequence number
805  * @ipv4_id: Current IPv4 ID, host endian
806  * @packet_space: Remaining space in current packet
807  * @dma_addr: DMA address of current position
808  * @in_len: Remaining length in current SKB fragment
809  * @unmap_len: Length of SKB fragment
810  * @unmap_addr: DMA address of SKB fragment
811  * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
812  * @protocol: Network protocol (after any VLAN header)
813  * @ip_off: Offset of IP header
814  * @tcp_off: Offset of TCP header
815  * @header_len: Number of bytes of header
816  * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
817  * @header_dma_addr: Header DMA address, when using option descriptors
818  * @header_unmap_len: Header DMA mapped length, or 0 if not using option
819  *      descriptors
820  *
821  * The state used during segmentation.  It is put into this data structure
822  * just to make it easy to pass into inline functions.
823  */
824 struct tso_state {
825         /* Output position */
826         unsigned out_len;
827         unsigned seqnum;
828         u16 ipv4_id;
829         unsigned packet_space;
830
831         /* Input position */
832         dma_addr_t dma_addr;
833         unsigned in_len;
834         unsigned unmap_len;
835         dma_addr_t unmap_addr;
836         unsigned short dma_flags;
837
838         __be16 protocol;
839         unsigned int ip_off;
840         unsigned int tcp_off;
841         unsigned header_len;
842         unsigned int ip_base_len;
843         dma_addr_t header_dma_addr;
844         unsigned int header_unmap_len;
845 };
846
847
848 /*
849  * Verify that our various assumptions about sk_buffs and the conditions
850  * under which TSO will be attempted hold true.  Return the protocol number.
851  */
852 static __be16 efx_tso_check_protocol(struct sk_buff *skb)
853 {
854         __be16 protocol = skb->protocol;
855
856         EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
857                             protocol);
858         if (protocol == htons(ETH_P_8021Q)) {
859                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
860                 protocol = veh->h_vlan_encapsulated_proto;
861         }
862
863         if (protocol == htons(ETH_P_IP)) {
864                 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
865         } else {
866                 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6));
867                 EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
868         }
869         EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
870                              + (tcp_hdr(skb)->doff << 2u)) >
871                             skb_headlen(skb));
872
873         return protocol;
874 }
875
876 static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
877                                struct efx_tx_buffer *buffer, unsigned int len)
878 {
879         u8 *result;
880
881         EFX_BUG_ON_PARANOID(buffer->len);
882         EFX_BUG_ON_PARANOID(buffer->flags);
883         EFX_BUG_ON_PARANOID(buffer->unmap_len);
884
885         if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
886                 unsigned index =
887                         (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
888                 struct efx_buffer *page_buf =
889                         &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
890                 unsigned offset =
891                         TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
892
893                 if (unlikely(!page_buf->addr) &&
894                     efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
895                                          GFP_ATOMIC))
896                         return NULL;
897
898                 result = (u8 *)page_buf->addr + offset;
899                 buffer->dma_addr = page_buf->dma_addr + offset;
900                 buffer->flags = EFX_TX_BUF_CONT;
901         } else {
902                 tx_queue->tso_long_headers++;
903
904                 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
905                 if (unlikely(!buffer->heap_buf))
906                         return NULL;
907                 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
908                 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
909         }
910
911         buffer->len = len;
912
913         return result;
914 }
915
916 /**
917  * efx_tx_queue_insert - push descriptors onto the TX queue
918  * @tx_queue:           Efx TX queue
919  * @dma_addr:           DMA address of fragment
920  * @len:                Length of fragment
921  * @final_buffer:       The final buffer inserted into the queue
922  *
923  * Push descriptors onto the TX queue.
924  */
925 static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
926                                 dma_addr_t dma_addr, unsigned len,
927                                 struct efx_tx_buffer **final_buffer)
928 {
929         struct efx_tx_buffer *buffer;
930         struct efx_nic *efx = tx_queue->efx;
931         unsigned dma_len;
932
933         EFX_BUG_ON_PARANOID(len <= 0);
934
935         while (1) {
936                 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
937                 ++tx_queue->insert_count;
938
939                 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
940                                     tx_queue->read_count >=
941                                     efx->txq_entries);
942
943                 buffer->dma_addr = dma_addr;
944
945                 dma_len = efx_max_tx_len(efx, dma_addr);
946
947                 /* If there is enough space to send then do so */
948                 if (dma_len >= len)
949                         break;
950
951                 buffer->len = dma_len;
952                 buffer->flags = EFX_TX_BUF_CONT;
953                 dma_addr += dma_len;
954                 len -= dma_len;
955         }
956
957         EFX_BUG_ON_PARANOID(!len);
958         buffer->len = len;
959         *final_buffer = buffer;
960 }
961
962
963 /*
964  * Put a TSO header into the TX queue.
965  *
966  * This is special-cased because we know that it is small enough to fit in
967  * a single fragment, and we know it doesn't cross a page boundary.  It
968  * also allows us to not worry about end-of-packet etc.
969  */
970 static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
971                               struct efx_tx_buffer *buffer, u8 *header)
972 {
973         if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
974                 buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
975                                                   header, buffer->len,
976                                                   DMA_TO_DEVICE);
977                 if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
978                                                buffer->dma_addr))) {
979                         kfree(buffer->heap_buf);
980                         buffer->len = 0;
981                         buffer->flags = 0;
982                         return -ENOMEM;
983                 }
984                 buffer->unmap_len = buffer->len;
985                 buffer->dma_offset = 0;
986                 buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
987         }
988
989         ++tx_queue->insert_count;
990         return 0;
991 }
992
993
994 /* Remove buffers put into a tx_queue.  None of the buffers may have
995  * an skb attached.
996  */
997 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
998                                unsigned int insert_count)
999 {
1000         struct efx_tx_buffer *buffer;
1001
1002         /* Work backwards until we hit the original insert pointer value */
1003         while (tx_queue->insert_count != insert_count) {
1004                 --tx_queue->insert_count;
1005                 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
1006                 efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
1007         }
1008 }
1009
1010
1011 /* Parse the SKB header and initialise state. */
1012 static int tso_start(struct tso_state *st, struct efx_nic *efx,
1013                      const struct sk_buff *skb)
1014 {
1015         bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
1016         struct device *dma_dev = &efx->pci_dev->dev;
1017         unsigned int header_len, in_len;
1018         dma_addr_t dma_addr;
1019
1020         st->ip_off = skb_network_header(skb) - skb->data;
1021         st->tcp_off = skb_transport_header(skb) - skb->data;
1022         header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
1023         in_len = skb_headlen(skb) - header_len;
1024         st->header_len = header_len;
1025         st->in_len = in_len;
1026         if (st->protocol == htons(ETH_P_IP)) {
1027                 st->ip_base_len = st->header_len - st->ip_off;
1028                 st->ipv4_id = ntohs(ip_hdr(skb)->id);
1029         } else {
1030                 st->ip_base_len = st->header_len - st->tcp_off;
1031                 st->ipv4_id = 0;
1032         }
1033         st->seqnum = ntohl(tcp_hdr(skb)->seq);
1034
1035         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
1036         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
1037         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
1038
1039         st->out_len = skb->len - header_len;
1040
1041         if (!use_opt_desc) {
1042                 st->header_unmap_len = 0;
1043
1044                 if (likely(in_len == 0)) {
1045                         st->dma_flags = 0;
1046                         st->unmap_len = 0;
1047                         return 0;
1048                 }
1049
1050                 dma_addr = dma_map_single(dma_dev, skb->data + header_len,
1051                                           in_len, DMA_TO_DEVICE);
1052                 st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
1053                 st->dma_addr = dma_addr;
1054                 st->unmap_addr = dma_addr;
1055                 st->unmap_len = in_len;
1056         } else {
1057                 dma_addr = dma_map_single(dma_dev, skb->data,
1058                                           skb_headlen(skb), DMA_TO_DEVICE);
1059                 st->header_dma_addr = dma_addr;
1060                 st->header_unmap_len = skb_headlen(skb);
1061                 st->dma_flags = 0;
1062                 st->dma_addr = dma_addr + header_len;
1063                 st->unmap_len = 0;
1064         }
1065
1066         return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
1067 }
1068
1069 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
1070                             skb_frag_t *frag)
1071 {
1072         st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
1073                                           skb_frag_size(frag), DMA_TO_DEVICE);
1074         if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
1075                 st->dma_flags = 0;
1076                 st->unmap_len = skb_frag_size(frag);
1077                 st->in_len = skb_frag_size(frag);
1078                 st->dma_addr = st->unmap_addr;
1079                 return 0;
1080         }
1081         return -ENOMEM;
1082 }
1083
1084
1085 /**
1086  * tso_fill_packet_with_fragment - form descriptors for the current fragment
1087  * @tx_queue:           Efx TX queue
1088  * @skb:                Socket buffer
1089  * @st:                 TSO state
1090  *
1091  * Form descriptors for the current fragment, until we reach the end
1092  * of fragment or end-of-packet.
1093  */
1094 static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
1095                                           const struct sk_buff *skb,
1096                                           struct tso_state *st)
1097 {
1098         struct efx_tx_buffer *buffer;
1099         int n;
1100
1101         if (st->in_len == 0)
1102                 return;
1103         if (st->packet_space == 0)
1104                 return;
1105
1106         EFX_BUG_ON_PARANOID(st->in_len <= 0);
1107         EFX_BUG_ON_PARANOID(st->packet_space <= 0);
1108
1109         n = min(st->in_len, st->packet_space);
1110
1111         st->packet_space -= n;
1112         st->out_len -= n;
1113         st->in_len -= n;
1114
1115         efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
1116
1117         if (st->out_len == 0) {
1118                 /* Transfer ownership of the skb */
1119                 buffer->skb = skb;
1120                 buffer->flags = EFX_TX_BUF_SKB;
1121         } else if (st->packet_space != 0) {
1122                 buffer->flags = EFX_TX_BUF_CONT;
1123         }
1124
1125         if (st->in_len == 0) {
1126                 /* Transfer ownership of the DMA mapping */
1127                 buffer->unmap_len = st->unmap_len;
1128                 buffer->dma_offset = buffer->unmap_len - buffer->len;
1129                 buffer->flags |= st->dma_flags;
1130                 st->unmap_len = 0;
1131         }
1132
1133         st->dma_addr += n;
1134 }
1135
1136
1137 /**
1138  * tso_start_new_packet - generate a new header and prepare for the new packet
1139  * @tx_queue:           Efx TX queue
1140  * @skb:                Socket buffer
1141  * @st:                 TSO state
1142  *
1143  * Generate a new header and prepare for the new packet.  Return 0 on
1144  * success, or -%ENOMEM if failed to alloc header.
1145  */
1146 static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1147                                 const struct sk_buff *skb,
1148                                 struct tso_state *st)
1149 {
1150         struct efx_tx_buffer *buffer =
1151                 efx_tx_queue_get_insert_buffer(tx_queue);
1152         bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
1153         u8 tcp_flags_clear;
1154
1155         if (!is_last) {
1156                 st->packet_space = skb_shinfo(skb)->gso_size;
1157                 tcp_flags_clear = 0x09; /* mask out FIN and PSH */
1158         } else {
1159                 st->packet_space = st->out_len;
1160                 tcp_flags_clear = 0x00;
1161         }
1162
1163         if (!st->header_unmap_len) {
1164                 /* Allocate and insert a DMA-mapped header buffer. */
1165                 struct tcphdr *tsoh_th;
1166                 unsigned ip_length;
1167                 u8 *header;
1168                 int rc;
1169
1170                 header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
1171                 if (!header)
1172                         return -ENOMEM;
1173
1174                 tsoh_th = (struct tcphdr *)(header + st->tcp_off);
1175
1176                 /* Copy and update the headers. */
1177                 memcpy(header, skb->data, st->header_len);
1178
1179                 tsoh_th->seq = htonl(st->seqnum);
1180                 ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
1181
1182                 ip_length = st->ip_base_len + st->packet_space;
1183
1184                 if (st->protocol == htons(ETH_P_IP)) {
1185                         struct iphdr *tsoh_iph =
1186                                 (struct iphdr *)(header + st->ip_off);
1187
1188                         tsoh_iph->tot_len = htons(ip_length);
1189                         tsoh_iph->id = htons(st->ipv4_id);
1190                 } else {
1191                         struct ipv6hdr *tsoh_iph =
1192                                 (struct ipv6hdr *)(header + st->ip_off);
1193
1194                         tsoh_iph->payload_len = htons(ip_length);
1195                 }
1196
1197                 rc = efx_tso_put_header(tx_queue, buffer, header);
1198                 if (unlikely(rc))
1199                         return rc;
1200         } else {
1201                 /* Send the original headers with a TSO option descriptor
1202                  * in front
1203                  */
1204                 u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
1205
1206                 buffer->flags = EFX_TX_BUF_OPTION;
1207                 buffer->len = 0;
1208                 buffer->unmap_len = 0;
1209                 EFX_POPULATE_QWORD_5(buffer->option,
1210                                      ESF_DZ_TX_DESC_IS_OPT, 1,
1211                                      ESF_DZ_TX_OPTION_TYPE,
1212                                      ESE_DZ_TX_OPTION_DESC_TSO,
1213                                      ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
1214                                      ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
1215                                      ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
1216                 ++tx_queue->insert_count;
1217
1218                 /* We mapped the headers in tso_start().  Unmap them
1219                  * when the last segment is completed.
1220                  */
1221                 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
1222                 buffer->dma_addr = st->header_dma_addr;
1223                 buffer->len = st->header_len;
1224                 if (is_last) {
1225                         buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
1226                         buffer->unmap_len = st->header_unmap_len;
1227                         buffer->dma_offset = 0;
1228                         /* Ensure we only unmap them once in case of a
1229                          * later DMA mapping error and rollback
1230                          */
1231                         st->header_unmap_len = 0;
1232                 } else {
1233                         buffer->flags = EFX_TX_BUF_CONT;
1234                         buffer->unmap_len = 0;
1235                 }
1236                 ++tx_queue->insert_count;
1237         }
1238
1239         st->seqnum += skb_shinfo(skb)->gso_size;
1240
1241         /* Linux leaves suitable gaps in the IP ID space for us to fill. */
1242         ++st->ipv4_id;
1243
1244         ++tx_queue->tso_packets;
1245
1246         ++tx_queue->tx_packets;
1247
1248         return 0;
1249 }
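/* Editorial example: for an skb with gso_size = 1448 and initial sequence
 * number S, the first generated segment carries seqnum S, the second
 * S + 1448, and so on, with ipv4_id incremented by one per segment; only the
 * final segment keeps the original FIN/PSH flags, earlier segments having
 * them masked out via tcp_flags_clear.
 */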
1250
1251
1252 /**
1253  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
1254  * @tx_queue:           Efx TX queue
1255  * @skb:                Socket buffer
1256  *
1257  * Context: You must hold netif_tx_lock() to call this function.
1258  *
1259  * Add socket buffer @skb to @tx_queue, performing TSO.  @skb is consumed
1260  * in all cases, and %NETDEV_TX_OK is returned even if the packet could
1261  * not be enqueued.
1262  */
1263 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1264                                struct sk_buff *skb)
1265 {
1266         struct efx_nic *efx = tx_queue->efx;
1267         unsigned int old_insert_count = tx_queue->insert_count;
1268         int frag_i, rc;
1269         struct tso_state state;
1270
1271         /* Find the packet protocol and sanity-check it */
1272         state.protocol = efx_tso_check_protocol(skb);
1273
1274         rc = tso_start(&state, efx, skb);
1275         if (rc)
1276                 goto mem_err;
1277
1278         if (likely(state.in_len == 0)) {
1279                 /* Grab the first payload fragment. */
1280                 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1281                 frag_i = 0;
1282                 rc = tso_get_fragment(&state, efx,
1283                                       skb_shinfo(skb)->frags + frag_i);
1284                 if (rc)
1285                         goto mem_err;
1286         } else {
1287                 /* Payload starts in the header area. */
1288                 frag_i = -1;
1289         }
1290
1291         if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1292                 goto mem_err;
1293
1294         while (1) {
1295                 tso_fill_packet_with_fragment(tx_queue, skb, &state);
1296
1297                 /* Move onto the next fragment? */
1298                 if (state.in_len == 0) {
1299                         if (++frag_i >= skb_shinfo(skb)->nr_frags)
1300                                 /* End of payload reached. */
1301                                 break;
1302                         rc = tso_get_fragment(&state, efx,
1303                                               skb_shinfo(skb)->frags + frag_i);
1304                         if (rc)
1305                                 goto mem_err;
1306                 }
1307
1308                 /* Start at new packet? */
1309                 if (state.packet_space == 0 &&
1310                     tso_start_new_packet(tx_queue, skb, &state) < 0)
1311                         goto mem_err;
1312         }
1313
1314         netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
1315
1316         efx_tx_maybe_stop_queue(tx_queue);
1317
1318         /* Pass off to hardware */
1319         if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
1320                 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
1321
1322                 /* There could be packets left on the partner queue if those
1323                  * SKBs had skb->xmit_more set. If we do not push those they
1324                  * could be left for a long time and cause a netdev watchdog.
1325                  */
1326                 if (txq2->xmit_more_available)
1327                         efx_nic_push_buffers(txq2);
1328
1329                 efx_nic_push_buffers(tx_queue);
1330         } else {
1331                 tx_queue->xmit_more_available = skb->xmit_more;
1332         }
1333
1334         tx_queue->tso_bursts++;
1335         return NETDEV_TX_OK;
1336
1337  mem_err:
1338         netif_err(efx, tx_err, efx->net_dev,
1339                   "Out of memory for TSO headers, or DMA mapping error\n");
1340         dev_kfree_skb_any(skb);
1341
1342         /* Free the DMA mapping we were in the process of writing out */
1343         if (state.unmap_len) {
1344                 if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
1345                         dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
1346                                          state.unmap_len, DMA_TO_DEVICE);
1347                 else
1348                         dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
1349                                        state.unmap_len, DMA_TO_DEVICE);
1350         }
1351
1352         /* Free the header DMA mapping, if using option descriptors */
1353         if (state.header_unmap_len)
1354                 dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
1355                                  state.header_unmap_len, DMA_TO_DEVICE);
1356
1357         efx_enqueue_unwind(tx_queue, old_insert_count);
1358         return NETDEV_TX_OK;
1359 }