sfc: Don't leak PCI DMA maps in the TSO code when the queue fills up
[linux-2.6-omap-h63xx.git] drivers/net/sfc/tx.c
1 /****************************************************************************
2  * Driver for Solarflare Solarstorm network controllers and boards
3  * Copyright 2005-2006 Fen Systems Ltd.
4  * Copyright 2005-2008 Solarflare Communications Inc.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 as published
8  * by the Free Software Foundation, incorporated herein by reference.
9  */
10
11 #include <linux/pci.h>
12 #include <linux/tcp.h>
13 #include <linux/ip.h>
14 #include <linux/in.h>
15 #include <linux/if_ether.h>
16 #include <linux/highmem.h>
17 #include "net_driver.h"
18 #include "tx.h"
19 #include "efx.h"
20 #include "falcon.h"
21 #include "workarounds.h"
22
23 /*
24  * TX descriptor ring full threshold
25  *
26  * The tx_queue descriptor ring fill-level must fall below this value
27  * before we restart the netif queue
28  */
29 #define EFX_NETDEV_TX_THRESHOLD(_tx_queue)      \
30         ((_tx_queue)->efx->type->txd_ring_mask / 2u)
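/* i.e. half of the ring: the netif queue is only restarted once at least
 * half of the descriptors have been completed (see efx_xmit_done()), which
 * keeps the queue from being woken and immediately re-stopped under load.
 */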
31
32 /* We want to be able to nest calls to netif_stop_queue(), since each
33  * channel can have an individual stop on the queue.
34  */
35 void efx_stop_queue(struct efx_nic *efx)
36 {
37         spin_lock_bh(&efx->netif_stop_lock);
38         EFX_TRACE(efx, "stop TX queue\n");
39
40         atomic_inc(&efx->netif_stop_count);
41         netif_stop_queue(efx->net_dev);
42
43         spin_unlock_bh(&efx->netif_stop_lock);
44 }
45
46 /* Wake netif's TX queue
47  * We want to be able to nest calls to netif_stop_queue(), since each
48  * channel can have an individual stop on the queue.
49  */
50 inline void efx_wake_queue(struct efx_nic *efx)
51 {
52         local_bh_disable();
53         if (atomic_dec_and_lock(&efx->netif_stop_count,
54                                 &efx->netif_stop_lock)) {
55                 EFX_TRACE(efx, "waking TX queue\n");
56                 netif_wake_queue(efx->net_dev);
57                 spin_unlock(&efx->netif_stop_lock);
58         }
59         local_bh_enable();
60 }
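
/* Note: efx_stop_queue() and efx_wake_queue() nest around the atomic
 * netif_stop_count: each stop increments the count, and the netif queue is
 * only actually woken when a matching wake brings the count back to zero
 * (the atomic_dec_and_lock() above).
 */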
61
62 static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63                                       struct efx_tx_buffer *buffer)
64 {
65         if (buffer->unmap_len) {
66                 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67                 if (buffer->unmap_single)
68                         pci_unmap_single(pci_dev, buffer->unmap_addr,
69                                          buffer->unmap_len, PCI_DMA_TODEVICE);
70                 else
71                         pci_unmap_page(pci_dev, buffer->unmap_addr,
72                                        buffer->unmap_len, PCI_DMA_TODEVICE);
73                 buffer->unmap_len = 0;
74                 buffer->unmap_single = 0;
75         }
76
77         if (buffer->skb) {
78                 dev_kfree_skb_any((struct sk_buff *) buffer->skb);
79                 buffer->skb = NULL;
80                 EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
81                           "complete\n", tx_queue->queue, tx_queue->read_count);
82         }
83 }
84
85 /**
86  * struct efx_tso_header - a DMA mapped buffer for packet headers
87  * @next: Linked list of free ones.
88  *      The list is protected by the TX queue lock.
89  * @unmap_len: Length to unmap for an oversize buffer, or 0.
90  * @dma_addr: The DMA address of the header below.
91  *
92  * This controls the memory used for a TSO header.  Use TSOH_DATA()
93  * to find the packet header data.  Use TSOH_SIZE() to calculate the
94  * total size required for a given packet header length.  TSO headers
95  * in the free list are exactly %TSOH_STD_SIZE bytes in size.
96  */
97 struct efx_tso_header {
98         union {
99                 struct efx_tso_header *next;
100                 size_t unmap_len;
101         };
102         dma_addr_t dma_addr;
103 };
104
105 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
106                                const struct sk_buff *skb);
107 static void efx_fini_tso(struct efx_tx_queue *tx_queue);
108 static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
109                                struct efx_tso_header *tsoh);
110
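/* Recycle or free the TSO header attached to a TX buffer: headers from the
 * per-queue pool (unmap_len == 0) go back onto tso_headers_free, while
 * oversize headers allocated by efx_tsoh_heap_alloc() are unmapped and
 * freed individually.
 */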
111 static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
112                                  struct efx_tx_buffer *buffer)
113 {
114         if (buffer->tsoh) {
115                 if (likely(!buffer->tsoh->unmap_len)) {
116                         buffer->tsoh->next = tx_queue->tso_headers_free;
117                         tx_queue->tso_headers_free = buffer->tsoh;
118                 } else {
119                         efx_tsoh_heap_free(tx_queue, buffer->tsoh);
120                 }
121                 buffer->tsoh = NULL;
122         }
123 }
124
125
126 /*
127  * Add a socket buffer to a TX queue
128  *
129  * This maps all fragments of a socket buffer for DMA and adds them to
130  * the TX queue.  The queue's insert pointer will be incremented by
131  * the number of fragments in the socket buffer.
132  *
133  * If any DMA mapping fails, any mapped fragments will be unmapped, and
134  * the queue's insert pointer will be restored to its original value.
135  *
136  * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
137  * You must hold netif_tx_lock() to call this function.
138  */
139 static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
140                                   const struct sk_buff *skb)
141 {
142         struct efx_nic *efx = tx_queue->efx;
143         struct pci_dev *pci_dev = efx->pci_dev;
144         struct efx_tx_buffer *buffer;
145         skb_frag_t *fragment;
146         struct page *page;
147         int page_offset;
148         unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
149         dma_addr_t dma_addr, unmap_addr = 0;
150         unsigned int dma_len;
151         unsigned unmap_single;
152         int q_space, i = 0;
153         int rc = NETDEV_TX_OK;
154
155         EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
156
157         if (skb_shinfo((struct sk_buff *)skb)->gso_size)
158                 return efx_enqueue_skb_tso(tx_queue, skb);
159
160         /* Get size of the initial fragment */
161         len = skb_headlen(skb);
162
163         fill_level = tx_queue->insert_count - tx_queue->old_read_count;
164         q_space = efx->type->txd_ring_mask - 1 - fill_level;
165
166         /* Map for DMA.  Use pci_map_single rather than pci_map_page
167          * since this is more efficient on machines with sparse
168          * memory.
169          */
170         unmap_single = 1;
171         dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
172
173         /* Process all fragments */
174         while (1) {
175                 if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
176                         goto pci_err;
177
178                 /* Store fields for marking in the per-fragment final
179                  * descriptor */
180                 unmap_len = len;
181                 unmap_addr = dma_addr;
182
183                 /* Add to TX queue, splitting across DMA boundaries */
184                 do {
185                         if (unlikely(q_space-- <= 0)) {
186                                 /* It might be that completions have
187                                  * happened since the xmit path last
188                                  * checked.  Update the xmit path's
189                                  * copy of read_count.
190                                  */
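                                /* Raising "stopped" before re-reading
                                 * read_count pairs with the barrier in
                                 * efx_xmit_done(): either we observe the
                                 * updated read_count here, or the completion
                                 * path observes stopped != 0 and wakes the
                                 * queue once descriptors are freed. */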
191                                 ++tx_queue->stopped;
192                                 /* This memory barrier protects the
193                                  * change of stopped from the access
194                                  * of read_count. */
195                                 smp_mb();
196                                 tx_queue->old_read_count =
197                                         *(volatile unsigned *)
198                                         &tx_queue->read_count;
199                                 fill_level = (tx_queue->insert_count
200                                               - tx_queue->old_read_count);
201                                 q_space = (efx->type->txd_ring_mask - 1 -
202                                            fill_level);
203                                 if (unlikely(q_space-- <= 0))
204                                         goto stop;
205                                 smp_mb();
206                                 --tx_queue->stopped;
207                         }
208
209                         insert_ptr = (tx_queue->insert_count &
210                                       efx->type->txd_ring_mask);
211                         buffer = &tx_queue->buffer[insert_ptr];
212                         efx_tsoh_free(tx_queue, buffer);
213                         EFX_BUG_ON_PARANOID(buffer->tsoh);
214                         EFX_BUG_ON_PARANOID(buffer->skb);
215                         EFX_BUG_ON_PARANOID(buffer->len);
216                         EFX_BUG_ON_PARANOID(buffer->continuation != 1);
217                         EFX_BUG_ON_PARANOID(buffer->unmap_len);
218
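                        /* dma_len is the number of bytes from dma_addr up to
                         * the next boundary implied by tx_dma_mask, so a
                         * single descriptor never spans a DMA boundary the
                         * hardware cannot handle; longer runs are split
                         * across descriptors by this loop. */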
219                         dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
220                         if (likely(dma_len > len))
221                                 dma_len = len;
222
223                         misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
224                         if (misalign && dma_len + misalign > 512)
225                                 dma_len = 512 - misalign;
226
227                         /* Fill out per descriptor fields */
228                         buffer->len = dma_len;
229                         buffer->dma_addr = dma_addr;
230                         len -= dma_len;
231                         dma_addr += dma_len;
232                         ++tx_queue->insert_count;
233                 } while (len);
234
235                 /* Transfer ownership of the unmapping to the final buffer */
236                 buffer->unmap_addr = unmap_addr;
237                 buffer->unmap_single = unmap_single;
238                 buffer->unmap_len = unmap_len;
239                 unmap_len = 0;
240
241                 /* Get address and size of next fragment */
242                 if (i >= skb_shinfo(skb)->nr_frags)
243                         break;
244                 fragment = &skb_shinfo(skb)->frags[i];
245                 len = fragment->size;
246                 page = fragment->page;
247                 page_offset = fragment->page_offset;
248                 i++;
249                 /* Map for DMA */
250                 unmap_single = 0;
251                 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
252                                         PCI_DMA_TODEVICE);
253         }
254
255         /* Transfer ownership of the skb to the final buffer */
256         buffer->skb = skb;
257         buffer->continuation = 0;
258
259         /* Pass off to hardware */
260         falcon_push_buffers(tx_queue);
261
262         return NETDEV_TX_OK;
263
264  pci_err:
265         EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes, %d "
266                    "fragments for DMA\n", tx_queue->queue, skb->len,
267                    skb_shinfo(skb)->nr_frags + 1);
268
269         /* Mark the packet as transmitted, and free the SKB ourselves */
270         dev_kfree_skb_any((struct sk_buff *)skb);
271         goto unwind;
272
273  stop:
274         rc = NETDEV_TX_BUSY;
275
276         if (tx_queue->stopped == 1)
277                 efx_stop_queue(efx);
278
279  unwind:
280         /* Work backwards until we hit the original insert pointer value */
281         while (tx_queue->insert_count != tx_queue->write_count) {
282                 --tx_queue->insert_count;
283                 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
284                 buffer = &tx_queue->buffer[insert_ptr];
285                 efx_dequeue_buffer(tx_queue, buffer);
286                 buffer->len = 0;
287         }
288
289         /* Free the fragment we were mid-way through pushing.  The skb
290          * head is mapped with pci_map_single(), so honour unmap_single
291          * here, mirroring efx_dequeue_buffer(). */
292         if (unmap_len) {
                if (unmap_single)
                        pci_unmap_single(pci_dev, unmap_addr, unmap_len,
                                         PCI_DMA_TODEVICE);
                else
                        pci_unmap_page(pci_dev, unmap_addr, unmap_len,
                                       PCI_DMA_TODEVICE);
        }
293
294         return rc;
295 }
296
297 /* Remove packets from the TX queue
298  *
299  * This removes packets from the TX queue, up to and including the
300  * specified index.
301  */
302 static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
303                                        unsigned int index)
304 {
305         struct efx_nic *efx = tx_queue->efx;
306         unsigned int stop_index, read_ptr;
307         unsigned int mask = tx_queue->efx->type->txd_ring_mask;
308
309         stop_index = (index + 1) & mask;
310         read_ptr = tx_queue->read_count & mask;
311
312         while (read_ptr != stop_index) {
313                 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
314                 if (unlikely(buffer->len == 0)) {
315                         EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
316                                 "completion id %x\n", tx_queue->queue,
317                                 read_ptr);
318                         efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
319                         return;
320                 }
321
322                 efx_dequeue_buffer(tx_queue, buffer);
323                 buffer->continuation = 1;
324                 buffer->len = 0;
325
326                 ++tx_queue->read_count;
327                 read_ptr = tx_queue->read_count & mask;
328         }
329 }
330
331 /* Initiate a packet transmission on the specified TX queue.
332  * Note that returning anything other than NETDEV_TX_OK will cause the
333  * OS to free the skb.
334  *
335  * This function is split out from efx_hard_start_xmit to allow the
336  * loopback test to direct packets via specific TX queues.  It is
337  * therefore a non-static inline, so as not to penalise performance
338  * for non-loopback transmissions.
339  *
340  * Context: netif_tx_lock held
341  */
342 inline int efx_xmit(struct efx_nic *efx,
343                     struct efx_tx_queue *tx_queue, struct sk_buff *skb)
344 {
345         int rc;
346
347         /* Map fragments for DMA and add to TX queue */
348         rc = efx_enqueue_skb(tx_queue, skb);
349         if (unlikely(rc != NETDEV_TX_OK))
350                 goto out;
351
352         /* Update last TX timer */
353         efx->net_dev->trans_start = jiffies;
354
355  out:
356         return rc;
357 }
358
359 /* Initiate a packet transmission.  We use one channel per CPU
360  * (sharing when we have more CPUs than channels).  On Falcon, the TX
361  * completion events will be directed back to the CPU that transmitted
362  * the packet, which should be cache-efficient.
363  *
364  * Context: non-blocking.
365  * Note that returning anything other than NETDEV_TX_OK will cause the
366  * OS to free the skb.
367  */
368 int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
369 {
370         struct efx_nic *efx = netdev_priv(net_dev);
371         struct efx_tx_queue *tx_queue;
372
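        /* Packets that still need their checksum computed (CHECKSUM_PARTIAL)
         * go to the checksum-offload TX queue; everything else uses the
         * queue with checksum generation disabled. */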
373         if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
374                 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
375         else
376                 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
377
378         return efx_xmit(efx, tx_queue, skb);
379 }
380
381 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
382 {
383         unsigned fill_level;
384         struct efx_nic *efx = tx_queue->efx;
385
386         EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
387
388         efx_dequeue_buffers(tx_queue, index);
389
390         /* See if we need to restart the netif queue.  This barrier
391          * separates the update of read_count from the test of
392          * stopped. */
393         smp_mb();
394         if (unlikely(tx_queue->stopped)) {
395                 fill_level = tx_queue->insert_count - tx_queue->read_count;
396                 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
397                         EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
398
399                         /* Do this under netif_tx_lock(), to avoid racing
400                          * with efx_xmit(). */
401                         netif_tx_lock(efx->net_dev);
402                         if (tx_queue->stopped) {
403                                 tx_queue->stopped = 0;
404                                 efx_wake_queue(efx);
405                         }
406                         netif_tx_unlock(efx->net_dev);
407                 }
408         }
409 }
410
411 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
412 {
413         struct efx_nic *efx = tx_queue->efx;
414         unsigned int txq_size;
415         int i, rc;
416
417         EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
418
419         /* Allocate software ring */
420         txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
421         tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
422         if (!tx_queue->buffer)
423                 return -ENOMEM;
424         for (i = 0; i <= efx->type->txd_ring_mask; ++i)
425                 tx_queue->buffer[i].continuation = 1;
426
427         /* Allocate hardware ring */
428         rc = falcon_probe_tx(tx_queue);
429         if (rc)
430                 goto fail;
431
432         return 0;
433
434  fail:
435         kfree(tx_queue->buffer);
436         tx_queue->buffer = NULL;
437         return rc;
438 }
439
440 int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
441 {
442         EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
443
444         tx_queue->insert_count = 0;
445         tx_queue->write_count = 0;
446         tx_queue->read_count = 0;
447         tx_queue->old_read_count = 0;
448         BUG_ON(tx_queue->stopped);
449
450         /* Set up TX descriptor ring */
451         return falcon_init_tx(tx_queue);
452 }
453
454 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
455 {
456         struct efx_tx_buffer *buffer;
457
458         if (!tx_queue->buffer)
459                 return;
460
461         /* Free any buffers left in the ring */
462         while (tx_queue->read_count != tx_queue->write_count) {
463                 buffer = &tx_queue->buffer[tx_queue->read_count &
464                                            tx_queue->efx->type->txd_ring_mask];
465                 efx_dequeue_buffer(tx_queue, buffer);
466                 buffer->continuation = 1;
467                 buffer->len = 0;
468
469                 ++tx_queue->read_count;
470         }
471 }
472
473 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
474 {
475         EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
476
477         /* Flush TX queue, remove descriptor ring */
478         falcon_fini_tx(tx_queue);
479
480         efx_release_tx_buffers(tx_queue);
481
482         /* Free up TSO header cache */
483         efx_fini_tso(tx_queue);
484
485         /* Release queue's stop on port, if any */
486         if (tx_queue->stopped) {
487                 tx_queue->stopped = 0;
488                 efx_wake_queue(tx_queue->efx);
489         }
490 }
491
492 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
493 {
494         EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
495         falcon_remove_tx(tx_queue);
496
497         kfree(tx_queue->buffer);
498         tx_queue->buffer = NULL;
499 }
500
501
502 /* Efx TCP segmentation acceleration.
503  *
504  * Why?  Because by doing it here in the driver we can go significantly
505  * faster than the kernel's generic segmentation offload (GSO).
506  *
507  * Requires TX checksum offload support.
508  */
509
510 /* Number of bytes inserted at the start of a TSO header buffer,
511  * similar to NET_IP_ALIGN.
512  */
513 #if defined(__i386__) || defined(__x86_64__)
514 #define TSOH_OFFSET     0
515 #else
516 #define TSOH_OFFSET     NET_IP_ALIGN
517 #endif
518
519 #define TSOH_BUFFER(tsoh)       ((u8 *)(tsoh + 1) + TSOH_OFFSET)
520
521 /* Total size of struct efx_tso_header, buffer and padding */
522 #define TSOH_SIZE(hdr_len)                                      \
523         (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
524
525 /* Size of blocks on free list.  Larger blocks must be allocated from
526  * the heap.
527  */
528 #define TSOH_STD_SIZE           128
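
/* A standard block therefore holds:
 *
 *     struct efx_tso_header | TSOH_OFFSET padding | header bytes
 *
 * so a header fits in a standard block whenever
 * TSOH_SIZE(header_len) <= TSOH_STD_SIZE; longer headers fall back to
 * efx_tsoh_heap_alloc().
 */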
529
530 #define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))
531 #define ETH_HDR_LEN(skb)  (skb_network_header(skb) - (skb)->data)
532 #define SKB_TCP_OFF(skb)  PTR_DIFF(tcp_hdr(skb), (skb)->data)
533 #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
534
535 /**
536  * struct tso_state - TSO state for an SKB
537  * @remaining_len: Bytes of data we've yet to segment
538  * @seqnum: Current sequence number
539  * @packet_space: Remaining space in current packet
540  * @ifc: Input fragment cursor.
541  *      Where we are in the current fragment of the incoming SKB.  These
542  *      values get updated in place when we split a fragment over
543  *      multiple packets.
544  * @p: Parameters.
545  *      These values are set once at the start of the TSO send and do
546  *      not get changed as the routine progresses.
547  *
548  * The state used during segmentation.  It is put into this data structure
549  * just to make it easy to pass into inline functions.
550  */
551 struct tso_state {
552         unsigned remaining_len;
553         unsigned seqnum;
554         unsigned packet_space;
555
556         struct {
557                 /* DMA address of current position */
558                 dma_addr_t dma_addr;
559                 /* Remaining length */
560                 unsigned int len;
561                 /* DMA address and length of the whole fragment */
562                 unsigned int unmap_len;
563                 dma_addr_t unmap_addr;
564                 struct page *page;
565                 unsigned page_off;
566         } ifc;
567
568         struct {
569                 /* The number of bytes of header */
570                 unsigned int header_length;
571
572                 /* The number of bytes to put in each outgoing segment. */
573                 int full_packet_size;
574
575                 /* Current IPv4 ID, host endian. */
576                 unsigned ipv4_id;
577         } p;
578 };
579
580
581 /*
582  * Verify that our various assumptions about sk_buffs and the conditions
583  * under which TSO will be attempted hold true.
584  */
585 static inline void efx_tso_check_safe(const struct sk_buff *skb)
586 {
587         EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP));
588         EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
589                             skb->protocol);
590         EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
591         EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
592                              + (tcp_hdr(skb)->doff << 2u)) >
593                             skb_headlen(skb));
594 }
595
596
597 /*
598  * Allocate a page worth of efx_tso_header structures, and string them
599  * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
600  */
601 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
602 {
603
604         struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
605         struct efx_tso_header *tsoh;
606         dma_addr_t dma_addr;
607         u8 *base_kva, *kva;
608
609         base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
610         if (base_kva == NULL) {
611                 EFX_ERR(tx_queue->efx, "Unable to allocate page for TSO"
612                         " headers\n");
613                 return -ENOMEM;
614         }
615
616         /* pci_alloc_consistent() allocates pages. */
617         EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
618
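        /* Carve the page into TSOH_STD_SIZE blocks (e.g. 32 on a 4KiB page)
         * and push each one onto the queue's free list. */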
619         for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
620                 tsoh = (struct efx_tso_header *)kva;
621                 tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
622                 tsoh->next = tx_queue->tso_headers_free;
623                 tx_queue->tso_headers_free = tsoh;
624         }
625
626         return 0;
627 }
628
629
630 /* Free up a TSO header, and all others in the same page. */
631 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
632                                 struct efx_tso_header *tsoh,
633                                 struct pci_dev *pci_dev)
634 {
635         struct efx_tso_header **p;
636         unsigned long base_kva;
637         dma_addr_t base_dma;
638
639         base_kva = (unsigned long)tsoh & PAGE_MASK;
640         base_dma = tsoh->dma_addr & PAGE_MASK;
641
642         p = &tx_queue->tso_headers_free;
643         while (*p != NULL) {
644                 if (((unsigned long)*p & PAGE_MASK) == base_kva)
645                         *p = (*p)->next;
646                 else
647                         p = &(*p)->next;
648         }
649
650         pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
651 }
652
653 static struct efx_tso_header *
654 efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
655 {
656         struct efx_tso_header *tsoh;
657
658         tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
659         if (unlikely(!tsoh))
660                 return NULL;
661
662         tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
663                                         TSOH_BUFFER(tsoh), header_len,
664                                         PCI_DMA_TODEVICE);
665         if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
666                                            tsoh->dma_addr))) {
667                 kfree(tsoh);
668                 return NULL;
669         }
670
671         tsoh->unmap_len = header_len;
672         return tsoh;
673 }
674
675 static void
676 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
677 {
678         pci_unmap_single(tx_queue->efx->pci_dev,
679                          tsoh->dma_addr, tsoh->unmap_len,
680                          PCI_DMA_TODEVICE);
681         kfree(tsoh);
682 }
683
684 /**
685  * efx_tx_queue_insert - push descriptors onto the TX queue
686  * @tx_queue:           Efx TX queue
687  * @dma_addr:           DMA address of fragment
688  * @len:                Length of fragment
689  * @skb:                Only non-null for end of last segment
690  * @end_of_packet:      True if last fragment in a packet
691  * @unmap_addr:         DMA address of fragment for unmapping
692  * @unmap_len:          Only set this in last segment of a fragment
693  *
694  * Push descriptors onto the TX queue.  Return 0 on success or 1 if
695  * @tx_queue is full.
696  */
697 static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
698                                dma_addr_t dma_addr, unsigned len,
699                                const struct sk_buff *skb, int end_of_packet,
700                                dma_addr_t unmap_addr, unsigned unmap_len)
701 {
702         struct efx_tx_buffer *buffer;
703         struct efx_nic *efx = tx_queue->efx;
704         unsigned dma_len, fill_level, insert_ptr, misalign;
705         int q_space;
706
707         EFX_BUG_ON_PARANOID(len <= 0);
708
709         fill_level = tx_queue->insert_count - tx_queue->old_read_count;
710         /* -1 as there is no way to represent all descriptors used */
711         q_space = efx->type->txd_ring_mask - 1 - fill_level;
712
713         while (1) {
714                 if (unlikely(q_space-- <= 0)) {
715                         /* It might be that completions have happened
716                          * since the xmit path last checked.  Update
717                          * the xmit path's copy of read_count.
718                          */
719                         ++tx_queue->stopped;
720                         /* This memory barrier protects the change of
721                          * stopped from the access of read_count. */
722                         smp_mb();
723                         tx_queue->old_read_count =
724                                 *(volatile unsigned *)&tx_queue->read_count;
725                         fill_level = (tx_queue->insert_count
726                                       - tx_queue->old_read_count);
727                         q_space = efx->type->txd_ring_mask - 1 - fill_level;
728                         if (unlikely(q_space-- <= 0))
729                                 return 1;
730                         smp_mb();
731                         --tx_queue->stopped;
732                 }
733
734                 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
735                 buffer = &tx_queue->buffer[insert_ptr];
736                 ++tx_queue->insert_count;
737
738                 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
739                                     tx_queue->read_count >
740                                     efx->type->txd_ring_mask);
741
742                 efx_tsoh_free(tx_queue, buffer);
743                 EFX_BUG_ON_PARANOID(buffer->len);
744                 EFX_BUG_ON_PARANOID(buffer->unmap_len);
745                 EFX_BUG_ON_PARANOID(buffer->skb);
746                 EFX_BUG_ON_PARANOID(buffer->continuation != 1);
747                 EFX_BUG_ON_PARANOID(buffer->tsoh);
748
749                 buffer->dma_addr = dma_addr;
750
751                 /* Ensure we do not cross a boundary unsupported by H/W */
752                 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
753
754                 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
755                 if (misalign && dma_len + misalign > 512)
756                         dma_len = 512 - misalign;
757
758                 /* If there is enough space to send then do so */
759                 if (dma_len >= len)
760                         break;
761
762                 buffer->len = dma_len; /* Don't set the other members */
763                 dma_addr += dma_len;
764                 len -= dma_len;
765         }
766
767         EFX_BUG_ON_PARANOID(!len);
768         buffer->len = len;
769         buffer->skb = skb;
770         buffer->continuation = !end_of_packet;
771         buffer->unmap_addr = unmap_addr;
772         buffer->unmap_len = unmap_len;
773         return 0;
774 }
775
776
777 /*
778  * Put a TSO header into the TX queue.
779  *
780  * This is special-cased because we know that it is small enough to fit in
781  * a single fragment, and we know it doesn't cross a page boundary.  It
782  * also allows us to not worry about end-of-packet etc.
783  */
784 static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
785                                       struct efx_tso_header *tsoh, unsigned len)
786 {
787         struct efx_tx_buffer *buffer;
788
789         buffer = &tx_queue->buffer[tx_queue->insert_count &
790                                    tx_queue->efx->type->txd_ring_mask];
791         efx_tsoh_free(tx_queue, buffer);
792         EFX_BUG_ON_PARANOID(buffer->len);
793         EFX_BUG_ON_PARANOID(buffer->unmap_len);
794         EFX_BUG_ON_PARANOID(buffer->skb);
795         EFX_BUG_ON_PARANOID(buffer->continuation != 1);
796         EFX_BUG_ON_PARANOID(buffer->tsoh);
797         buffer->len = len;
798         buffer->dma_addr = tsoh->dma_addr;
799         buffer->tsoh = tsoh;
800
801         ++tx_queue->insert_count;
802 }
803
804
805 /* Remove descriptors put into a tx_queue. */
806 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
807 {
808         struct efx_tx_buffer *buffer;
809
810         /* Work backwards until we hit the original insert pointer value */
811         while (tx_queue->insert_count != tx_queue->write_count) {
812                 --tx_queue->insert_count;
813                 buffer = &tx_queue->buffer[tx_queue->insert_count &
814                                            tx_queue->efx->type->txd_ring_mask];
815                 efx_tsoh_free(tx_queue, buffer);
816                 EFX_BUG_ON_PARANOID(buffer->skb);
817                 buffer->len = 0;
818                 buffer->continuation = 1;
819                 if (buffer->unmap_len) {
820                         pci_unmap_page(tx_queue->efx->pci_dev,
821                                        buffer->unmap_addr,
822                                        buffer->unmap_len, PCI_DMA_TODEVICE);
823                         buffer->unmap_len = 0;
824                 }
825         }
826 }
827
828
829 /* Parse the SKB header and initialise state. */
830 static inline void tso_start(struct tso_state *st, const struct sk_buff *skb)
831 {
832         /* The combined size of all Ethernet/IP/TCP headers is the TCP header
833          * size plus the offset of the TCP header from the start of the packet.
834          */
835         st->p.header_length = ((tcp_hdr(skb)->doff << 2u)
836                                + PTR_DIFF(tcp_hdr(skb), skb->data));
837         st->p.full_packet_size = (st->p.header_length
838                                   + skb_shinfo(skb)->gso_size);
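
        /* For example, an untagged Ethernet/IPv4/TCP frame with no IP or TCP
         * options gives header_length = 14 + 20 + 20 = 54 bytes and
         * full_packet_size = 54 + gso_size. */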
839
840         st->p.ipv4_id = ntohs(ip_hdr(skb)->id);
841         st->seqnum = ntohl(tcp_hdr(skb)->seq);
842
843         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
844         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
845         EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
846
847         st->packet_space = st->p.full_packet_size;
848         st->remaining_len = skb->len - st->p.header_length;
849 }
850
851
852 /**
853  * tso_get_fragment - record fragment details and map for DMA
854  * @st:                 TSO state
855  * @efx:                Efx NIC
856  * @len:                Length of fragment
857  * @page:               Page containing the fragment data
 * @page_off:           Offset of the fragment data within @page
858  *
859  * Record fragment details and map for DMA.  Return 0 on success, or
860  * -%ENOMEM if DMA mapping fails.
861  */
862 static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
863                                    int len, struct page *page, int page_off)
864 {
865
866         st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
867                                           len, PCI_DMA_TODEVICE);
868         if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
869                 st->ifc.unmap_len = len;
870                 st->ifc.len = len;
871                 st->ifc.dma_addr = st->ifc.unmap_addr;
872                 st->ifc.page = page;
873                 st->ifc.page_off = page_off;
874                 return 0;
875         }
876         return -ENOMEM;
877 }
878
879
880 /**
881  * tso_fill_packet_with_fragment - form descriptors for the current fragment
882  * @tx_queue:           Efx TX queue
883  * @skb:                Socket buffer
884  * @st:                 TSO state
885  *
886  * Form descriptors for the current fragment, until we reach the end
887  * of fragment or end-of-packet.  Return 0 on success, 1 if not enough
888  * space in @tx_queue.
889  */
890 static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
891                                                 const struct sk_buff *skb,
892                                                 struct tso_state *st)
893 {
894
895         int n, end_of_packet, rc;
896
897         if (st->ifc.len == 0)
898                 return 0;
899         if (st->packet_space == 0)
900                 return 0;
901
902         EFX_BUG_ON_PARANOID(st->ifc.len <= 0);
903         EFX_BUG_ON_PARANOID(st->packet_space <= 0);
904
905         n = min(st->ifc.len, st->packet_space);
906
907         st->packet_space -= n;
908         st->remaining_len -= n;
909         st->ifc.len -= n;
910         st->ifc.page_off += n;
911         end_of_packet = st->remaining_len == 0 || st->packet_space == 0;
912
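        /* Only the final descriptor of the whole payload carries the skb
         * pointer, and only the final descriptor of this fragment carries
         * unmap_len, so exactly one buffer owns each resource when
         * efx_dequeue_buffer() runs on completion. */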
913         rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n,
914                                  st->remaining_len ? NULL : skb,
915                                  end_of_packet, st->ifc.unmap_addr,
916                                  st->ifc.len ? 0 : st->ifc.unmap_len);
917
918         st->ifc.dma_addr += n;
919
920         return rc;
921 }
922
923
924 /**
925  * tso_start_new_packet - generate a new header and prepare for the new packet
926  * @tx_queue:           Efx TX queue
927  * @skb:                Socket buffer
928  * @st:                 TSO state
929  *
930  * Generate a new header and prepare for the new packet.  Return 0 on
931  * success, or -1 if a header could not be allocated.
932  */
933 static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
934                                        const struct sk_buff *skb,
935                                        struct tso_state *st)
936 {
937         struct efx_tso_header *tsoh;
938         struct iphdr *tsoh_iph;
939         struct tcphdr *tsoh_th;
940         unsigned ip_length;
941         u8 *header;
942
943         /* Allocate a DMA-mapped header buffer. */
944         if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
945                 if (tx_queue->tso_headers_free == NULL) {
946                         if (efx_tsoh_block_alloc(tx_queue))
947                                 return -1;
948                 }
949                 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
950                 tsoh = tx_queue->tso_headers_free;
951                 tx_queue->tso_headers_free = tsoh->next;
952                 tsoh->unmap_len = 0;
953         } else {
954                 tx_queue->tso_long_headers++;
955                 tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length);
956                 if (unlikely(!tsoh))
957                         return -1;
958         }
959
960         header = TSOH_BUFFER(tsoh);
961         tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
962         tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
963
964         /* Copy and update the headers. */
965         memcpy(header, skb->data, st->p.header_length);
966
967         tsoh_th->seq = htonl(st->seqnum);
968         st->seqnum += skb_shinfo(skb)->gso_size;
969         if (st->remaining_len > skb_shinfo(skb)->gso_size) {
970                 /* This packet will not finish the TSO burst. */
971                 ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
972                 tsoh_th->fin = 0;
973                 tsoh_th->psh = 0;
974         } else {
975                 /* This packet will be the last in the TSO burst. */
976                 ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
977                              + st->remaining_len);
978                 tsoh_th->fin = tcp_hdr(skb)->fin;
979                 tsoh_th->psh = tcp_hdr(skb)->psh;
980         }
981         tsoh_iph->tot_len = htons(ip_length);
982
983         /* Linux leaves suitable gaps in the IP ID space for us to fill. */
984         tsoh_iph->id = htons(st->p.ipv4_id);
985         st->p.ipv4_id++;
986
987         st->packet_space = skb_shinfo(skb)->gso_size;
988         ++tx_queue->tso_packets;
989
990         /* Form a descriptor for this header. */
991         efx_tso_put_header(tx_queue, tsoh, st->p.header_length);
992
993         return 0;
994 }
995
996
997 /**
998  * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
999  * @tx_queue:           Efx TX queue
1000  * @skb:                Socket buffer
1001  *
1002  * Context: You must hold netif_tx_lock() to call this function.
1003  *
1004  * Add socket buffer @skb to @tx_queue, performing TSO, or return non-zero
1005  * if @skb could not be enqueued.  In all cases @skb is consumed.  Return
1006  * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
1007  */
1008 static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1009                                const struct sk_buff *skb)
1010 {
1011         int frag_i, rc, rc2 = NETDEV_TX_OK;
1012         struct tso_state state;
1013         skb_frag_t *f;
1014
1015         /* Verify TSO is safe - these checks should never fail. */
1016         efx_tso_check_safe(skb);
1017
1018         EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
1019
1020         tso_start(&state, skb);
1021
1022         /* In the common case the skb header area contains exactly the
1023          * headers and all payload is in the frag list.
1024          */
1025         if (skb_headlen(skb) == state.p.header_length) {
1026                 /* Grab the first payload fragment. */
1027                 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1028                 frag_i = 0;
1029                 f = &skb_shinfo(skb)->frags[frag_i];
1030                 rc = tso_get_fragment(&state, tx_queue->efx,
1031                                       f->size, f->page, f->page_offset);
1032                 if (rc)
1033                         goto mem_err;
1034         } else {
1035                 /* It may look like this code fragment assumes that the
1036                  * skb->data portion does not cross a page boundary, but
1037                  * that is not the case.  It is guaranteed to be direct
1038                  * mapped memory, and therefore is physically contiguous,
1039                  * and so DMA will work fine.  kmap_atomic() on this region
1040                  * will just return the direct mapping, so that will work
1041                  * too.
1042                  */
1043                 int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
1044                 int hl = state.p.header_length;
1045                 rc = tso_get_fragment(&state, tx_queue->efx,
1046                                       skb_headlen(skb) - hl,
1047                                       virt_to_page(skb->data), page_off + hl);
1048                 if (rc)
1049                         goto mem_err;
1050                 frag_i = -1;
1051         }
1052
1053         if (tso_start_new_packet(tx_queue, skb, &state) < 0)
1054                 goto mem_err;
1055
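        /* Main segmentation loop: each pass queues up to
         * min(bytes left in fragment, bytes left in packet).  When the
         * current fragment is exhausted the next one is mapped; when
         * packet_space reaches zero a fresh header is emitted via
         * tso_start_new_packet(). */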
1056         while (1) {
1057                 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1058                 if (unlikely(rc))
1059                         goto stop;
1060
1061                 /* Move onto the next fragment? */
1062                 if (state.ifc.len == 0) {
1063                         if (++frag_i >= skb_shinfo(skb)->nr_frags)
1064                                 /* End of payload reached. */
1065                                 break;
1066                         f = &skb_shinfo(skb)->frags[frag_i];
1067                         rc = tso_get_fragment(&state, tx_queue->efx,
1068                                               f->size, f->page, f->page_offset);
1069                         if (rc)
1070                                 goto mem_err;
1071                 }
1072
1073                 /* Start at new packet? */
1074                 if (state.packet_space == 0 &&
1075                     tso_start_new_packet(tx_queue, skb, &state) < 0)
1076                         goto mem_err;
1077         }
1078
1079         /* Pass off to hardware */
1080         falcon_push_buffers(tx_queue);
1081
1082         tx_queue->tso_bursts++;
1083         return NETDEV_TX_OK;
1084
1085  mem_err:
1086         EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping"
1087                 " error\n");
1088         dev_kfree_skb_any((struct sk_buff *)skb);
1089         goto unwind;
1090
1091  stop:
1092         rc2 = NETDEV_TX_BUSY;
1093
1094         /* Stop the queue if it wasn't stopped before. */
1095         if (tx_queue->stopped == 1)
1096                 efx_stop_queue(tx_queue->efx);
1097
1098  unwind:
1099         /* Free the DMA mapping we were in the process of writing out */
1100         if (state.ifc.unmap_len)
1101                 pci_unmap_page(tx_queue->efx->pci_dev, state.ifc.unmap_addr,
1102                                state.ifc.unmap_len, PCI_DMA_TODEVICE);
1103
1104         efx_enqueue_unwind(tx_queue);
1105         return rc2;
1106 }
1107
1108
1109 /*
1110  * Free up all TSO data structures associated with tx_queue.  This
1111  * routine should be called only when the tx_queue is empty and
1112  * will no longer be used.
1113  */
1114 static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1115 {
1116         unsigned i;
1117
1118         if (tx_queue->buffer) {
1119                 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
1120                         efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1121         }
1122
1123         while (tx_queue->tso_headers_free != NULL)
1124                 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
1125                                     tx_queue->efx->pci_dev);
1126 }