drivers/dma/ioat_dma.c
1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2007 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/workqueue.h>
36 #include <linux/i7300_idle.h>
37 #include "ioatdma.h"
38 #include "ioatdma_registers.h"
39 #include "ioatdma_hw.h"
40
41 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
42 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
43 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
44 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
45
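/*
 * channel register banks are laid out 0x80 bytes apart above the device's
 * common registers, so chan_num() derives a channel number from the offset
 */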
46 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
47 static int ioat_pending_level = 4;
48 module_param(ioat_pending_level, int, 0644);
49 MODULE_PARM_DESC(ioat_pending_level,
50                  "high-water mark for pushing ioat descriptors (default: 4)");
51
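/*
 * RESET_DELAY: wait after a channel reset before ioat_dma_chan_reset_part2
 * re-arms the channel; WATCHDOG_DELAY: poll interval of the channel watchdog
 */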
52 #define RESET_DELAY  msecs_to_jiffies(100)
53 #define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
54 static void ioat_dma_chan_reset_part2(struct work_struct *work);
55 static void ioat_dma_chan_watchdog(struct work_struct *work);
56
57 /*
58  * workaround for IOAT ver.3.0 null descriptor issue
59  * (channel returns error when size is 0)
60  */
61 #define NULL_DESC_BUFFER_SIZE 1
62
63 /* internal functions */
64 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
65 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
66
67 static struct ioat_desc_sw *
68 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
69 static struct ioat_desc_sw *
70 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
71
72 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
73                                                 struct ioatdma_device *device,
74                                                 int index)
75 {
76         return device->idx[index];
77 }
78
79 /**
80  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
81  * @irq: interrupt id
82  * @data: interrupt data
83  */
84 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
85 {
86         struct ioatdma_device *instance = data;
87         struct ioat_dma_chan *ioat_chan;
88         unsigned long attnstatus;
89         int bit;
90         u8 intrctrl;
91
92         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
93
94         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
95                 return IRQ_NONE;
96
97         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
98                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
99                 return IRQ_NONE;
100         }
101
102         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
103         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
104                 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
105                 tasklet_schedule(&ioat_chan->cleanup_task);
106         }
107
108         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
109         return IRQ_HANDLED;
110 }
111
112 /**
113  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
114  * @irq: interrupt id
115  * @data: interrupt data
116  */
117 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
118 {
119         struct ioat_dma_chan *ioat_chan = data;
120
121         tasklet_schedule(&ioat_chan->cleanup_task);
122
123         return IRQ_HANDLED;
124 }
125
126 static void ioat_dma_cleanup_tasklet(unsigned long data);
127
128 /**
129  * ioat_dma_enumerate_channels - find and initialize the device's channels
130  * @device: the device to be enumerated
131  */
132 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
133 {
134         u8 xfercap_scale;
135         u32 xfercap;
136         int i;
137         struct ioat_dma_chan *ioat_chan;
138
139         /*
140          * IOAT ver.3 workarounds
141          */
142         if (device->version == IOAT_VER_3_0) {
143                 u32 chan_err_mask;
144                 u16 dev_id;
145                 u32 dmauncerrsts;
146
147                 /*
148                  * Write CHANERRMSK_INT with 3E07h to mask out the errors
149                  * that can cause stability issues for IOAT ver.3
150                  */
151                 chan_err_mask = 0x3E07;
152                 pci_write_config_dword(device->pdev,
153                         IOAT_PCI_CHANERRMASK_INT_OFFSET,
154                         chan_err_mask);
155
156                 /*
157                  * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
158                  * (workaround for spurious config parity error after restart)
159                  */
160                 pci_read_config_word(device->pdev,
161                         IOAT_PCI_DEVICE_ID_OFFSET,
162                         &dev_id);
163                 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
164                         dmauncerrsts = 0x10;
165                         pci_write_config_dword(device->pdev,
166                                 IOAT_PCI_DMAUNCERRSTS_OFFSET,
167                                 dmauncerrsts);
168                 }
169         }
170
171         device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
172         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
173         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
174
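        /*
         * If the i7300 idle driver can use this platform it takes over one
         * DMA channel (CONFIG_I7300_IDLE_IOAT_CHANNEL), so advertise one
         * channel fewer to dmaengine.
         */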
175 #ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
176         if (i7300_idle_platform_probe(NULL, NULL) == 0) {
177                 device->common.chancnt--;
178         }
179 #endif
180         for (i = 0; i < device->common.chancnt; i++) {
181                 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
182                 if (!ioat_chan) {
183                         device->common.chancnt = i;
184                         break;
185                 }
186
187                 ioat_chan->device = device;
188                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
189                 ioat_chan->xfercap = xfercap;
190                 ioat_chan->desccount = 0;
191                 INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
192                 if (ioat_chan->device->version != IOAT_VER_1_2) {
193                         writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
194                                         | IOAT_DMA_DCA_ANY_CPU,
195                                 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
196                 }
197                 spin_lock_init(&ioat_chan->cleanup_lock);
198                 spin_lock_init(&ioat_chan->desc_lock);
199                 INIT_LIST_HEAD(&ioat_chan->free_desc);
200                 INIT_LIST_HEAD(&ioat_chan->used_desc);
201                 /* This should be made common somewhere in dmaengine.c */
202                 ioat_chan->common.device = &device->common;
203                 list_add_tail(&ioat_chan->common.device_node,
204                               &device->common.channels);
205                 device->idx[i] = ioat_chan;
206                 tasklet_init(&ioat_chan->cleanup_task,
207                              ioat_dma_cleanup_tasklet,
208                              (unsigned long) ioat_chan);
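                /* keep the cleanup tasklet from running until the channel
                 * has descriptors; it is re-enabled in alloc_chan_resources */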
209                 tasklet_disable(&ioat_chan->cleanup_task);
210         }
211         return device->common.chancnt;
212 }
213
214 /**
215  * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
216  *                                 descriptors to hw
217  * @chan: DMA channel handle
218  */
219 static inline void __ioat1_dma_memcpy_issue_pending(
220                                                 struct ioat_dma_chan *ioat_chan)
221 {
222         ioat_chan->pending = 0;
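        /* the APPEND command makes the engine re-read the descriptor chain
         * and pick up anything linked in since it last stopped */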
223         writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
224 }
225
226 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
227 {
228         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
229
230         if (ioat_chan->pending > 0) {
231                 spin_lock_bh(&ioat_chan->desc_lock);
232                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
233                 spin_unlock_bh(&ioat_chan->desc_lock);
234         }
235 }
236
237 static inline void __ioat2_dma_memcpy_issue_pending(
238                                                 struct ioat_dma_chan *ioat_chan)
239 {
240         ioat_chan->pending = 0;
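        /* on the ring-based (v2+) hardware, writing the updated descriptor
         * count to DMACOUNT exposes the newly queued descriptors to the engine */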
241         writew(ioat_chan->dmacount,
242                ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
243 }
244
245 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
246 {
247         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
248
249         if (ioat_chan->pending > 0) {
250                 spin_lock_bh(&ioat_chan->desc_lock);
251                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
252                 spin_unlock_bh(&ioat_chan->desc_lock);
253         }
254 }
255
256
257 /**
258  * ioat_dma_chan_reset_part2 - reinit the channel after a reset
259  */
260 static void ioat_dma_chan_reset_part2(struct work_struct *work)
261 {
262         struct ioat_dma_chan *ioat_chan =
263                 container_of(work, struct ioat_dma_chan, work.work);
264         struct ioat_desc_sw *desc;
265
266         spin_lock_bh(&ioat_chan->cleanup_lock);
267         spin_lock_bh(&ioat_chan->desc_lock);
268
269         ioat_chan->completion_virt->low = 0;
270         ioat_chan->completion_virt->high = 0;
271         ioat_chan->pending = 0;
272
273         /*
274          * count the descriptors waiting, and be sure to do it
275          * right for both the CB1 line and the CB2 ring
276          */
277         ioat_chan->dmacount = 0;
278         if (ioat_chan->used_desc.prev) {
279                 desc = to_ioat_desc(ioat_chan->used_desc.prev);
280                 do {
281                         ioat_chan->dmacount++;
282                         desc = to_ioat_desc(desc->node.next);
283                 } while (&desc->node != ioat_chan->used_desc.next);
284         }
285
286         /*
287          * write the new starting descriptor address
288          * this puts channel engine into ARMED state
289          */
290         desc = to_ioat_desc(ioat_chan->used_desc.prev);
291         switch (ioat_chan->device->version) {
292         case IOAT_VER_1_2:
293                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
294                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
295                 writel(((u64) desc->async_tx.phys) >> 32,
296                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
297
298                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
299                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
300                 break;
301         case IOAT_VER_2_0:
302                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
303                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
304                 writel(((u64) desc->async_tx.phys) >> 32,
305                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
306
307                 /* tell the engine to go with what's left to be done */
308                 writew(ioat_chan->dmacount,
309                        ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
310
311                 break;
312         }
313         dev_err(&ioat_chan->device->pdev->dev,
314                 "chan%d reset - %d descs waiting, %d total desc\n",
315                 chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
316
317         spin_unlock_bh(&ioat_chan->desc_lock);
318         spin_unlock_bh(&ioat_chan->cleanup_lock);
319 }
320
321 /**
322  * ioat_dma_reset_channel - restart a channel
323  * @ioat_chan: IOAT DMA channel handle
324  */
325 static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
326 {
327         u32 chansts, chanerr;
328
329         if (!ioat_chan->used_desc.prev)
330                 return;
331
332         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
333         chansts = (ioat_chan->completion_virt->low
334                                         & IOAT_CHANSTS_DMA_TRANSFER_STATUS);
335         if (chanerr) {
336                 dev_err(&ioat_chan->device->pdev->dev,
337                         "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
338                         chan_num(ioat_chan), chansts, chanerr);
339                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
340         }
341
342         /*
343          * whack it upside the head with a reset
344          * and wait for things to settle out.
345          * force the pending count to a really big negative
346          * to make sure no one forces an issue_pending
347          * while we're waiting.
348          */
349
350         spin_lock_bh(&ioat_chan->desc_lock);
351         ioat_chan->pending = INT_MIN;
352         writeb(IOAT_CHANCMD_RESET,
353                ioat_chan->reg_base
354                + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
355         spin_unlock_bh(&ioat_chan->desc_lock);
356
357         /* schedule the 2nd half instead of sleeping a long time */
358         schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
359 }
360
361 /**
362  * ioat_dma_chan_watchdog - watch for stuck channels
363  */
364 static void ioat_dma_chan_watchdog(struct work_struct *work)
365 {
366         struct ioatdma_device *device =
367                 container_of(work, struct ioatdma_device, work.work);
368         struct ioat_dma_chan *ioat_chan;
369         int i;
370
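        /* local copy of the raw CHANSTS value, kept as two 32-bit halves so
         * the completed-descriptor address can be read on any build */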
371         union {
372                 u64 full;
373                 struct {
374                         u32 low;
375                         u32 high;
376                 };
377         } completion_hw;
378         unsigned long compl_desc_addr_hw;
379
380         for (i = 0; i < device->common.chancnt; i++) {
381                 ioat_chan = ioat_lookup_chan_by_index(device, i);
382
383                 if (ioat_chan->device->version == IOAT_VER_1_2
384                         /* have we started processing anything yet? */
385                     && ioat_chan->last_completion
386                         /* have we completed any since last watchdog cycle? */
387                     && (ioat_chan->last_completion ==
388                                 ioat_chan->watchdog_completion)
389                         /* has TCP stuck on one cookie since last watchdog? */
390                     && (ioat_chan->watchdog_tcp_cookie ==
391                                 ioat_chan->watchdog_last_tcp_cookie)
392                     && (ioat_chan->watchdog_tcp_cookie !=
393                                 ioat_chan->completed_cookie)
394                         /* is there something in the chain to be processed? */
395                         /* CB1 chain always has at least the last one processed */
396                     && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
397                     && ioat_chan->pending == 0) {
398
399                         /*
400                          * check the CHANSTS register for the completed
401                          * descriptor address.
402                          * if it is different from the completion writeback,
403                          * is not zero,
404                          * and has changed since the last watchdog,
405                          *     we can assume the channel
406                          *     is still working correctly
407                          *     and the problem is in the completion writeback;
408                          *     update the completion writeback
409                          *     with the actual CHANSTS value.
410                          * else
411                          *     try resetting the channel
412                          */
413
414                         completion_hw.low = readl(ioat_chan->reg_base +
415                                 IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
416                         completion_hw.high = readl(ioat_chan->reg_base +
417                                 IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
418 #if (BITS_PER_LONG == 64)
419                         compl_desc_addr_hw =
420                                 completion_hw.full
421                                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
422 #else
423                         compl_desc_addr_hw =
424                                 completion_hw.low & IOAT_LOW_COMPLETION_MASK;
425 #endif
426
427                         if ((compl_desc_addr_hw != 0)
428                            && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
429                            && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
430                                 ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
431                                 ioat_chan->completion_virt->low = completion_hw.low;
432                                 ioat_chan->completion_virt->high = completion_hw.high;
433                         } else {
434                                 ioat_dma_reset_channel(ioat_chan);
435                                 ioat_chan->watchdog_completion = 0;
436                                 ioat_chan->last_compl_desc_addr_hw = 0;
437                         }
438
439                 /*
440                  * for version 2.0 if there are descriptors yet to be processed
441                  * and the last completed hasn't changed since the last watchdog
442                  *      if they haven't hit the pending level
443                  *          issue the pending to push them through
444                  *      else
445                  *          try resetting the channel
446                  */
447                 } else if (ioat_chan->device->version == IOAT_VER_2_0
448                     && ioat_chan->used_desc.prev
449                     && ioat_chan->last_completion
450                     && ioat_chan->last_completion == ioat_chan->watchdog_completion) {
451
452                         if (ioat_chan->pending < ioat_pending_level)
453                                 ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
454                         else {
455                                 ioat_dma_reset_channel(ioat_chan);
456                                 ioat_chan->watchdog_completion = 0;
457                         }
458                 } else {
459                         ioat_chan->last_compl_desc_addr_hw = 0;
460                         ioat_chan->watchdog_completion
461                                         = ioat_chan->last_completion;
462                 }
463
464                 ioat_chan->watchdog_last_tcp_cookie =
465                         ioat_chan->watchdog_tcp_cookie;
466         }
467
468         schedule_delayed_work(&device->work, WATCHDOG_DELAY);
469 }
470
471 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
472 {
473         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
474         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
475         struct ioat_desc_sw *prev, *new;
476         struct ioat_dma_descriptor *hw;
477         dma_cookie_t cookie;
478         LIST_HEAD(new_chain);
479         u32 copy;
480         size_t len;
481         dma_addr_t src, dst;
482         unsigned long orig_flags;
483         unsigned int desc_count = 0;
484
485         /* src and dest and len are stored in the initial descriptor */
486         len = first->len;
487         src = first->src;
488         dst = first->dst;
489         orig_flags = first->async_tx.flags;
490         new = first;
491
492         spin_lock_bh(&ioat_chan->desc_lock);
493         prev = to_ioat_desc(ioat_chan->used_desc.prev);
494         prefetch(prev->hw);
495         do {
496                 copy = min_t(size_t, len, ioat_chan->xfercap);
497
498                 async_tx_ack(&new->async_tx);
499
500                 hw = new->hw;
501                 hw->size = copy;
502                 hw->ctl = 0;
503                 hw->src_addr = src;
504                 hw->dst_addr = dst;
505                 hw->next = 0;
506
507                 /* chain together the physical address list for the HW */
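                /* order the descriptor writes above before linking it where
                 * the hardware can follow prev->next to it */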
508                 wmb();
509                 prev->hw->next = (u64) new->async_tx.phys;
510
511                 len -= copy;
512                 dst += copy;
513                 src += copy;
514
515                 list_add_tail(&new->node, &new_chain);
516                 desc_count++;
517                 prev = new;
518         } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
519
520         if (!new) {
521                 dev_err(&ioat_chan->device->pdev->dev,
522                         "tx submit failed\n");
523                 spin_unlock_bh(&ioat_chan->desc_lock);
524                 return -ENOMEM;
525         }
526
527         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
528         if (new->async_tx.callback) {
529                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
530                 if (first != new) {
531                         /* move the callback into the last desc */
532                         new->async_tx.callback = first->async_tx.callback;
533                         new->async_tx.callback_param
534                                         = first->async_tx.callback_param;
535                         first->async_tx.callback = NULL;
536                         first->async_tx.callback_param = NULL;
537                 }
538         }
539
540         new->tx_cnt = desc_count;
541         new->async_tx.flags = orig_flags; /* client is in control of this ack */
542
543         /* store the original values for use in later cleanup */
544         if (new != first) {
545                 new->src = first->src;
546                 new->dst = first->dst;
547                 new->len = first->len;
548         }
549
550         /* cookie incr and addition to used_list must be atomic */
551         cookie = ioat_chan->common.cookie;
552         cookie++;
553         if (cookie < 0)
554                 cookie = 1;
555         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
556
557         /* write address into NextDescriptor field of last desc in chain */
558         to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
559                                                         first->async_tx.phys;
560         list_splice_tail(&new_chain, &ioat_chan->used_desc);
561
562         ioat_chan->dmacount += desc_count;
563         ioat_chan->pending += desc_count;
564         if (ioat_chan->pending >= ioat_pending_level)
565                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
566         spin_unlock_bh(&ioat_chan->desc_lock);
567
568         return cookie;
569 }
570
571 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
572 {
573         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
574         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
575         struct ioat_desc_sw *new;
576         struct ioat_dma_descriptor *hw;
577         dma_cookie_t cookie;
578         u32 copy;
579         size_t len;
580         dma_addr_t src, dst;
581         unsigned long orig_flags;
582         unsigned int desc_count = 0;
583
584         /* src and dest and len are stored in the initial descriptor */
585         len = first->len;
586         src = first->src;
587         dst = first->dst;
588         orig_flags = first->async_tx.flags;
589         new = first;
590
591         /*
592          * ioat_chan->desc_lock is still in force in version 2 path
593          * it gets unlocked at end of this function
594          */
595         do {
596                 copy = min_t(size_t, len, ioat_chan->xfercap);
597
598                 async_tx_ack(&new->async_tx);
599
600                 hw = new->hw;
601                 hw->size = copy;
602                 hw->ctl = 0;
603                 hw->src_addr = src;
604                 hw->dst_addr = dst;
605
606                 len -= copy;
607                 dst += copy;
608                 src += copy;
609                 desc_count++;
610         } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
611
612         if (!new) {
613                 dev_err(&ioat_chan->device->pdev->dev,
614                         "tx submit failed\n");
615                 spin_unlock_bh(&ioat_chan->desc_lock);
616                 return -ENOMEM;
617         }
618
619         hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
620         if (new->async_tx.callback) {
621                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
622                 if (first != new) {
623                         /* move the callback into the last desc */
624                         new->async_tx.callback = first->async_tx.callback;
625                         new->async_tx.callback_param
626                                         = first->async_tx.callback_param;
627                         first->async_tx.callback = NULL;
628                         first->async_tx.callback_param = NULL;
629                 }
630         }
631
632         new->tx_cnt = desc_count;
633         new->async_tx.flags = orig_flags; /* client is in control of this ack */
634
635         /* store the original values for use in later cleanup */
636         if (new != first) {
637                 new->src = first->src;
638                 new->dst = first->dst;
639                 new->len = first->len;
640         }
641
642         /* cookie incr and addition to used_list must be atomic */
643         cookie = ioat_chan->common.cookie;
644         cookie++;
645         if (cookie < 0)
646                 cookie = 1;
647         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
648
649         ioat_chan->dmacount += desc_count;
650         ioat_chan->pending += desc_count;
651         if (ioat_chan->pending >= ioat_pending_level)
652                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
653         spin_unlock_bh(&ioat_chan->desc_lock);
654
655         return cookie;
656 }
657
658 /**
659  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
660  * @ioat_chan: the channel supplying the memory pool for the descriptors
661  * @flags: allocation flags
662  */
663 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
664                                         struct ioat_dma_chan *ioat_chan,
665                                         gfp_t flags)
666 {
667         struct ioat_dma_descriptor *desc;
668         struct ioat_desc_sw *desc_sw;
669         struct ioatdma_device *ioatdma_device;
670         dma_addr_t phys;
671
672         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
673         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
674         if (unlikely(!desc))
675                 return NULL;
676
677         desc_sw = kzalloc(sizeof(*desc_sw), flags);
678         if (unlikely(!desc_sw)) {
679                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
680                 return NULL;
681         }
682
683         memset(desc, 0, sizeof(*desc));
684         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
685         switch (ioat_chan->device->version) {
686         case IOAT_VER_1_2:
687                 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
688                 break;
689         case IOAT_VER_2_0:
690         case IOAT_VER_3_0:
691                 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
692                 break;
693         }
694         INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
695
696         desc_sw->hw = desc;
697         desc_sw->async_tx.phys = phys;
698
699         return desc_sw;
700 }
701
702 static int ioat_initial_desc_count = 256;
703 module_param(ioat_initial_desc_count, int, 0644);
704 MODULE_PARM_DESC(ioat_initial_desc_count,
705                  "initial descriptors per channel (default: 256)");
706
707 /**
708  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
709  * @ioat_chan: the channel to be massaged
710  */
711 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
712 {
713         struct ioat_desc_sw *desc, *_desc;
714
715         /* setup used_desc */
716         ioat_chan->used_desc.next = ioat_chan->free_desc.next;
717         ioat_chan->used_desc.prev = NULL;
718
719         /* pull free_desc out of the circle so that every node is a hw
720          * descriptor, but leave it pointing to the list
721          */
722         ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
723         ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
724
725         /* circle link the hw descriptors */
726         desc = to_ioat_desc(ioat_chan->free_desc.next);
727         desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
728         list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
729                 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
730         }
731 }
732
733 /**
734  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
735  * @chan: the channel to be filled out
736  */
737 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
738                                          struct dma_client *client)
739 {
740         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
741         struct ioat_desc_sw *desc;
742         u16 chanctrl;
743         u32 chanerr;
744         int i;
745         LIST_HEAD(tmp_list);
746
747         /* have we already been set up? */
748         if (!list_empty(&ioat_chan->free_desc))
749                 return ioat_chan->desccount;
750
751         /* Setup register to interrupt and write completion status on error */
752         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
753                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
754                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
755         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
756
757         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
758         if (chanerr) {
759                 dev_err(&ioat_chan->device->pdev->dev,
760                         "CHANERR = %x, clearing\n", chanerr);
761                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
762         }
763
764         /* Allocate descriptors */
765         for (i = 0; i < ioat_initial_desc_count; i++) {
766                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
767                 if (!desc) {
768                         dev_err(&ioat_chan->device->pdev->dev,
769                                 "Only %d initial descriptors\n", i);
770                         break;
771                 }
772                 list_add_tail(&desc->node, &tmp_list);
773         }
774         spin_lock_bh(&ioat_chan->desc_lock);
775         ioat_chan->desccount = i;
776         list_splice(&tmp_list, &ioat_chan->free_desc);
777         if (ioat_chan->device->version != IOAT_VER_1_2)
778                 ioat2_dma_massage_chan_desc(ioat_chan);
779         spin_unlock_bh(&ioat_chan->desc_lock);
780
781         /* allocate a completion writeback area */
782         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
783         ioat_chan->completion_virt =
784                 pci_pool_alloc(ioat_chan->device->completion_pool,
785                                GFP_KERNEL,
786                                &ioat_chan->completion_addr);
787         memset(ioat_chan->completion_virt, 0,
788                sizeof(*ioat_chan->completion_virt));
789         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
790                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
791         writel(((u64) ioat_chan->completion_addr) >> 32,
792                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
793
794         tasklet_enable(&ioat_chan->cleanup_task);
795         ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
796         return ioat_chan->desccount;
797 }
798
799 /**
800  * ioat_dma_free_chan_resources - release all the descriptors
801  * @chan: the channel to be cleaned
802  */
803 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
804 {
805         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
806         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
807         struct ioat_desc_sw *desc, *_desc;
808         int in_use_descs = 0;
809
810         tasklet_disable(&ioat_chan->cleanup_task);
811         ioat_dma_memcpy_cleanup(ioat_chan);
812
813         /* Delay 100ms after reset to allow internal DMA logic to quiesce
814          * before removing DMA descriptor resources.
815          */
816         writeb(IOAT_CHANCMD_RESET,
817                ioat_chan->reg_base
818                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
819         mdelay(100);
820
821         spin_lock_bh(&ioat_chan->desc_lock);
822         switch (ioat_chan->device->version) {
823         case IOAT_VER_1_2:
824                 list_for_each_entry_safe(desc, _desc,
825                                          &ioat_chan->used_desc, node) {
826                         in_use_descs++;
827                         list_del(&desc->node);
828                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
829                                       desc->async_tx.phys);
830                         kfree(desc);
831                 }
832                 list_for_each_entry_safe(desc, _desc,
833                                          &ioat_chan->free_desc, node) {
834                         list_del(&desc->node);
835                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
836                                       desc->async_tx.phys);
837                         kfree(desc);
838                 }
839                 break;
840         case IOAT_VER_2_0:
841         case IOAT_VER_3_0:
842                 list_for_each_entry_safe(desc, _desc,
843                                          ioat_chan->free_desc.next, node) {
844                         list_del(&desc->node);
845                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
846                                       desc->async_tx.phys);
847                         kfree(desc);
848                 }
849                 desc = to_ioat_desc(ioat_chan->free_desc.next);
850                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
851                               desc->async_tx.phys);
852                 kfree(desc);
853                 INIT_LIST_HEAD(&ioat_chan->free_desc);
854                 INIT_LIST_HEAD(&ioat_chan->used_desc);
855                 break;
856         }
857         spin_unlock_bh(&ioat_chan->desc_lock);
858
859         pci_pool_free(ioatdma_device->completion_pool,
860                       ioat_chan->completion_virt,
861                       ioat_chan->completion_addr);
862
863         /* one is ok since we left it on there on purpose */
864         if (in_use_descs > 1)
865                 dev_err(&ioat_chan->device->pdev->dev,
866                         "Freeing %d in use descriptors!\n",
867                         in_use_descs - 1);
868
869         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
870         ioat_chan->pending = 0;
871         ioat_chan->dmacount = 0;
872         ioat_chan->watchdog_completion = 0;
873         ioat_chan->last_compl_desc_addr_hw = 0;
874         ioat_chan->watchdog_tcp_cookie =
875                 ioat_chan->watchdog_last_tcp_cookie = 0;
876 }
877
878 /**
879  * ioat1_dma_get_next_descriptor - return the next available descriptor
880  * @ioat_chan: IOAT DMA channel handle
881  *
882  * Gets the next descriptor from the chain, and must be called with the
883  * channel's desc_lock held.  Allocates more descriptors if the channel
884  * has run out.
885  */
886 static struct ioat_desc_sw *
887 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
888 {
889         struct ioat_desc_sw *new;
890
891         if (!list_empty(&ioat_chan->free_desc)) {
892                 new = to_ioat_desc(ioat_chan->free_desc.next);
893                 list_del(&new->node);
894         } else {
895                 /* try to get another desc */
896                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
897                 if (!new) {
898                         dev_err(&ioat_chan->device->pdev->dev,
899                                 "alloc failed\n");
900                         return NULL;
901                 }
902         }
903
904         prefetch(new->hw);
905         return new;
906 }
907
908 static struct ioat_desc_sw *
909 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
910 {
911         struct ioat_desc_sw *new;
912
913         /*
914          * used.prev points to where to start processing
915          * used.next points to next free descriptor
916          * if used.prev == NULL, there are none waiting to be processed
917          * if used.next == used.prev.prev, there is only one free descriptor,
918  *      and we need to use it as a noop descriptor before
919          *      linking in a new set of descriptors, since the device
920          *      has probably already read the pointer to it
921          */
922         if (ioat_chan->used_desc.prev &&
923             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
924
925                 struct ioat_desc_sw *desc;
926                 struct ioat_desc_sw *noop_desc;
927                 int i;
928
929                 /* set up the noop descriptor */
930                 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
931                 /* set size to non-zero value (channel returns error when size is 0) */
932                 noop_desc->hw->size = NULL_DESC_BUFFER_SIZE;
933                 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
934                 noop_desc->hw->src_addr = 0;
935                 noop_desc->hw->dst_addr = 0;
936
937                 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
938                 ioat_chan->pending++;
939                 ioat_chan->dmacount++;
940
941                 /* try to get a few more descriptors */
942                 for (i = 16; i; i--) {
943                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
944                         if (!desc) {
945                                 dev_err(&ioat_chan->device->pdev->dev,
946                                         "alloc failed\n");
947                                 break;
948                         }
949                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
950
951                         desc->hw->next
952                                 = to_ioat_desc(desc->node.next)->async_tx.phys;
953                         to_ioat_desc(desc->node.prev)->hw->next
954                                 = desc->async_tx.phys;
955                         ioat_chan->desccount++;
956                 }
957
958                 ioat_chan->used_desc.next = noop_desc->node.next;
959         }
960         new = to_ioat_desc(ioat_chan->used_desc.next);
961         prefetch(new);
962         ioat_chan->used_desc.next = new->node.next;
963
964         if (ioat_chan->used_desc.prev == NULL)
965                 ioat_chan->used_desc.prev = &new->node;
966
967         prefetch(new->hw);
968         return new;
969 }
970
971 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
972                                                 struct ioat_dma_chan *ioat_chan)
973 {
974         if (!ioat_chan)
975                 return NULL;
976
977         switch (ioat_chan->device->version) {
978         case IOAT_VER_1_2:
979                 return ioat1_dma_get_next_descriptor(ioat_chan);
980         case IOAT_VER_2_0:
981         case IOAT_VER_3_0:
982                 return ioat2_dma_get_next_descriptor(ioat_chan);
983         }
984         return NULL;
985 }
986
987 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
988                                                 struct dma_chan *chan,
989                                                 dma_addr_t dma_dest,
990                                                 dma_addr_t dma_src,
991                                                 size_t len,
992                                                 unsigned long flags)
993 {
994         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
995         struct ioat_desc_sw *new;
996
997         spin_lock_bh(&ioat_chan->desc_lock);
998         new = ioat_dma_get_next_descriptor(ioat_chan);
999         spin_unlock_bh(&ioat_chan->desc_lock);
1000
1001         if (new) {
1002                 new->len = len;
1003                 new->dst = dma_dest;
1004                 new->src = dma_src;
1005                 new->async_tx.flags = flags;
1006                 return &new->async_tx;
1007         } else {
1008                 dev_err(&ioat_chan->device->pdev->dev,
1009                         "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1010                         chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
1011                 return NULL;
1012         }
1013 }
1014
1015 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
1016                                                 struct dma_chan *chan,
1017                                                 dma_addr_t dma_dest,
1018                                                 dma_addr_t dma_src,
1019                                                 size_t len,
1020                                                 unsigned long flags)
1021 {
1022         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1023         struct ioat_desc_sw *new;
1024
1025         spin_lock_bh(&ioat_chan->desc_lock);
1026         new = ioat2_dma_get_next_descriptor(ioat_chan);
1027
1028         /*
1029          * leave ioat_chan->desc_lock set in ioat 2 path
1030          * it will get unlocked at end of tx_submit
1031          */
1032
1033         if (new) {
1034                 new->len = len;
1035                 new->dst = dma_dest;
1036                 new->src = dma_src;
1037                 new->async_tx.flags = flags;
1038                 return &new->async_tx;
1039         } else {
1040                 spin_unlock_bh(&ioat_chan->desc_lock);
1041                 dev_err(&ioat_chan->device->pdev->dev,
1042                         "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
1043                         chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
1044                 return NULL;
1045         }
1046 }
1047
1048 static void ioat_dma_cleanup_tasklet(unsigned long data)
1049 {
1050         struct ioat_dma_chan *chan = (void *)data;
1051         ioat_dma_memcpy_cleanup(chan);
1052         writew(IOAT_CHANCTRL_INT_DISABLE,
1053                chan->reg_base + IOAT_CHANCTRL_OFFSET);
1054 }
1055
1056 static void
1057 ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
1058 {
1059         /*
1060          * yes we are unmapping both _page and _single
1061          * alloc'd regions with unmap_page. Is this
1062          * *really* that bad?
1063          */
1064         if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
1065                 pci_unmap_page(ioat_chan->device->pdev,
1066                                 pci_unmap_addr(desc, dst),
1067                                 pci_unmap_len(desc, len),
1068                                 PCI_DMA_FROMDEVICE);
1069
1070         if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
1071                 pci_unmap_page(ioat_chan->device->pdev,
1072                                 pci_unmap_addr(desc, src),
1073                                 pci_unmap_len(desc, len),
1074                                 PCI_DMA_TODEVICE);
1075 }
1076
1077 /**
1078  * ioat_dma_memcpy_cleanup - clean up finished descriptors
1079  * @chan: ioat channel to be cleaned up
1080  */
1081 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
1082 {
1083         unsigned long phys_complete;
1084         struct ioat_desc_sw *desc, *_desc;
1085         dma_cookie_t cookie = 0;
1086         unsigned long desc_phys;
1087         struct ioat_desc_sw *latest_desc;
1088
1089         prefetch(ioat_chan->completion_virt);
1090
1091         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
1092                 return;
1093
1094         /* The completion writeback can happen at any time,
1095            so reads by the driver need to be atomic operations.
1096            The descriptor physical addresses are limited to 32 bits
1097            when the CPU can only do a 32-bit mov. */
1098
1099 #if (BITS_PER_LONG == 64)
1100         phys_complete =
1101                 ioat_chan->completion_virt->full
1102                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
1103 #else
1104         phys_complete =
1105                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
1106 #endif
1107
1108         if ((ioat_chan->completion_virt->full
1109                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
1110                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
1111                 dev_err(&ioat_chan->device->pdev->dev,
1112                         "Channel halted, chanerr = %x\n",
1113                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
1114
1115                 /* TODO do something to salvage the situation */
1116         }
1117
1118         if (phys_complete == ioat_chan->last_completion) {
1119                 spin_unlock_bh(&ioat_chan->cleanup_lock);
1120                 /*
1121                  * perhaps we're stuck so hard that the watchdog can't go off?
1122                  * try to catch it after 2 seconds
1123                  */
1124                 if (ioat_chan->device->version != IOAT_VER_3_0) {
1125                         if (time_after(jiffies,
1126                                        ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
1127                                 ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
1128                                 ioat_chan->last_completion_time = jiffies;
1129                         }
1130                 }
1131                 return;
1132         }
1133         ioat_chan->last_completion_time = jiffies;
1134
1135         cookie = 0;
1136         if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
1137                 spin_unlock_bh(&ioat_chan->cleanup_lock);
1138                 return;
1139         }
1140
1141         switch (ioat_chan->device->version) {
1142         case IOAT_VER_1_2:
1143                 list_for_each_entry_safe(desc, _desc,
1144                                          &ioat_chan->used_desc, node) {
1145
1146                         /*
1147                          * Incoming DMA requests may use multiple descriptors,
1148                          * due to exceeding xfercap, perhaps. If so, only the
1149                          * last one will have a cookie, and require unmapping.
1150                          */
1151                         if (desc->async_tx.cookie) {
1152                                 cookie = desc->async_tx.cookie;
1153                                 ioat_dma_unmap(ioat_chan, desc);
1154                                 if (desc->async_tx.callback) {
1155                                         desc->async_tx.callback(desc->async_tx.callback_param);
1156                                         desc->async_tx.callback = NULL;
1157                                 }
1158                         }
1159
1160                         if (desc->async_tx.phys != phys_complete) {
1161                                 /*
1162                                  * a completed entry, but not the last, so clean
1163                                  * up if the client is done with the descriptor
1164                                  */
1165                                 if (async_tx_test_ack(&desc->async_tx)) {
1166                                         list_del(&desc->node);
1167                                         list_add_tail(&desc->node,
1168                                                       &ioat_chan->free_desc);
1169                                 } else
1170                                         desc->async_tx.cookie = 0;
1171                         } else {
1172                                 /*
1173                                  * last used desc. Do not remove, so we can
1174                                  * append from it, but don't look at it next
1175                                  * time, either
1176                                  */
1177                                 desc->async_tx.cookie = 0;
1178
1179                                 /* TODO check status bits? */
1180                                 break;
1181                         }
1182                 }
1183                 break;
1184         case IOAT_VER_2_0:
1185         case IOAT_VER_3_0:
1186                 /* has some other thread already cleaned up? */
1187                 if (ioat_chan->used_desc.prev == NULL)
1188                         break;
1189
1190                 /* work backwards to find latest finished desc */
1191                 desc = to_ioat_desc(ioat_chan->used_desc.next);
1192                 latest_desc = NULL;
1193                 do {
1194                         desc = to_ioat_desc(desc->node.prev);
1195                         desc_phys = (unsigned long)desc->async_tx.phys
1196                                        & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
1197                         if (desc_phys == phys_complete) {
1198                                 latest_desc = desc;
1199                                 break;
1200                         }
1201                 } while (&desc->node != ioat_chan->used_desc.prev);
1202
1203                 if (latest_desc != NULL) {
1204
1205                         /* work forwards to clear finished descriptors */
1206                         for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
1207                              &desc->node != latest_desc->node.next &&
1208                              &desc->node != ioat_chan->used_desc.next;
1209                              desc = to_ioat_desc(desc->node.next)) {
1210                                 if (desc->async_tx.cookie) {
1211                                         cookie = desc->async_tx.cookie;
1212                                         desc->async_tx.cookie = 0;
1213                                         ioat_dma_unmap(ioat_chan, desc);
1214                                         if (desc->async_tx.callback) {
1215                                                 desc->async_tx.callback(desc->async_tx.callback_param);
1216                                                 desc->async_tx.callback = NULL;
1217                                         }
1218                                 }
1219                         }
1220
1221                         /* move used.prev up beyond those that are finished */
1222                         if (&desc->node == ioat_chan->used_desc.next)
1223                                 ioat_chan->used_desc.prev = NULL;
1224                         else
1225                                 ioat_chan->used_desc.prev = &desc->node;
1226                 }
1227                 break;
1228         }
1229
1230         spin_unlock_bh(&ioat_chan->desc_lock);
1231
1232         ioat_chan->last_completion = phys_complete;
1233         if (cookie != 0)
1234                 ioat_chan->completed_cookie = cookie;
1235
1236         spin_unlock_bh(&ioat_chan->cleanup_lock);
1237 }
1238
1239 /**
1240  * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
1241  * @chan: IOAT DMA channel handle
1242  * @cookie: DMA transaction identifier
1243  * @done: if not %NULL, updated with last completed transaction
1244  * @used: if not %NULL, updated with last used transaction
1245  */
1246 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
1247                                             dma_cookie_t cookie,
1248                                             dma_cookie_t *done,
1249                                             dma_cookie_t *used)
1250 {
1251         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
1252         dma_cookie_t last_used;
1253         dma_cookie_t last_complete;
1254         enum dma_status ret;
1255
1256         last_used = chan->cookie;
1257         last_complete = ioat_chan->completed_cookie;
1258         ioat_chan->watchdog_tcp_cookie = cookie;
1259
1260         if (done)
1261                 *done = last_complete;
1262         if (used)
1263                 *used = last_used;
1264
1265         ret = dma_async_is_complete(cookie, last_complete, last_used);
1266         if (ret == DMA_SUCCESS)
1267                 return ret;
1268
1269         ioat_dma_memcpy_cleanup(ioat_chan);
1270
1271         last_used = chan->cookie;
1272         last_complete = ioat_chan->completed_cookie;
1273
1274         if (done)
1275                 *done = last_complete;
1276         if (used)
1277                 *used = last_used;
1278
1279         return dma_async_is_complete(cookie, last_complete, last_used);
1280 }
1281
1282 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
1283 {
1284         struct ioat_desc_sw *desc;
1285
1286         spin_lock_bh(&ioat_chan->desc_lock);
1287
1288         desc = ioat_dma_get_next_descriptor(ioat_chan);
1289
1290         if (!desc) {
1291                 dev_err(&ioat_chan->device->pdev->dev,
1292                         "Unable to start null desc - get next desc failed\n");
1293                 spin_unlock_bh(&ioat_chan->desc_lock);
1294                 return;
1295         }
1296
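        /* a NUL descriptor copies no data; it just writes completion status
         * and raises an interrupt so the channel starts processing */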
1297         desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
1298                                 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
1299                                 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
1300         /* set size to non-zero value (channel returns error when size is 0) */
1301         desc->hw->size = NULL_DESC_BUFFER_SIZE;
1302         desc->hw->src_addr = 0;
1303         desc->hw->dst_addr = 0;
1304         async_tx_ack(&desc->async_tx);
1305         switch (ioat_chan->device->version) {
1306         case IOAT_VER_1_2:
1307                 desc->hw->next = 0;
1308                 list_add_tail(&desc->node, &ioat_chan->used_desc);
1309
1310                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1311                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
1312                 writel(((u64) desc->async_tx.phys) >> 32,
1313                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
1314
1315                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
1316                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
1317                 break;
1318         case IOAT_VER_2_0:
1319         case IOAT_VER_3_0:
1320                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
1321                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1322                 writel(((u64) desc->async_tx.phys) >> 32,
1323                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1324
1325                 ioat_chan->dmacount++;
1326                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1327                 break;
1328         }
1329         spin_unlock_bh(&ioat_chan->desc_lock);
1330 }
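
/*
 * Illustrative sketch (not part of this file): the CHAINADDR programming
 * above splits a 64-bit descriptor bus address into two 32-bit MMIO writes.
 * A hypothetical helper making that pattern explicit might look like this.
 */
static inline void example_write_chainaddr(struct ioat_dma_chan *ioat_chan,
                                           unsigned long low_offset,
                                           unsigned long high_offset,
                                           dma_addr_t phys)
{
        /* low 32 bits, then high 32 bits of the descriptor bus address */
        writel((u32)((u64)phys & 0x00000000FFFFFFFF),
               ioat_chan->reg_base + low_offset);
        writel((u32)((u64)phys >> 32),
               ioat_chan->reg_base + high_offset);
}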
1331
1332 /*
1333  * Perform an IOAT transaction to verify the HW works.
1334  */
1335 #define IOAT_TEST_SIZE 2000
1336
1337 static void ioat_dma_test_callback(void *dma_async_param)
1338 {
1339         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1340                 dma_async_param);
1341 }
1342
1343 /**
1344  * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
1345  * @device: device to be tested
1346  */
1347 static int ioat_dma_self_test(struct ioatdma_device *device)
1348 {
1349         int i;
1350         u8 *src;
1351         u8 *dest;
1352         struct dma_chan *dma_chan;
1353         struct dma_async_tx_descriptor *tx;
1354         dma_addr_t dma_dest, dma_src;
1355         dma_cookie_t cookie;
1356         int err = 0;
1357
1358         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1359         if (!src)
1360                 return -ENOMEM;
1361         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1362         if (!dest) {
1363                 kfree(src);
1364                 return -ENOMEM;
1365         }
1366
1367         /* Fill in src buffer */
1368         for (i = 0; i < IOAT_TEST_SIZE; i++)
1369                 src[i] = (u8)i;
1370
1371         /* Start copy, using first DMA channel */
1372         dma_chan = container_of(device->common.channels.next,
1373                                 struct dma_chan,
1374                                 device_node);
1375         if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1376                 dev_err(&device->pdev->dev,
1377                         "selftest cannot allocate chan resource\n");
1378                 err = -ENODEV;
1379                 goto out;
1380         }
1381
1382         dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1383                                  DMA_TO_DEVICE);
1384         dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1385                                   DMA_FROM_DEVICE);
1386         tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1387                                                    IOAT_TEST_SIZE, 0);
1388         if (!tx) {
1389                 dev_err(&device->pdev->dev,
1390                         "Self-test prep failed, disabling\n");
1391                 err = -ENODEV;
1392                 goto free_resources;
1393         }
1394
1395         async_tx_ack(tx);
1396         tx->callback = ioat_dma_test_callback;
1397         tx->callback_param = (void *)0x8086;
1398         cookie = tx->tx_submit(tx);
1399         if (cookie < 0) {
1400                 dev_err(&device->pdev->dev,
1401                         "Self-test setup failed, disabling\n");
1402                 err = -ENODEV;
1403                 goto free_resources;
1404         }
1405         device->common.device_issue_pending(dma_chan);
1406         msleep(1);
1407
1408         if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1409                                         != DMA_SUCCESS) {
1410                 dev_err(&device->pdev->dev,
1411                         "Self-test copy timed out, disabling\n");
1412                 err = -ENODEV;
1413                 goto free_resources;
1414         }
1415         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1416                 dev_err(&device->pdev->dev,
1417                         "Self-test copy failed compare, disabling\n");
1418                 err = -ENODEV;
1419                 goto free_resources;
1420         }
1421
1422 free_resources:
1423         device->common.device_free_chan_resources(dma_chan);
1424 out:
1425         kfree(src);
1426         kfree(dest);
1427         return err;
1428 }
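
/*
 * Illustrative sketch (not part of this file): an ordinary client of this
 * kernel's dmaengine API could perform a similar copy without open-coding
 * the mapping/prep/submit steps done in the self-test above.  The helper
 * name and the busy-wait are made up for the example.
 */
static int example_client_copy(struct dma_chan *chan, void *dst, void *src,
                               size_t len)
{
        dma_cookie_t cookie;

        /* maps both buffers and submits a memcpy descriptor internally */
        cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
        if (cookie < 0)
                return -EIO;

        dma_async_memcpy_issue_pending(chan);

        while (dma_async_memcpy_complete(chan, cookie, NULL, NULL)
                        != DMA_SUCCESS)
                cpu_relax();

        return 0;
}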
1429
1430 static char ioat_interrupt_style[32] = "msix";
1431 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1432                     sizeof(ioat_interrupt_style), 0644);
1433 MODULE_PARM_DESC(ioat_interrupt_style,
1434                  "set ioat interrupt style: msix (default), "
1435                  "msix-single-vector, msi, intx");
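/*
 * Usage note (added, not in the original source): as a 0644 module
 * parameter the interrupt style can be chosen at load time, e.g.
 * "modprobe ioatdma ioat_interrupt_style=msi", and is also visible under
 * /sys/module/ioatdma/parameters/.  It is consulted once, in
 * ioat_dma_setup_interrupts() below.
 */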
1436
1437 /**
1438  * ioat_dma_setup_interrupts - setup interrupt handler
1439  * @device: ioat device
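 *
 * Tries MSI-X (one vector per channel) first, then a single shared MSI-X
 * vector, then MSI, then legacy INTx, falling back at each step if the
 * previous mode cannot be enabled.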
1440  */
1441 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1442 {
1443         struct ioat_dma_chan *ioat_chan;
1444         int err, i, j, msixcnt;
1445         u8 intrctrl = 0;
1446
1447         if (!strcmp(ioat_interrupt_style, "msix"))
1448                 goto msix;
1449         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1450                 goto msix_single_vector;
1451         if (!strcmp(ioat_interrupt_style, "msi"))
1452                 goto msi;
1453         if (!strcmp(ioat_interrupt_style, "intx"))
1454                 goto intx;
1455         dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
1456                 ioat_interrupt_style);
1457         goto err_no_irq;
1458
1459 msix:
1460         /* The number of MSI-X vectors should equal the number of channels */
1461         msixcnt = device->common.chancnt;
1462         for (i = 0; i < msixcnt; i++)
1463                 device->msix_entries[i].entry = i;
1464
1465         err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
1466         if (err < 0)
1467                 goto msi;
1468         if (err > 0)
1469                 goto msix_single_vector;
1470
1471         for (i = 0; i < msixcnt; i++) {
1472                 ioat_chan = ioat_lookup_chan_by_index(device, i);
1473                 err = request_irq(device->msix_entries[i].vector,
1474                                   ioat_dma_do_interrupt_msix,
1475                                   0, "ioat-msix", ioat_chan);
1476                 if (err) {
1477                         for (j = 0; j < i; j++) {
1478                                 ioat_chan =
1479                                         ioat_lookup_chan_by_index(device, j);
1480                                 free_irq(device->msix_entries[j].vector,
1481                                          ioat_chan);
1482                         }
1483                         goto msix_single_vector;
1484                 }
1485         }
1486         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1487         device->irq_mode = msix_multi_vector;
1488         goto done;
1489
1490 msix_single_vector:
1491         device->msix_entries[0].entry = 0;
1492         err = pci_enable_msix(device->pdev, device->msix_entries, 1);
1493         if (err)
1494                 goto msi;
1495
1496         err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
1497                           0, "ioat-msix", device);
1498         if (err) {
1499                 pci_disable_msix(device->pdev);
1500                 goto msi;
1501         }
1502         device->irq_mode = msix_single_vector;
1503         goto done;
1504
1505 msi:
1506         err = pci_enable_msi(device->pdev);
1507         if (err)
1508                 goto intx;
1509
1510         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1511                           0, "ioat-msi", device);
1512         if (err) {
1513                 pci_disable_msi(device->pdev);
1514                 goto intx;
1515         }
1516         /*
1517          * CB 1.2 devices need a bit set in configuration space to enable MSI
1518          */
1519         if (device->version == IOAT_VER_1_2) {
1520                 u32 dmactrl;
1521                 pci_read_config_dword(device->pdev,
1522                                       IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1523                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1524                 pci_write_config_dword(device->pdev,
1525                                        IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1526         }
1527         device->irq_mode = msi;
1528         goto done;
1529
1530 intx:
1531         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1532                           IRQF_SHARED, "ioat-intx", device);
1533         if (err)
1534                 goto err_no_irq;
1535         device->irq_mode = intx;
1536
1537 done:
1538         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1539         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1540         return 0;
1541
1542 err_no_irq:
1543         /* Disable all interrupt generation */
1544         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1545         dev_err(&device->pdev->dev, "no usable interrupts\n");
1546         device->irq_mode = none;
1547         return -1;
1548 }
1549
1550 /**
1551  * ioat_dma_remove_interrupts - remove whatever interrupts were set
1552  * @device: ioat device
1553  */
1554 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
1555 {
1556         struct ioat_dma_chan *ioat_chan;
1557         int i;
1558
1559         /* Disable all interrupt generation */
1560         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1561
1562         switch (device->irq_mode) {
1563         case msix_multi_vector:
1564                 for (i = 0; i < device->common.chancnt; i++) {
1565                         ioat_chan = ioat_lookup_chan_by_index(device, i);
1566                         free_irq(device->msix_entries[i].vector, ioat_chan);
1567                 }
1568                 pci_disable_msix(device->pdev);
1569                 break;
1570         case msix_single_vector:
1571                 free_irq(device->msix_entries[0].vector, device);
1572                 pci_disable_msix(device->pdev);
1573                 break;
1574         case msi:
1575                 free_irq(device->pdev->irq, device);
1576                 pci_disable_msi(device->pdev);
1577                 break;
1578         case intx:
1579                 free_irq(device->pdev->irq, device);
1580                 break;
1581         case none:
1582                 dev_warn(&device->pdev->dev,
1583                          "call to %s without interrupts setup\n", __func__);
1584         }
1585         device->irq_mode = none;
1586 }
1587
1588 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1589                                       void __iomem *iobase)
1590 {
1591         int err;
1592         struct ioatdma_device *device;
1593
1594         device = kzalloc(sizeof(*device), GFP_KERNEL);
1595         if (!device) {
1596                 err = -ENOMEM;
1597                 goto err_kzalloc;
1598         }
1599         device->pdev = pdev;
1600         device->reg_base = iobase;
1601         device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1602
1603         /* DMA coherent memory pool for DMA descriptor allocations */
1604         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1605                                            sizeof(struct ioat_dma_descriptor),
1606                                            64, 0);
1607         if (!device->dma_pool) {
1608                 err = -ENOMEM;
1609                 goto err_dma_pool;
1610         }
1611
1612         device->completion_pool = pci_pool_create("completion_pool", pdev,
1613                                                   sizeof(u64), SMP_CACHE_BYTES,
1614                                                   SMP_CACHE_BYTES);
1615         if (!device->completion_pool) {
1616                 err = -ENOMEM;
1617                 goto err_completion_pool;
1618         }
1619
1620         INIT_LIST_HEAD(&device->common.channels);
1621         ioat_dma_enumerate_channels(device);
1622
1623         device->common.device_alloc_chan_resources =
1624                                                 ioat_dma_alloc_chan_resources;
1625         device->common.device_free_chan_resources =
1626                                                 ioat_dma_free_chan_resources;
1627         device->common.dev = &pdev->dev;
1628
1629         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1630         device->common.device_is_tx_complete = ioat_dma_is_complete;
1631         switch (device->version) {
1632         case IOAT_VER_1_2:
1633                 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1634                 device->common.device_issue_pending =
1635                                                 ioat1_dma_memcpy_issue_pending;
1636                 break;
1637         case IOAT_VER_2_0:
1638         case IOAT_VER_3_0:
1639                 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1640                 device->common.device_issue_pending =
1641                                                 ioat2_dma_memcpy_issue_pending;
1642                 break;
1643         }
1644
1645         dev_err(&device->pdev->dev,
1646                 "Intel(R) I/OAT DMA Engine found,"
1647                 " %d channels, device version 0x%02x, driver version %s\n",
1648                 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1649
1650         err = ioat_dma_setup_interrupts(device);
1651         if (err)
1652                 goto err_setup_interrupts;
1653
1654         err = ioat_dma_self_test(device);
1655         if (err)
1656                 goto err_self_test;
1657
1658         ioat_set_tcp_copy_break(device);
1659
1660         dma_async_device_register(&device->common);
1661
1662         if (device->version != IOAT_VER_3_0) {
1663                 INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
1664                 schedule_delayed_work(&device->work,
1665                                       WATCHDOG_DELAY);
1666         }
1667
1668         return device;
1669
1670 err_self_test:
1671         ioat_dma_remove_interrupts(device);
1672 err_setup_interrupts:
1673         pci_pool_destroy(device->completion_pool);
1674 err_completion_pool:
1675         pci_pool_destroy(device->dma_pool);
1676 err_dma_pool:
1677         kfree(device);
1678 err_kzalloc:
1679         dev_err(&pdev->dev,
1680                 "Intel(R) I/OAT DMA Engine initialization failed\n");
1681         return NULL;
1682 }
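
/*
 * Illustrative sketch (not part of this file): ioat_dma_probe() and
 * ioat_dma_remove() are called from the PCI glue (ioat.c in this tree).
 * A hypothetical minimal caller, with error unwinding abbreviated:
 */
static int __devinit example_pci_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *id)
{
        void __iomem *iobase;
        struct ioatdma_device *device;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, "example_ioat");
        if (err)
                return err;

        pci_set_master(pdev);

        iobase = pci_iomap(pdev, 0, 0);         /* MMIO registers, BAR 0 */
        if (!iobase)
                return -ENOMEM;

        device = ioat_dma_probe(pdev, iobase);
        if (!device)
                return -ENODEV;

        pci_set_drvdata(pdev, device);
        return 0;
}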
1683
1684 void ioat_dma_remove(struct ioatdma_device *device)
1685 {
1686         struct dma_chan *chan, *_chan;
1687         struct ioat_dma_chan *ioat_chan;
1688
1689         ioat_dma_remove_interrupts(device);
1690
1691         dma_async_device_unregister(&device->common);
1692
1693         pci_pool_destroy(device->dma_pool);
1694         pci_pool_destroy(device->completion_pool);
1695
1696         iounmap(device->reg_base);
1697         pci_release_regions(device->pdev);
1698         pci_disable_device(device->pdev);
1699
1700         if (device->version != IOAT_VER_3_0) {
1701                 cancel_delayed_work(&device->work);
1702         }
1703
1704         list_for_each_entry_safe(chan, _chan,
1705                                  &device->common.channels, device_node) {
1706                 ioat_chan = to_ioat_chan(chan);
1707                 list_del(&chan->device_node);
1708                 kfree(ioat_chan);
1709         }
1710         kfree(device);
1711 }
1712