1 /*
2  * Intel I/OAT DMA Linux driver
3  * Copyright(c) 2004 - 2007 Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  */
22
23 /*
24  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
25  * copy operations.
26  */
27
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include "ioatdma.h"
36 #include "ioatdma_registers.h"
37 #include "ioatdma_hw.h"
38
39 #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
40 #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
41 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
42 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
43
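/*
 * Submitted descriptors are chained in memory right away, but the engine is
 * only told to process them (CHANCMD append for v1, a DMACOUNT write for v2)
 * once at least this many are pending or ->device_issue_pending() is called.
 */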
44 static int ioat_pending_level = 4;
45 module_param(ioat_pending_level, int, 0644);
46 MODULE_PARM_DESC(ioat_pending_level,
47                  "high-water mark for pushing ioat descriptors (default: 4)");
48
49 /* internal functions */
50 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
51 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
52
53 static struct ioat_desc_sw *
54 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
55 static struct ioat_desc_sw *
56 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
57
58 static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
59                                                 struct ioatdma_device *device,
60                                                 int index)
61 {
62         return device->idx[index];
63 }
64
65 /**
66  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
67  * @irq: interrupt id
68  * @data: interrupt data
69  */
70 static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
71 {
72         struct ioatdma_device *instance = data;
73         struct ioat_dma_chan *ioat_chan;
74         unsigned long attnstatus;
75         int bit;
76         u8 intrctrl;
77
78         intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
79
80         if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
81                 return IRQ_NONE;
82
83         if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
84                 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
85                 return IRQ_NONE;
86         }
87
88         attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
89         for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
90                 ioat_chan = ioat_lookup_chan_by_index(instance, bit);
91                 tasklet_schedule(&ioat_chan->cleanup_task);
92         }
93
94         writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
95         return IRQ_HANDLED;
96 }
97
98 /**
99  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
100  * @irq: interrupt id
101  * @data: interrupt data
102  */
103 static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
104 {
105         struct ioat_dma_chan *ioat_chan = data;
106
107         tasklet_schedule(&ioat_chan->cleanup_task);
108
109         return IRQ_HANDLED;
110 }
111
112 static void ioat_dma_cleanup_tasklet(unsigned long data);
113
114 /**
115  * ioat_dma_enumerate_channels - find and initialize the device's channels
116  * @device: the device to be enumerated
117  */
118 static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
119 {
120         u8 xfercap_scale;
121         u32 xfercap;
122         int i;
123         struct ioat_dma_chan *ioat_chan;
124
125         device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
126         xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
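        /* xfercap_scale is a power-of-two exponent; 0 is treated as "no limit" */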
127         xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
128
129         for (i = 0; i < device->common.chancnt; i++) {
130                 ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
131                 if (!ioat_chan) {
132                         device->common.chancnt = i;
133                         break;
134                 }
135
136                 ioat_chan->device = device;
137                 ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
138                 ioat_chan->xfercap = xfercap;
139                 ioat_chan->desccount = 0;
140                 if (ioat_chan->device->version != IOAT_VER_1_2) {
141                         writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
142                                         | IOAT_DMA_DCA_ANY_CPU,
143                                 ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
144                 }
145                 spin_lock_init(&ioat_chan->cleanup_lock);
146                 spin_lock_init(&ioat_chan->desc_lock);
147                 INIT_LIST_HEAD(&ioat_chan->free_desc);
148                 INIT_LIST_HEAD(&ioat_chan->used_desc);
149                 /* This should be made common somewhere in dmaengine.c */
150                 ioat_chan->common.device = &device->common;
151                 list_add_tail(&ioat_chan->common.device_node,
152                               &device->common.channels);
153                 device->idx[i] = ioat_chan;
154                 tasklet_init(&ioat_chan->cleanup_task,
155                              ioat_dma_cleanup_tasklet,
156                              (unsigned long) ioat_chan);
157                 tasklet_disable(&ioat_chan->cleanup_task);
158         }
159         return device->common.chancnt;
160 }
161
162 /**
163  * ioat1_dma_memcpy_issue_pending - push appended descriptors that the
164  *                                  hardware may not have seen yet
165  * @chan: DMA channel handle
166  */
167 static inline void __ioat1_dma_memcpy_issue_pending(
168                                                 struct ioat_dma_chan *ioat_chan)
169 {
170         ioat_chan->pending = 0;
171         writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
172 }
173
174 static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
175 {
176         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
177
178         if (ioat_chan->pending != 0) {
179                 spin_lock_bh(&ioat_chan->desc_lock);
180                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
181                 spin_unlock_bh(&ioat_chan->desc_lock);
182         }
183 }
184
185 static inline void __ioat2_dma_memcpy_issue_pending(
186                                                 struct ioat_dma_chan *ioat_chan)
187 {
188         ioat_chan->pending = 0;
189         writew(ioat_chan->dmacount,
190                ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
191 }
192
193 static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
194 {
195         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
196
197         if (ioat_chan->pending != 0) {
198                 spin_lock_bh(&ioat_chan->desc_lock);
199                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
200                 spin_unlock_bh(&ioat_chan->desc_lock);
201         }
202 }
203
204 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
205 {
206         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
207         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
208         struct ioat_desc_sw *prev, *new;
209         struct ioat_dma_descriptor *hw;
210         dma_cookie_t cookie;
211         LIST_HEAD(new_chain);
212         u32 copy;
213         size_t len;
214         dma_addr_t src, dst;
215         unsigned long orig_flags;
216         unsigned int desc_count = 0;
217
218         /* src, dest, and len are stored in the initial descriptor */
219         len = first->len;
220         src = first->src;
221         dst = first->dst;
222         orig_flags = first->async_tx.flags;
223         new = first;
224
225         spin_lock_bh(&ioat_chan->desc_lock);
226         prev = to_ioat_desc(ioat_chan->used_desc.prev);
227         prefetch(prev->hw);
228         do {
229                 copy = min_t(size_t, len, ioat_chan->xfercap);
230
231                 async_tx_ack(&new->async_tx);
232
233                 hw = new->hw;
234                 hw->size = copy;
235                 hw->ctl = 0;
236                 hw->src_addr = src;
237                 hw->dst_addr = dst;
238                 hw->next = 0;
239
240                 /* chain together the physical address list for the HW */
241                 wmb();
242                 prev->hw->next = (u64) new->async_tx.phys;
243
244                 len -= copy;
245                 dst += copy;
246                 src += copy;
247
248                 list_add_tail(&new->node, &new_chain);
249                 desc_count++;
250                 prev = new;
251         } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
252
253         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
254         if (new->async_tx.callback) {
255                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
256                 if (first != new) {
257                         /* move the callback into the last desc */
258                         new->async_tx.callback = first->async_tx.callback;
259                         new->async_tx.callback_param
260                                         = first->async_tx.callback_param;
261                         first->async_tx.callback = NULL;
262                         first->async_tx.callback_param = NULL;
263                 }
264         }
265
266         new->tx_cnt = desc_count;
267         new->async_tx.flags = orig_flags; /* client is in control of this ack */
268
269         /* store the original values for use in later cleanup */
270         if (new != first) {
271                 new->src = first->src;
272                 new->dst = first->dst;
273                 new->len = first->len;
274         }
275
276         /* cookie incr and addition to used_list must be atomic */
277         cookie = ioat_chan->common.cookie;
278         cookie++;
279         if (cookie < 0)
280                 cookie = 1;
281         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
282
283         /* write address into NextDescriptor field of last desc in chain */
284         to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
285                                                         first->async_tx.phys;
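        /* splice the newly built software chain onto the tail of used_desc */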
286         __list_splice(&new_chain, ioat_chan->used_desc.prev);
287
288         ioat_chan->dmacount += desc_count;
289         ioat_chan->pending += desc_count;
290         if (ioat_chan->pending >= ioat_pending_level)
291                 __ioat1_dma_memcpy_issue_pending(ioat_chan);
292         spin_unlock_bh(&ioat_chan->desc_lock);
293
294         return cookie;
295 }
296
297 static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
298 {
299         struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
300         struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
301         struct ioat_desc_sw *new;
302         struct ioat_dma_descriptor *hw;
303         dma_cookie_t cookie;
304         u32 copy;
305         size_t len;
306         dma_addr_t src, dst;
307         unsigned long orig_flags;
308         unsigned int desc_count = 0;
309
310         /* src, dest, and len are stored in the initial descriptor */
311         len = first->len;
312         src = first->src;
313         dst = first->dst;
314         orig_flags = first->async_tx.flags;
315         new = first;
316
317         /*
318          * ioat_chan->desc_lock is still held in the version 2 path;
319          * it gets unlocked at the end of this function
320          */
321         do {
322                 copy = min_t(size_t, len, ioat_chan->xfercap);
323
324                 async_tx_ack(&new->async_tx);
325
326                 hw = new->hw;
327                 hw->size = copy;
328                 hw->ctl = 0;
329                 hw->src_addr = src;
330                 hw->dst_addr = dst;
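                /* hw->next is left alone: ioat v2 descriptors are pre-linked in a ring */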
331
332                 len -= copy;
333                 dst += copy;
334                 src += copy;
335                 desc_count++;
336         } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
337
338         hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
339         if (new->async_tx.callback) {
340                 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
341                 if (first != new) {
342                         /* move the callback into the last desc */
343                         new->async_tx.callback = first->async_tx.callback;
344                         new->async_tx.callback_param
345                                         = first->async_tx.callback_param;
346                         first->async_tx.callback = NULL;
347                         first->async_tx.callback_param = NULL;
348                 }
349         }
350
351         new->tx_cnt = desc_count;
352         new->async_tx.flags = orig_flags; /* client is in control of this ack */
353
354         /* store the original values for use in later cleanup */
355         if (new != first) {
356                 new->src = first->src;
357                 new->dst = first->dst;
358                 new->len = first->len;
359         }
360
361         /* cookie incr and addition to used_list must be atomic */
362         cookie = ioat_chan->common.cookie;
363         cookie++;
364         if (cookie < 0)
365                 cookie = 1;
366         ioat_chan->common.cookie = new->async_tx.cookie = cookie;
367
368         ioat_chan->dmacount += desc_count;
369         ioat_chan->pending += desc_count;
370         if (ioat_chan->pending >= ioat_pending_level)
371                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
372         spin_unlock_bh(&ioat_chan->desc_lock);
373
374         return cookie;
375 }
376
377 /**
378  * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
379  * @ioat_chan: the channel supplying the memory pool for the descriptors
380  * @flags: allocation flags
381  */
382 static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
383                                         struct ioat_dma_chan *ioat_chan,
384                                         gfp_t flags)
385 {
386         struct ioat_dma_descriptor *desc;
387         struct ioat_desc_sw *desc_sw;
388         struct ioatdma_device *ioatdma_device;
389         dma_addr_t phys;
390
391         ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
392         desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
393         if (unlikely(!desc))
394                 return NULL;
395
396         desc_sw = kzalloc(sizeof(*desc_sw), flags);
397         if (unlikely(!desc_sw)) {
398                 pci_pool_free(ioatdma_device->dma_pool, desc, phys);
399                 return NULL;
400         }
401
402         memset(desc, 0, sizeof(*desc));
403         dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
404         switch (ioat_chan->device->version) {
405         case IOAT_VER_1_2:
406                 desc_sw->async_tx.tx_submit = ioat1_tx_submit;
407                 break;
408         case IOAT_VER_2_0:
409                 desc_sw->async_tx.tx_submit = ioat2_tx_submit;
410                 break;
411         }
412         INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
413
414         desc_sw->hw = desc;
415         desc_sw->async_tx.phys = phys;
416
417         return desc_sw;
418 }
419
420 static int ioat_initial_desc_count = 256;
421 module_param(ioat_initial_desc_count, int, 0644);
422 MODULE_PARM_DESC(ioat_initial_desc_count,
423                  "initial descriptors per channel (default: 256)");
424
425 /**
426  * ioat2_dma_massage_chan_desc - link the descriptors into a circle
427  * @ioat_chan: the channel to be massaged
428  */
429 static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
430 {
431         struct ioat_desc_sw *desc, *_desc;
432
433         /* setup used_desc */
434         ioat_chan->used_desc.next = ioat_chan->free_desc.next;
435         ioat_chan->used_desc.prev = NULL;
436
437         /* pull free_desc out of the circle so that every node is a hw
438          * descriptor, but leave it pointing to the list
439          */
440         ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
441         ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
442
443         /* circle link the hw descriptors */
444         desc = to_ioat_desc(ioat_chan->free_desc.next);
445         desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
446         list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
447                 desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
448         }
449 }
450
451 /**
452  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
453  * @chan: the channel to be filled out
 * @client: dma client requesting the resources (currently unused)
454  */
455 static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
456                                          struct dma_client *client)
457 {
458         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
459         struct ioat_desc_sw *desc;
460         u16 chanctrl;
461         u32 chanerr;
462         int i;
463         LIST_HEAD(tmp_list);
464
465         /* have we already been set up? */
466         if (!list_empty(&ioat_chan->free_desc))
467                 return ioat_chan->desccount;
468
469         /* Setup register to interrupt and write completion status on error */
470         chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
471                 IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
472                 IOAT_CHANCTRL_ERR_COMPLETION_EN;
473         writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
474
475         chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
476         if (chanerr) {
477                 dev_err(&ioat_chan->device->pdev->dev,
478                         "CHANERR = %x, clearing\n", chanerr);
479                 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
480         }
481
482         /* Allocate descriptors */
483         for (i = 0; i < ioat_initial_desc_count; i++) {
484                 desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
485                 if (!desc) {
486                         dev_err(&ioat_chan->device->pdev->dev,
487                                 "Only %d initial descriptors\n", i);
488                         break;
489                 }
490                 list_add_tail(&desc->node, &tmp_list);
491         }
492         spin_lock_bh(&ioat_chan->desc_lock);
493         ioat_chan->desccount = i;
494         list_splice(&tmp_list, &ioat_chan->free_desc);
495         if (ioat_chan->device->version != IOAT_VER_1_2)
496                 ioat2_dma_massage_chan_desc(ioat_chan);
497         spin_unlock_bh(&ioat_chan->desc_lock);
498
499         /* allocate a completion writeback area */
500         /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
501         ioat_chan->completion_virt =
502                 pci_pool_alloc(ioat_chan->device->completion_pool,
503                                GFP_KERNEL,
504                                &ioat_chan->completion_addr);
505         memset(ioat_chan->completion_virt, 0,
506                sizeof(*ioat_chan->completion_virt));
507         writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
508                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
509         writel(((u64) ioat_chan->completion_addr) >> 32,
510                ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
511
512         tasklet_enable(&ioat_chan->cleanup_task);
513         ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
514         return ioat_chan->desccount;
515 }
516
517 /**
518  * ioat_dma_free_chan_resources - release all the descriptors
519  * @chan: the channel to be cleaned
520  */
521 static void ioat_dma_free_chan_resources(struct dma_chan *chan)
522 {
523         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
524         struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
525         struct ioat_desc_sw *desc, *_desc;
526         int in_use_descs = 0;
527
528         tasklet_disable(&ioat_chan->cleanup_task);
529         ioat_dma_memcpy_cleanup(ioat_chan);
530
531         /* Delay 100ms after reset to allow internal DMA logic to quiesce
532          * before removing DMA descriptor resources.
533          */
534         writeb(IOAT_CHANCMD_RESET,
535                ioat_chan->reg_base
536                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
537         mdelay(100);
538
539         spin_lock_bh(&ioat_chan->desc_lock);
540         switch (ioat_chan->device->version) {
541         case IOAT_VER_1_2:
542                 list_for_each_entry_safe(desc, _desc,
543                                          &ioat_chan->used_desc, node) {
544                         in_use_descs++;
545                         list_del(&desc->node);
546                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
547                                       desc->async_tx.phys);
548                         kfree(desc);
549                 }
550                 list_for_each_entry_safe(desc, _desc,
551                                          &ioat_chan->free_desc, node) {
552                         list_del(&desc->node);
553                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
554                                       desc->async_tx.phys);
555                         kfree(desc);
556                 }
557                 break;
558         case IOAT_VER_2_0:
559                 list_for_each_entry_safe(desc, _desc,
560                                          ioat_chan->free_desc.next, node) {
561                         list_del(&desc->node);
562                         pci_pool_free(ioatdma_device->dma_pool, desc->hw,
563                                       desc->async_tx.phys);
564                         kfree(desc);
565                 }
566                 desc = to_ioat_desc(ioat_chan->free_desc.next);
567                 pci_pool_free(ioatdma_device->dma_pool, desc->hw,
568                               desc->async_tx.phys);
569                 kfree(desc);
570                 INIT_LIST_HEAD(&ioat_chan->free_desc);
571                 INIT_LIST_HEAD(&ioat_chan->used_desc);
572                 break;
573         }
574         spin_unlock_bh(&ioat_chan->desc_lock);
575
576         pci_pool_free(ioatdma_device->completion_pool,
577                       ioat_chan->completion_virt,
578                       ioat_chan->completion_addr);
579
580         /* one is OK since we left it there on purpose */
581         if (in_use_descs > 1)
582                 dev_err(&ioat_chan->device->pdev->dev,
583                         "Freeing %d in use descriptors!\n",
584                         in_use_descs - 1);
585
586         ioat_chan->last_completion = ioat_chan->completion_addr = 0;
587         ioat_chan->pending = 0;
588         ioat_chan->dmacount = 0;
589 }
590
591 /**
592  * ioat_dma_get_next_descriptor - return the next available descriptor
593  * @ioat_chan: IOAT DMA channel handle
594  *
595  * Gets the next descriptor from the chain, and must be called with the
596  * channel's desc_lock held.  Allocates more descriptors if the channel
597  * has run out.
598  */
599 static struct ioat_desc_sw *
600 ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
601 {
602         struct ioat_desc_sw *new;
603
604         if (!list_empty(&ioat_chan->free_desc)) {
605                 new = to_ioat_desc(ioat_chan->free_desc.next);
606                 list_del(&new->node);
607         } else {
608                 /* try to get another desc */
609                 new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
610                 if (!new) {
611                         dev_err(&ioat_chan->device->pdev->dev,
612                                 "alloc failed\n");
613                         return NULL;
614                 }
615         }
616
617         prefetch(new->hw);
618         return new;
619 }
620
621 static struct ioat_desc_sw *
622 ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
623 {
624         struct ioat_desc_sw *new;
625
626         /*
627          * used.prev points to where to start processing
628          * used.next points to next free descriptor
629          * if used.prev == NULL, there are none waiting to be processed
630          * if used.next == used.prev.prev, there is only one free descriptor,
631          *      and we need to use it as a noop descriptor before
632          *      linking in a new set of descriptors, since the device
633          *      has probably already read the pointer to it
634          */
635         if (ioat_chan->used_desc.prev &&
636             ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
637
638                 struct ioat_desc_sw *desc;
639                 struct ioat_desc_sw *noop_desc;
640                 int i;
641
642                 /* set up the noop descriptor */
643                 noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
644                 noop_desc->hw->size = 0;
645                 noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
646                 noop_desc->hw->src_addr = 0;
647                 noop_desc->hw->dst_addr = 0;
648
649                 ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
650                 ioat_chan->pending++;
651                 ioat_chan->dmacount++;
652
653                 /* try to get a few more descriptors */
654                 for (i = 16; i; i--) {
655                         desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
656                         if (!desc) {
657                                 dev_err(&ioat_chan->device->pdev->dev,
658                                         "alloc failed\n");
659                                 break;
660                         }
661                         list_add_tail(&desc->node, ioat_chan->used_desc.next);
662
663                         desc->hw->next
664                                 = to_ioat_desc(desc->node.next)->async_tx.phys;
665                         to_ioat_desc(desc->node.prev)->hw->next
666                                 = desc->async_tx.phys;
667                         ioat_chan->desccount++;
668                 }
669
670                 ioat_chan->used_desc.next = noop_desc->node.next;
671         }
672         new = to_ioat_desc(ioat_chan->used_desc.next);
673         prefetch(new);
674         ioat_chan->used_desc.next = new->node.next;
675
676         if (ioat_chan->used_desc.prev == NULL)
677                 ioat_chan->used_desc.prev = &new->node;
678
679         prefetch(new->hw);
680         return new;
681 }
682
683 static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
684                                                 struct ioat_dma_chan *ioat_chan)
685 {
686         if (!ioat_chan)
687                 return NULL;
688
689         switch (ioat_chan->device->version) {
690         case IOAT_VER_1_2:
691                 return ioat1_dma_get_next_descriptor(ioat_chan);
692                 break;
693         case IOAT_VER_2_0:
694                 return ioat2_dma_get_next_descriptor(ioat_chan);
695                 break;
696         }
697         return NULL;
698 }
699
700 static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
701                                                 struct dma_chan *chan,
702                                                 dma_addr_t dma_dest,
703                                                 dma_addr_t dma_src,
704                                                 size_t len,
705                                                 unsigned long flags)
706 {
707         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
708         struct ioat_desc_sw *new;
709
710         spin_lock_bh(&ioat_chan->desc_lock);
711         new = ioat_dma_get_next_descriptor(ioat_chan);
712         spin_unlock_bh(&ioat_chan->desc_lock);
713
714         if (new) {
715                 new->len = len;
716                 new->dst = dma_dest;
717                 new->src = dma_src;
718                 new->async_tx.flags = flags;
719                 return &new->async_tx;
720         } else
721                 return NULL;
722 }
723
724 static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
725                                                 struct dma_chan *chan,
726                                                 dma_addr_t dma_dest,
727                                                 dma_addr_t dma_src,
728                                                 size_t len,
729                                                 unsigned long flags)
730 {
731         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
732         struct ioat_desc_sw *new;
733
734         spin_lock_bh(&ioat_chan->desc_lock);
735         new = ioat2_dma_get_next_descriptor(ioat_chan);
736
737         /*
738          * leave ioat_chan->desc_lock held in the ioat 2 path;
739          * it will get unlocked at the end of tx_submit
740          */
741
742         if (new) {
743                 new->len = len;
744                 new->dst = dma_dest;
745                 new->src = dma_src;
746                 new->async_tx.flags = flags;
747                 return &new->async_tx;
748         } else
749                 return NULL;
750 }
751
752 static void ioat_dma_cleanup_tasklet(unsigned long data)
753 {
754         struct ioat_dma_chan *chan = (void *)data;
755         ioat_dma_memcpy_cleanup(chan);
756         writew(IOAT_CHANCTRL_INT_DISABLE,
757                chan->reg_base + IOAT_CHANCTRL_OFFSET);
758 }
759
760 /**
761  * ioat_dma_memcpy_cleanup - clean up finished descriptors
762  * @chan: ioat channel to be cleaned up
763  */
764 static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
765 {
766         unsigned long phys_complete;
767         struct ioat_desc_sw *desc, *_desc;
768         dma_cookie_t cookie = 0;
769         unsigned long desc_phys;
770         struct ioat_desc_sw *latest_desc;
771
772         prefetch(ioat_chan->completion_virt);
773
774         if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
775                 return;
776
777         /* The completion writeback can happen at any time,
778            so reads by the driver need to be atomic operations.
779            The descriptor physical addresses are limited to 32 bits
780            when the CPU can only do a 32-bit mov. */
781
782 #if (BITS_PER_LONG == 64)
783         phys_complete =
784                 ioat_chan->completion_virt->full
785                 & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
786 #else
787         phys_complete =
788                 ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
789 #endif
790
791         if ((ioat_chan->completion_virt->full
792                 & IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
793                                 IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
794                 dev_err(&ioat_chan->device->pdev->dev,
795                         "Channel halted, chanerr = %x\n",
796                         readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));
797
798                 /* TODO do something to salvage the situation */
799         }
800
801         if (phys_complete == ioat_chan->last_completion) {
802                 spin_unlock_bh(&ioat_chan->cleanup_lock);
803                 return;
804         }
805
806         cookie = 0;
807         spin_lock_bh(&ioat_chan->desc_lock);
808         switch (ioat_chan->device->version) {
809         case IOAT_VER_1_2:
810                 list_for_each_entry_safe(desc, _desc,
811                                          &ioat_chan->used_desc, node) {
812
813                         /*
814                          * Incoming DMA requests may use multiple descriptors,
815                          * due to exceeding xfercap, perhaps. If so, only the
816                          * last one will have a cookie, and require unmapping.
817                          */
818                         if (desc->async_tx.cookie) {
819                                 cookie = desc->async_tx.cookie;
820
821                                 /*
822                                  * yes we are unmapping both _page and _single
823                                  * alloc'd regions with unmap_page. Is this
824                                  * *really* that bad?
825                                  */
826                                 pci_unmap_page(ioat_chan->device->pdev,
827                                                 pci_unmap_addr(desc, dst),
828                                                 pci_unmap_len(desc, len),
829                                                 PCI_DMA_FROMDEVICE);
830                                 pci_unmap_page(ioat_chan->device->pdev,
831                                                 pci_unmap_addr(desc, src),
832                                                 pci_unmap_len(desc, len),
833                                                 PCI_DMA_TODEVICE);
834
835                                 if (desc->async_tx.callback) {
836                                         desc->async_tx.callback(desc->async_tx.callback_param);
837                                         desc->async_tx.callback = NULL;
838                                 }
839                         }
840
841                         if (desc->async_tx.phys != phys_complete) {
842                                 /*
843                                  * a completed entry, but not the last, so clean
844                                  * up if the client is done with the descriptor
845                                  */
846                                 if (async_tx_test_ack(&desc->async_tx)) {
847                                         list_del(&desc->node);
848                                         list_add_tail(&desc->node,
849                                                       &ioat_chan->free_desc);
850                                 } else
851                                         desc->async_tx.cookie = 0;
852                         } else {
853                                 /*
854                                  * last used desc. Do not remove, so we can
855                                  * append from it, but don't look at it next
856                                  * time, either
857                                  */
858                                 desc->async_tx.cookie = 0;
859
860                                 /* TODO check status bits? */
861                                 break;
862                         }
863                 }
864                 break;
865         case IOAT_VER_2_0:
866                 /* has some other thread already cleaned up? */
867                 if (ioat_chan->used_desc.prev == NULL)
868                         break;
869
870                 /* work backwards to find latest finished desc */
871                 desc = to_ioat_desc(ioat_chan->used_desc.next);
872                 latest_desc = NULL;
873                 do {
874                         desc = to_ioat_desc(desc->node.prev);
875                         desc_phys = (unsigned long)desc->async_tx.phys
876                                        & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
877                         if (desc_phys == phys_complete) {
878                                 latest_desc = desc;
879                                 break;
880                         }
881                 } while (&desc->node != ioat_chan->used_desc.prev);
882
883                 if (latest_desc != NULL) {
884
885                         /* work forwards to clear finished descriptors */
886                         for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
887                              &desc->node != latest_desc->node.next &&
888                              &desc->node != ioat_chan->used_desc.next;
889                              desc = to_ioat_desc(desc->node.next)) {
890                                 if (desc->async_tx.cookie) {
891                                         cookie = desc->async_tx.cookie;
892                                         desc->async_tx.cookie = 0;
893
894                                         pci_unmap_page(ioat_chan->device->pdev,
895                                                       pci_unmap_addr(desc, dst),
896                                                       pci_unmap_len(desc, len),
897                                                       PCI_DMA_FROMDEVICE);
898                                         pci_unmap_page(ioat_chan->device->pdev,
899                                                       pci_unmap_addr(desc, src),
900                                                       pci_unmap_len(desc, len),
901                                                       PCI_DMA_TODEVICE);
902
903                                         if (desc->async_tx.callback) {
904                                                 desc->async_tx.callback(desc->async_tx.callback_param);
905                                                 desc->async_tx.callback = NULL;
906                                         }
907                                 }
908                         }
909
910                         /* move used.prev up beyond those that are finished */
911                         if (&desc->node == ioat_chan->used_desc.next)
912                                 ioat_chan->used_desc.prev = NULL;
913                         else
914                                 ioat_chan->used_desc.prev = &desc->node;
915                 }
916                 break;
917         }
918
919         spin_unlock_bh(&ioat_chan->desc_lock);
920
921         ioat_chan->last_completion = phys_complete;
922         if (cookie != 0)
923                 ioat_chan->completed_cookie = cookie;
924
925         spin_unlock_bh(&ioat_chan->cleanup_lock);
926 }
927
928 /**
929  * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
930  * @chan: IOAT DMA channel handle
931  * @cookie: DMA transaction identifier
932  * @done: if not %NULL, updated with last completed transaction
933  * @used: if not %NULL, updated with last used transaction
934  */
935 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
936                                             dma_cookie_t cookie,
937                                             dma_cookie_t *done,
938                                             dma_cookie_t *used)
939 {
940         struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
941         dma_cookie_t last_used;
942         dma_cookie_t last_complete;
943         enum dma_status ret;
944
945         last_used = chan->cookie;
946         last_complete = ioat_chan->completed_cookie;
947
948         if (done)
949                 *done = last_complete;
950         if (used)
951                 *used = last_used;
952
953         ret = dma_async_is_complete(cookie, last_complete, last_used);
954         if (ret == DMA_SUCCESS)
955                 return ret;
956
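        /* not complete yet: reap finished descriptors and check again */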
957         ioat_dma_memcpy_cleanup(ioat_chan);
958
959         last_used = chan->cookie;
960         last_complete = ioat_chan->completed_cookie;
961
962         if (done)
963                 *done = last_complete;
964         if (used)
965                 *used = last_used;
966
967         return dma_async_is_complete(cookie, last_complete, last_used);
968 }
969
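/*
 * Prime the channel with a NULL (no-op) descriptor so the hardware has a
 * valid chain address and completion writeback before real transfers are
 * appended.
 */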
970 static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
971 {
972         struct ioat_desc_sw *desc;
973
974         spin_lock_bh(&ioat_chan->desc_lock);
975
976         desc = ioat_dma_get_next_descriptor(ioat_chan);
977         desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
978                                 | IOAT_DMA_DESCRIPTOR_CTL_INT_GN
979                                 | IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
980         desc->hw->size = 0;
981         desc->hw->src_addr = 0;
982         desc->hw->dst_addr = 0;
983         async_tx_ack(&desc->async_tx);
984         switch (ioat_chan->device->version) {
985         case IOAT_VER_1_2:
986                 desc->hw->next = 0;
987                 list_add_tail(&desc->node, &ioat_chan->used_desc);
988
989                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
990                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
991                 writel(((u64) desc->async_tx.phys) >> 32,
992                        ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
993
994                 writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
995                         + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
996                 break;
997         case IOAT_VER_2_0:
998                 writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
999                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
1000                 writel(((u64) desc->async_tx.phys) >> 32,
1001                        ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
1002
1003                 ioat_chan->dmacount++;
1004                 __ioat2_dma_memcpy_issue_pending(ioat_chan);
1005                 break;
1006         }
1007         spin_unlock_bh(&ioat_chan->desc_lock);
1008 }
1009
1010 /*
1011  * Perform an IOAT transaction to verify the HW works.
1012  */
1013 #define IOAT_TEST_SIZE 2000
1014
1015 static void ioat_dma_test_callback(void *dma_async_param)
1016 {
1017         printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
1018                 dma_async_param);
1019 }
1020
1021 /**
1022  * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
1023  * @device: device to be tested
1024  */
1025 static int ioat_dma_self_test(struct ioatdma_device *device)
1026 {
1027         int i;
1028         u8 *src;
1029         u8 *dest;
1030         struct dma_chan *dma_chan;
1031         struct dma_async_tx_descriptor *tx;
1032         dma_addr_t dma_dest, dma_src;
1033         dma_cookie_t cookie;
1034         int err = 0;
1035
1036         src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1037         if (!src)
1038                 return -ENOMEM;
1039         dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
1040         if (!dest) {
1041                 kfree(src);
1042                 return -ENOMEM;
1043         }
1044
1045         /* Fill in src buffer */
1046         for (i = 0; i < IOAT_TEST_SIZE; i++)
1047                 src[i] = (u8)i;
1048
1049         /* Start copy, using first DMA channel */
1050         dma_chan = container_of(device->common.channels.next,
1051                                 struct dma_chan,
1052                                 device_node);
1053         if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
1054                 dev_err(&device->pdev->dev,
1055                         "selftest cannot allocate chan resource\n");
1056                 err = -ENODEV;
1057                 goto out;
1058         }
1059
1060         dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
1061                                  DMA_TO_DEVICE);
1062         dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
1063                                   DMA_FROM_DEVICE);
1064         tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
1065                                                    IOAT_TEST_SIZE, 0);
1066         if (!tx) {
1067                 dev_err(&device->pdev->dev,
1068                         "Self-test prep failed, disabling\n");
1069                 err = -ENODEV;
1070                 goto free_resources;
1071         }
1072
1073         async_tx_ack(tx);
1074         tx->callback = ioat_dma_test_callback;
1075         tx->callback_param = (void *)0x8086;
1076         cookie = tx->tx_submit(tx);
1077         if (cookie < 0) {
1078                 dev_err(&device->pdev->dev,
1079                         "Self-test setup failed, disabling\n");
1080                 err = -ENODEV;
1081                 goto free_resources;
1082         }
1083         device->common.device_issue_pending(dma_chan);
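        /* allow a moment for the IOAT_TEST_SIZE-byte test copy to complete */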
1084         msleep(1);
1085
1086         if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
1087                                         != DMA_SUCCESS) {
1088                 dev_err(&device->pdev->dev,
1089                         "Self-test copy timed out, disabling\n");
1090                 err = -ENODEV;
1091                 goto free_resources;
1092         }
1093         if (memcmp(src, dest, IOAT_TEST_SIZE)) {
1094                 dev_err(&device->pdev->dev,
1095                         "Self-test copy failed compare, disabling\n");
1096                 err = -ENODEV;
1097                 goto free_resources;
1098         }
1099
1100 free_resources:
1101         device->common.device_free_chan_resources(dma_chan);
1102 out:
1103         kfree(src);
1104         kfree(dest);
1105         return err;
1106 }
1107
1108 static char ioat_interrupt_style[32] = "msix";
1109 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
1110                     sizeof(ioat_interrupt_style), 0644);
1111 MODULE_PARM_DESC(ioat_interrupt_style,
1112                  "set ioat interrupt style: msix (default), "
1113                  "msix-single-vector, msi, intx");
1114
1115 /**
1116  * ioat_dma_setup_interrupts - setup interrupt handler
1117  * @device: ioat device
1118  */
1119 static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
1120 {
1121         struct ioat_dma_chan *ioat_chan;
1122         int err, i, j, msixcnt;
1123         u8 intrctrl = 0;
1124
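        /*
         * Interrupt modes are tried in order of preference: per-channel MSI-X,
         * single-vector MSI-X, MSI, then legacy INTx; each failed setup falls
         * back to the next mode.
         */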
1125         if (!strcmp(ioat_interrupt_style, "msix"))
1126                 goto msix;
1127         if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
1128                 goto msix_single_vector;
1129         if (!strcmp(ioat_interrupt_style, "msi"))
1130                 goto msi;
1131         if (!strcmp(ioat_interrupt_style, "intx"))
1132                 goto intx;
1133         dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
1134                 ioat_interrupt_style);
1135         goto err_no_irq;
1136
1137 msix:
1138         /* The number of MSI-X vectors should equal the number of channels */
1139         msixcnt = device->common.chancnt;
1140         for (i = 0; i < msixcnt; i++)
1141                 device->msix_entries[i].entry = i;
1142
1143         err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
1144         if (err < 0)
1145                 goto msi;
1146         if (err > 0)
1147                 goto msix_single_vector;
1148
1149         for (i = 0; i < msixcnt; i++) {
1150                 ioat_chan = ioat_lookup_chan_by_index(device, i);
1151                 err = request_irq(device->msix_entries[i].vector,
1152                                   ioat_dma_do_interrupt_msix,
1153                                   0, "ioat-msix", ioat_chan);
1154                 if (err) {
1155                         for (j = 0; j < i; j++) {
1156                                 ioat_chan =
1157                                         ioat_lookup_chan_by_index(device, j);
1158                                 free_irq(device->msix_entries[j].vector,
1159                                          ioat_chan);
1160                         }
1161                         goto msix_single_vector;
1162                 }
1163         }
1164         intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
1165         device->irq_mode = msix_multi_vector;
1166         goto done;
1167
1168 msix_single_vector:
1169         device->msix_entries[0].entry = 0;
1170         err = pci_enable_msix(device->pdev, device->msix_entries, 1);
1171         if (err)
1172                 goto msi;
1173
1174         err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
1175                           0, "ioat-msix", device);
1176         if (err) {
1177                 pci_disable_msix(device->pdev);
1178                 goto msi;
1179         }
1180         device->irq_mode = msix_single_vector;
1181         goto done;
1182
1183 msi:
1184         err = pci_enable_msi(device->pdev);
1185         if (err)
1186                 goto intx;
1187
1188         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1189                           0, "ioat-msi", device);
1190         if (err) {
1191                 pci_disable_msi(device->pdev);
1192                 goto intx;
1193         }
1194         /*
1195          * CB 1.2 devices need a bit set in configuration space to enable MSI
1196          */
1197         if (device->version == IOAT_VER_1_2) {
1198                 u32 dmactrl;
1199                 pci_read_config_dword(device->pdev,
1200                                       IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
1201                 dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
1202                 pci_write_config_dword(device->pdev,
1203                                        IOAT_PCI_DMACTRL_OFFSET, dmactrl);
1204         }
1205         device->irq_mode = msi;
1206         goto done;
1207
1208 intx:
1209         err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
1210                           IRQF_SHARED, "ioat-intx", device);
1211         if (err)
1212                 goto err_no_irq;
1213         device->irq_mode = intx;
1214
1215 done:
1216         intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
1217         writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
1218         return 0;
1219
1220 err_no_irq:
1221         /* Disable all interrupt generation */
1222         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1223         dev_err(&device->pdev->dev, "no usable interrupts\n");
1224         device->irq_mode = none;
1225         return -1;
1226 }
1227
1228 /**
1229  * ioat_dma_remove_interrupts - remove whatever interrupts were set
1230  * @device: ioat device
1231  */
1232 static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
1233 {
1234         struct ioat_dma_chan *ioat_chan;
1235         int i;
1236
1237         /* Disable all interrupt generation */
1238         writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
1239
1240         switch (device->irq_mode) {
1241         case msix_multi_vector:
1242                 for (i = 0; i < device->common.chancnt; i++) {
1243                         ioat_chan = ioat_lookup_chan_by_index(device, i);
1244                         free_irq(device->msix_entries[i].vector, ioat_chan);
1245                 }
1246                 pci_disable_msix(device->pdev);
1247                 break;
1248         case msix_single_vector:
1249                 free_irq(device->msix_entries[0].vector, device);
1250                 pci_disable_msix(device->pdev);
1251                 break;
1252         case msi:
1253                 free_irq(device->pdev->irq, device);
1254                 pci_disable_msi(device->pdev);
1255                 break;
1256         case intx:
1257                 free_irq(device->pdev->irq, device);
1258                 break;
1259         case none:
1260                 dev_warn(&device->pdev->dev,
1261                          "call to %s without interrupts setup\n", __func__);
1262         }
1263         device->irq_mode = none;
1264 }
1265
1266 struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
1267                                       void __iomem *iobase)
1268 {
1269         int err;
1270         struct ioatdma_device *device;
1271
1272         device = kzalloc(sizeof(*device), GFP_KERNEL);
1273         if (!device) {
1274                 err = -ENOMEM;
1275                 goto err_kzalloc;
1276         }
1277         device->pdev = pdev;
1278         device->reg_base = iobase;
1279         device->version = readb(device->reg_base + IOAT_VER_OFFSET);
1280
1281         /* DMA coherent memory pool for DMA descriptor allocations */
1282         device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
1283                                            sizeof(struct ioat_dma_descriptor),
1284                                            64, 0);
1285         if (!device->dma_pool) {
1286                 err = -ENOMEM;
1287                 goto err_dma_pool;
1288         }
1289
1290         device->completion_pool = pci_pool_create("completion_pool", pdev,
1291                                                   sizeof(u64), SMP_CACHE_BYTES,
1292                                                   SMP_CACHE_BYTES);
1293         if (!device->completion_pool) {
1294                 err = -ENOMEM;
1295                 goto err_completion_pool;
1296         }
1297
1298         INIT_LIST_HEAD(&device->common.channels);
1299         ioat_dma_enumerate_channels(device);
1300
1301         device->common.device_alloc_chan_resources =
1302                                                 ioat_dma_alloc_chan_resources;
1303         device->common.device_free_chan_resources =
1304                                                 ioat_dma_free_chan_resources;
1305         device->common.dev = &pdev->dev;
1306
1307         dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
1308         device->common.device_is_tx_complete = ioat_dma_is_complete;
1309         switch (device->version) {
1310         case IOAT_VER_1_2:
1311                 device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
1312                 device->common.device_issue_pending =
1313                                                 ioat1_dma_memcpy_issue_pending;
1314                 break;
1315         case IOAT_VER_2_0:
1316                 device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
1317                 device->common.device_issue_pending =
1318                                                 ioat2_dma_memcpy_issue_pending;
1319                 break;
1320         }
1321
1322         dev_err(&device->pdev->dev,
1323                 "Intel(R) I/OAT DMA Engine found,"
1324                 " %d channels, device version 0x%02x, driver version %s\n",
1325                 device->common.chancnt, device->version, IOAT_DMA_VERSION);
1326
1327         err = ioat_dma_setup_interrupts(device);
1328         if (err)
1329                 goto err_setup_interrupts;
1330
1331         err = ioat_dma_self_test(device);
1332         if (err)
1333                 goto err_self_test;
1334
1335         dma_async_device_register(&device->common);
1336
1337         return device;
1338
1339 err_self_test:
1340         ioat_dma_remove_interrupts(device);
1341 err_setup_interrupts:
1342         pci_pool_destroy(device->completion_pool);
1343 err_completion_pool:
1344         pci_pool_destroy(device->dma_pool);
1345 err_dma_pool:
1346         kfree(device);
1347 err_kzalloc:
1348         dev_err(&pdev->dev,
1349                 "Intel(R) I/OAT DMA Engine initialization failed\n");
1350         return NULL;
1351 }
1352
1353 void ioat_dma_remove(struct ioatdma_device *device)
1354 {
1355         struct dma_chan *chan, *_chan;
1356         struct ioat_dma_chan *ioat_chan;
1357
1358         ioat_dma_remove_interrupts(device);
1359
1360         dma_async_device_unregister(&device->common);
1361
1362         pci_pool_destroy(device->dma_pool);
1363         pci_pool_destroy(device->completion_pool);
1364
1365         iounmap(device->reg_base);
1366         pci_release_regions(device->pdev);
1367         pci_disable_device(device->pdev);
1368
1369         list_for_each_entry_safe(chan, _chan,
1370                                  &device->common.channels, device_node) {
1371                 ioat_chan = to_ioat_chan(chan);
1372                 list_del(&chan->device_node);
1373                 kfree(ioat_chan);
1374         }
1375         kfree(device);
1376 }
1377