ide: switch to DMA-mapping API part #2
Author:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
AuthorDate: Mon, 13 Oct 2008 19:39:47 +0000 (21:39 +0200)
Commit:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
CommitDate: Mon, 13 Oct 2008 19:39:47 +0000 (21:39 +0200)
Follow-up to commit 5c05ff68b9a9b40a9be949497e0aa980185565cf
("ide: switch to DMA-mapping API"):

* pci_{alloc,free}_consistent() -> dma_{alloc,free}_coherent()
  in ide_{allocate,release}_dma_engine() (see the API-mapping sketch
  after this list).

* Add ->prd_max_nents and ->prd_ent_size fields to ide_hwif_t
  (+ set default values in ide_allocate_dma_engine()).

* Make ide_{allocate,release}_dma_engine() available also
  for CONFIG_BLK_DEV_IDEDMA_SFF=n.  Then convert au1xxx-ide.c,
  scc_pata.c and sgiioc4.c to use them (see the driver-side sketch
  after this list).

* Add missing ->init_dma method to scc_pata.
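
The DMA-API conversion itself is mechanical: the PCI-only wrappers give way
to the generic calls, which take a struct device * instead of a
struct pci_dev * and an explicit GFP mask.  A minimal before/after sketch of
that mapping as applied in ide_{allocate,release}_dma_engine() (prd_size is
the local variable introduced in the diff below; GFP_ATOMIC preserves the
allocation context of the old pci_alloc_consistent()):

    /* before: PCI-specific wrappers, struct pci_dev based */
    hwif->dmatable_cpu = pci_alloc_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
                                              &hwif->dmatable_dma);
    pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
                        hwif->dmatable_cpu, hwif->dmatable_dma);

    /* after: generic DMA-mapping API, usable by non-PCI hosts as well */
    hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
                                            &hwif->dmatable_dma, GFP_ATOMIC);
    dma_free_coherent(hwif->dev, prd_size,
                      hwif->dmatable_cpu, hwif->dmatable_dma);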
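
With the helpers exported for all configurations, a host driver no longer
open-codes its PRD table allocation; it only overrides the new hwif fields
when its PRD geometry differs from the SFF defaults and lets the core do the
rest.  A driver-side sketch of the resulting pattern, modelled on the sgiioc4
and scc_pata hunks below (foo_init_dma, FOO_PRD_ENTRIES and FOO_PRD_BYTES are
made-up names for illustration only):

    /* hypothetical ->init_dma hook for a host driver "foo" */
    static int foo_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
    {
            /* override the SFF defaults (PRD_ENTRIES/PRD_BYTES) if needed */
            hwif->prd_max_nents = FOO_PRD_ENTRIES;
            hwif->prd_ent_size  = FOO_PRD_BYTES;

            /* allocates ->dmatable_cpu/->dmatable_dma, returns -ENOMEM on failure */
            return ide_allocate_dma_engine(hwif);
    }

On teardown ide_release_dma_engine() can be called unconditionally, since it
checks ->dmatable_cpu itself (which is also why the ->dma_base test in
ide_unregister() becomes superfluous).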

This patch also fixes:
- ->dmatable_cpu leak for au1xxx-ide
- premature release of ->dmatable_cpu for scc_pata
- wrong amount of ->dmatable_cpu memory being freed for sgiioc4

While at it:
- remove superfluous ->dma_base check from ide_unregister()
- return -ENOMEM on error in ide_allocate_dma_engine()
- beautify error message in ide_allocate_dma_engine()

Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
drivers/ide/ide-dma.c
drivers/ide/ide.c
drivers/ide/mips/au1xxx-ide.c
drivers/ide/pci/scc_pata.c
drivers/ide/pci/sgiioc4.c
include/linux/ide.h

diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 244b61b573ce4700f5662f80739bf0fc5851ea05..3f949b5db353d56ef05584d8c106df3549e36bde 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -844,36 +844,43 @@ void ide_dma_timeout(ide_drive_t *drive)
 }
 EXPORT_SYMBOL_GPL(ide_dma_timeout);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 void ide_release_dma_engine(ide_hwif_t *hwif)
 {
        if (hwif->dmatable_cpu) {
-               struct pci_dev *pdev = to_pci_dev(hwif->dev);
+               int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-               pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
-                                   hwif->dmatable_cpu, hwif->dmatable_dma);
+               dma_free_coherent(hwif->dev, prd_size,
+                                 hwif->dmatable_cpu, hwif->dmatable_dma);
                hwif->dmatable_cpu = NULL;
        }
 }
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
 
 int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
-       struct pci_dev *pdev = to_pci_dev(hwif->dev);
+       int prd_size;
 
-       hwif->dmatable_cpu = pci_alloc_consistent(pdev,
-                                                 PRD_ENTRIES * PRD_BYTES,
-                                                 &hwif->dmatable_dma);
+       if (hwif->prd_max_nents == 0)
+               hwif->prd_max_nents = PRD_ENTRIES;
+       if (hwif->prd_ent_size == 0)
+               hwif->prd_ent_size = PRD_BYTES;
 
-       if (hwif->dmatable_cpu)
-               return 0;
+       prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-       printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
+       hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+                                               &hwif->dmatable_dma,
+                                               GFP_ATOMIC);
+       if (hwif->dmatable_cpu == NULL) {
+               printk(KERN_ERR "%s: unable to allocate PRD table\n",
                        hwif->name);
+               return -ENOMEM;
+       }
 
-       return 1;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 const struct ide_dma_ops sff_dma_ops = {
        .dma_host_set           = ide_dma_host_set,
        .dma_setup              = ide_dma_setup,
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index a498245dc21391916337f3518b115c07f816be37..083783e851d12f2346918ca7607e23f6218c4ac6 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -227,8 +227,7 @@ void ide_unregister(ide_hwif_t *hwif)
        kfree(hwif->sg_table);
        unregister_blkdev(hwif->major, hwif->name);
 
-       if (hwif->dma_base)
-               ide_release_dma_engine(hwif);
+       ide_release_dma_engine(hwif);
 
        mutex_unlock(&ide_cfg_mtx);
 }
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index f9e88cfec827d5828a9dcb00a8ed9386ffbe8ab7..0ec8fd1e4dcb3ae37d3fb98966bc936f534fa0d0 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -427,10 +427,9 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
                                                             NUM_DESCRIPTORS);
        auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
                                                             NUM_DESCRIPTORS);
-       hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
-                                               PRD_ENTRIES * PRD_BYTES,        /* 1 Page */
-                                               &hwif->dmatable_dma, GFP_KERNEL);
+
+       /* FIXME: check return value */
+       (void)ide_allocate_dma_engine(hwif);
        
        au1xxx_dbdma_start( auide->tx_chan );
        au1xxx_dbdma_start( auide->rx_chan );
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 3e75bf5f5e37badbcc28f1b39cdb4eceed87554e..9ce1d8059921932de41938cc874412088b75b4ee 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -821,6 +821,12 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
        init_mmio_iops_scc(hwif);
 }
 
+static int __devinit scc_init_dma(ide_hwif_t *hwif,
+                                 const struct ide_port_info *d)
+{
+       return ide_allocate_dma_engine(hwif);
+}
+
 static u8 scc_cable_detect(ide_hwif_t *hwif)
 {
        return ATA_CBL_PATA80;
@@ -885,6 +891,7 @@ static const struct ide_dma_ops scc_dma_ops = {
   {                                                    \
       .name            = name_str,                     \
       .init_iops       = init_iops_scc,                \
+      .init_dma                = scc_init_dma,                 \
       .init_hwif       = init_hwif_scc,                \
       .tp_ops          = &scc_tp_ops,          \
       .port_ops                = &scc_port_ops,                \
@@ -922,13 +929,6 @@ static void __devexit scc_remove(struct pci_dev *dev)
 {
        struct scc_ports *ports = pci_get_drvdata(dev);
        struct ide_host *host = ports->host;
-       ide_hwif_t *hwif = host->ports[0];
-
-       if (hwif->dmatable_cpu) {
-               pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
-                                   hwif->dmatable_cpu, hwif->dmatable_dma);
-               hwif->dmatable_cpu = NULL;
-       }
 
        ide_host_remove(host);
 
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index 84cd986810cf187f2917152bdcc584b1219d4101..dd634541ce361a62a4789484cdf89fe6a85584f3 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -357,14 +357,13 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
        }
        hwif->dma_base = (unsigned long) virt_dma_base;
 
-       hwif->dmatable_cpu = pci_alloc_consistent(dev,
-                                         IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-                                         &hwif->dmatable_dma);
+       hwif->sg_max_nents = IOC4_PRD_ENTRIES;
 
-       if (!hwif->dmatable_cpu)
-               goto dma_pci_alloc_failure;
+       hwif->prd_max_nents = IOC4_PRD_ENTRIES;
+       hwif->prd_ent_size = IOC4_PRD_BYTES;
 
-       hwif->sg_max_nents = IOC4_PRD_ENTRIES;
+       if (ide_allocate_dma_engine(hwif))
+               goto dma_pci_alloc_failure;
 
        pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
                                   (dma_addr_t *)&hwif->extra_base);
@@ -373,8 +372,8 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
                return 0;
        }
 
-       pci_free_consistent(dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-                           hwif->dmatable_cpu, hwif->dmatable_dma);
+       ide_release_dma_engine(hwif);
+
        printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
               __func__, hwif->name);
        printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 39aaff8ff45769a029eca26a92184f210e81d57d..8121aa9240c44b0ad426b3622b84002ac021b755 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -788,6 +788,12 @@ typedef struct hwif_s {
        unsigned int    *dmatable_cpu;
        /* dma physical region descriptor table (dma view) */
        dma_addr_t      dmatable_dma;
+
+       /* maximum number of PRD table entries */
+       int prd_max_nents;
+       /* PRD entry size in bytes */
+       int prd_ent_size;
+
        /* Scatter-gather list used to build the above */
        struct scatterlist *sg_table;
        int sg_max_nents;               /* Maximum number of entries in it */
@@ -1423,14 +1429,14 @@ int ide_set_dma(ide_drive_t *);
 void ide_check_dma_crc(ide_drive_t *);
 ide_startstop_t ide_dma_intr(ide_drive_t *);
 
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+
 int ide_build_sglist(ide_drive_t *, struct request *);
 void ide_destroy_dmatable(ide_drive_t *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 extern int ide_build_dmatable(ide_drive_t *, struct request *);
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
-
 void ide_dma_host_set(ide_drive_t *, int);
 extern int ide_dma_setup(ide_drive_t *);
 void ide_dma_exec_cmd(ide_drive_t *, u8);
@@ -1453,11 +1459,8 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
-
-#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-#endif
+#endif /* CONFIG_BLK_DEV_IDEDMA */
 
 #ifdef CONFIG_BLK_DEV_IDEACPI
 extern int ide_acpi_exec_tfs(ide_drive_t *drive);