Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 9 Jan 2009 19:55:14 +0000 (11:55 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 9 Jan 2009 19:55:14 +0000 (11:55 -0800)
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (94 commits)
  ACPICA: hide private headers
  ACPICA: create acpica/ directory
  ACPI: fix build warning
  ACPI : Use RSDT instead of XSDT by adding boot option of "acpi=rsdt"
  ACPI: Avoid array address overflow when _CST MWAIT hint bits are set
  fujitsu-laptop: Simplify SBLL/SBL2 backlight handling
  fujitsu-laptop: Add BL power, LED control and radio state information
  ACPICA: delete utcache.c
  ACPICA: delete acdisasm.h
  ACPICA: Update version to 20081204.
  ACPICA: FADT: Update error msgs for consistency
  ACPICA: FADT: set acpi_gbl_use_default_register_widths to TRUE by default
  ACPICA: FADT parsing changes and fixes
  ACPICA: Add ACPI_MUTEX_TYPE configuration option
  ACPICA: Fixes for various ACPI data tables
  ACPICA: Restructure includes into public/private
  ACPI: remove private acpica headers from driver files
  ACPI: reboot.c: use new acpi_reset interface
  ACPICA: New: acpi_reset interface - write to reset register
  ACPICA: Move all public H/W interfaces to new hwxface
  ...

60 files changed:
Documentation/crypto/async-tx-api.txt
Documentation/dmaengine.txt [new file with mode: 0644]
arch/avr32/mach-at32ap/at32ap700x.c
arch/parisc/Makefile
arch/parisc/include/asm/byteorder.h
arch/parisc/include/asm/checksum.h
arch/parisc/include/asm/io.h
arch/parisc/include/asm/mmu_context.h
arch/parisc/include/asm/processor.h
arch/parisc/include/asm/uaccess.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/hpmc.S
arch/parisc/kernel/irq.c
arch/parisc/kernel/pdc_cons.c
arch/parisc/kernel/perf.c
arch/parisc/kernel/processor.c
arch/parisc/kernel/setup.c
arch/parisc/kernel/smp.c
arch/parisc/kernel/time.c
arch/parisc/kernel/topology.c
arch/parisc/kernel/traps.c
arch/parisc/kernel/unwind.c
arch/parisc/lib/iomap.c
arch/parisc/lib/memcpy.c
arch/parisc/mm/fault.c
crypto/async_tx/async_tx.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/dca/dca-core.c
drivers/dma/Kconfig
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw_dmac.c
drivers/dma/fsldma.c
drivers/dma/ioat.c
drivers/dma/ioat_dma.c
drivers/dma/iop-adma.c
drivers/dma/mv_xor.c
drivers/mmc/host/atmel-mci.c
drivers/parisc/asp.c
drivers/parisc/ccio-dma.c
drivers/parisc/dino.c
drivers/parisc/hppb.c
drivers/parisc/lasi.c
drivers/parisc/lba_pci.c
drivers/parisc/sba_iommu.c
drivers/parisc/wax.c
drivers/rtc/rtc-parisc.c
include/linux/async_tx.h
include/linux/atmel-mci.h
include/linux/dmaengine.h
include/linux/dw_dmac.h
include/linux/netdevice.h
include/net/netdma.h
kernel/cred.c
net/core/dev.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv6/tcp_ipv6.c

index c1e9545c59bd2366cfdb76dfa6946a24a9b4c547..9f59fcbf5d82b9ce3236dd044f9e1d75b4600e79 100644 (file)
@@ -13,9 +13,9 @@
 3.6 Constraints
 3.7 Example
 
-4 DRIVER DEVELOPER NOTES
+4 DMAENGINE DRIVER DEVELOPER NOTES
 4.1 Conformance points
-4.2 "My application needs finer control of hardware channels"
+4.2 "My application needs exclusive control of hardware channels"
 
 5 SOURCE
 
@@ -150,6 +150,7 @@ ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
 implementation examples.
 
 4 DRIVER DEVELOPMENT NOTES
+
 4.1 Conformance points:
 There are a few conformance points required in dmaengine drivers to
 accommodate assumptions made by applications using the async_tx API:
@@ -158,58 +159,49 @@ accommodate assumptions made by applications using the async_tx API:
 3/ Use async_tx_run_dependencies() in the descriptor clean up path to
    handle submission of dependent operations
 
-4.2 "My application needs finer control of hardware channels"
-This requirement seems to arise from cases where a DMA engine driver is
-trying to support device-to-memory DMA.  The dmaengine and async_tx
-implementations were designed for offloading memory-to-memory
-operations; however, there are some capabilities of the dmaengine layer
-that can be used for platform-specific channel management.
-Platform-specific constraints can be handled by registering the
-application as a 'dma_client' and implementing a 'dma_event_callback' to
-apply a filter to the available channels in the system.  Before showing
-how to implement a custom dma_event callback some background of
-dmaengine's client support is required.
-
-The following routines in dmaengine support multiple clients requesting
-use of a channel:
-- dma_async_client_register(struct dma_client *client)
-- dma_async_client_chan_request(struct dma_client *client)
-
-dma_async_client_register takes a pointer to an initialized dma_client
-structure.  It expects that the 'event_callback' and 'cap_mask' fields
-are already initialized.
-
-dma_async_client_chan_request triggers dmaengine to notify the client of
-all channels that satisfy the capability mask.  It is up to the client's
-event_callback routine to track how many channels the client needs and
-how many it is currently using.  The dma_event_callback routine returns a
-dma_state_client code to let dmaengine know the status of the
-allocation.
-
-Below is the example of how to extend this functionality for
-platform-specific filtering of the available channels beyond the
-standard capability mask:
-
-static enum dma_state_client
-my_dma_client_callback(struct dma_client *client,
-                       struct dma_chan *chan, enum dma_state state)
-{
-       struct dma_device *dma_dev;
-       struct my_platform_specific_dma *plat_dma_dev;
-       
-       dma_dev = chan->device;
-       plat_dma_dev = container_of(dma_dev,
-                                   struct my_platform_specific_dma,
-                                   dma_dev);
-
-       if (!plat_dma_dev->platform_specific_capability)
-               return DMA_DUP;
-
-       . . .
-}
+4.2 "My application needs exclusive control of hardware channels"
+Primarily this requirement arises from cases where a DMA engine driver
+is being used to support device-to-memory operations.  A channel that is
+performing these operations cannot, for many platform specific reasons,
+be shared.  For these cases the dma_request_channel() interface is
+provided.
+
+The interface is:
+struct dma_chan *dma_request_channel(dma_cap_mask_t mask,
+                                    dma_filter_fn filter_fn,
+                                    void *filter_param);
+
+Where dma_filter_fn is defined as:
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+When the optional 'filter_fn' parameter is set to NULL
+dma_request_channel simply returns the first channel that satisfies the
+capability mask.  Otherwise, when the mask parameter is insufficient for
+specifying the necessary channel, the filter_fn routine can be used to
+disposition the available channels in the system. The filter_fn routine
+is called once for each free channel in the system.  Upon seeing a
+suitable channel filter_fn returns DMA_ACK which flags that channel to
+be the return value from dma_request_channel.  A channel allocated via
+this interface is exclusive to the caller, until dma_release_channel()
+is called.
+
+The DMA_PRIVATE capability flag is used to tag dma devices that should
+not be used by the general-purpose allocator.  It can be set at
+initialization time if it is known that a channel will always be
+private.  Alternatively, it is set when dma_request_channel() finds an
+unused "public" channel.
+
+A couple caveats to note when implementing a driver and consumer:
+1/ Once a channel has been privately allocated it will no longer be
+   considered by the general-purpose allocator even after a call to
+   dma_release_channel().
+2/ Since capabilities are specified at the device level a dma_device
+   with multiple channels will either have all channels public, or all
+   channels private.
 
 5 SOURCE
-include/linux/dmaengine.h: core header file for DMA drivers and clients
+
+include/linux/dmaengine.h: core header file for DMA drivers and api users
 drivers/dma/dmaengine.c: offload engine channel management routines
 drivers/dma/: location for offload engine drivers
 include/linux/async_tx.h: core header file for the async_tx api
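
[Editorial note: the exclusive-allocation flow documented in the hunk above can be
illustrated with a short consumer sketch.  This is not part of the patch; the
my_slave_data structure and the my_* function names are hypothetical, and only the
interfaces named in the text (dma_filter_fn, dma_request_channel() and
dma_release_channel()) are assumed.]

#include <linux/dmaengine.h>

/* Hypothetical per-board data identifying the DMA controller we must use. */
struct my_slave_data {
	struct device *dma_dev;
};

/* dma_filter_fn: return true to accept the channel currently being offered. */
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	struct my_slave_data *sd = filter_param;

	/* Only accept channels backed by the expected DMA controller. */
	return chan->device->dev == sd->dma_dev;
}

static struct dma_chan *my_grab_channel(struct my_slave_data *sd)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Exclusive to the caller until dma_release_channel(); NULL if no match. */
	return dma_request_channel(mask, my_filter, sd);
}

static void my_put_channel(struct dma_chan *chan)
{
	if (chan)
		dma_release_channel(chan);
}
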
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
new file mode 100644 (file)
index 0000000..0c1c2f6
--- /dev/null
@@ -0,0 +1 @@
+See Documentation/crypto/async-tx-api.txt
index ea7bc1e8562be302f7d75be16bb8844ca52e3510..3fbfd1e32a9ee79af4f4545d95a9543b9070d189 100644 (file)
@@ -1305,7 +1305,7 @@ struct platform_device *__init
 at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 {
        struct platform_device          *pdev;
-       struct dw_dma_slave             *dws;
+       struct dw_dma_slave             *dws = &data->dma_slave;
        u32                             pioa_mask;
        u32                             piob_mask;
 
@@ -1324,22 +1324,13 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
                                ARRAY_SIZE(atmel_mci0_resource)))
                goto fail;
 
-       if (data->dma_slave)
-               dws = kmemdup(to_dw_dma_slave(data->dma_slave),
-                               sizeof(struct dw_dma_slave), GFP_KERNEL);
-       else
-               dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL);
-
-       dws->slave.dev = &pdev->dev;
-       dws->slave.dma_dev = &dw_dmac0_device.dev;
-       dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT;
+       dws->dma_dev = &dw_dmac0_device.dev;
+       dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
        dws->cfg_hi = (DWC_CFGH_SRC_PER(0)
                                | DWC_CFGH_DST_PER(1));
        dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL
                                | DWC_CFGL_HS_SRC_POL);
 
-       data->dma_slave = &dws->slave;
-
        if (platform_device_add_data(pdev, data,
                                sizeof(struct mci_platform_data)))
                goto fail;
index 5ddad7bd60ac8740de6712aabd6c274fc1fb435d..0d428278356dd2b526ff904b34d9f421dc35c5d1 100644 (file)
@@ -77,7 +77,7 @@ libs-y        += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
 
 drivers-$(CONFIG_OPROFILE)             += arch/parisc/oprofile/
 
-PALO := $(shell if which palo; then : ; \
+PALO := $(shell if (which palo 2>&1); then : ; \
        elif [ -x /sbin/palo ]; then echo /sbin/palo; \
        fi)
 
index db148313de5de079badd584c88184f25ced42fd8..83095c5bb379b7e685454e09ea30b926a82c60db 100644 (file)
@@ -4,9 +4,10 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
-#ifdef __GNUC__
+#define __BIG_ENDIAN
+#define __SWAB_64_THRU_32__
 
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
 {
        __asm__("dep %0, 15, 8, %0\n\t"         /* deposit 00ab -> 0bab */
                "shd %%r0, %0, 8, %0"           /* shift 000000ab -> 00ba */
@@ -14,8 +15,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
                : "0" (x));
        return x;
 }
+#define __arch_swab16 __arch_swab16
 
-static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab24(__u32 x)
 {
        __asm__("shd %0, %0, 8, %0\n\t"         /* shift xabcxabc -> cxab */
                "dep %0, 15, 8, %0\n\t"         /* deposit cxab -> cbab */
@@ -25,7 +27,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab24(__u32 x)
        return x;
 }
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
        unsigned int temp;
        __asm__("shd %0, %0, 16, %1\n\t"        /* shift abcdabcd -> cdab */
@@ -35,7 +37,7 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
                : "0" (x));
        return x;
 }
-
+#define __arch_swab32 __arch_swab32
 
 #if BITS_PER_LONG > 32
 /*
@@ -48,7 +50,8 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
 **      HSHR    67452301 -> *6*4*2*0 into %0
 **      OR      %0 | %1  -> 76543210 into %0 (all done!)
 */
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
+static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+{
        __u64 temp;
        __asm__("permh,3210 %0, %0\n\t"
                "hshl %0, 8, %1\n\t"
@@ -58,25 +61,9 @@ static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) {
                : "0" (x));
        return x;
 }
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#elif !defined(__STRICT_ANSI__)
-static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x)
-{
-       __u32 t1 = ___arch__swab32((__u32) x);
-       __u32 t2 = ___arch__swab32((__u32) (x >> 32));
-       return (((__u64) t1 << 32) | t2);
-}
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __BYTEORDER_HAS_U64__
-#endif
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab24(x) ___arch__swab24(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-#endif /* __GNUC__ */
+#define __arch_swab64 __arch_swab64
+#endif /* BITS_PER_LONG > 32 */
 
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _PARISC_BYTEORDER_H */
index e9639ccc3fcec82fe6ede8b90764f62cd2a5415d..c84b2fcb18a95ea1c643a6aea3595f2615721925 100644 (file)
@@ -182,7 +182,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 #endif
        : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
        : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
-       : "r19", "r20", "r21", "r22");
+       : "r19", "r20", "r21", "r22", "memory");
        return csum_fold(sum);
 }
 
index 55ddb1842107b8542735a4b1032d0c49dddf81e1..d3031d1f9d034539ca2ff9999c4dba09743c3576 100644 (file)
@@ -4,12 +4,6 @@
 #include <linux/types.h>
 #include <asm/pgtable.h>
 
-extern unsigned long parisc_vmerge_boundary;
-extern unsigned long parisc_vmerge_max_size;
-
-#define BIO_VMERGE_BOUNDARY    parisc_vmerge_boundary
-#define BIO_VMERGE_MAX_SIZE    parisc_vmerge_max_size
-
 #define virt_to_phys(a) ((unsigned long)__pa(a))
 #define phys_to_virt(a) __va(a)
 #define virt_to_bus virt_to_phys
@@ -182,9 +176,9 @@ static inline void __raw_writeq(unsigned long long b, volatile void __iomem *add
 
 /* readb can never be const, so use __fswab instead of le*_to_cpu */
 #define readb(addr) __raw_readb(addr)
-#define readw(addr) __fswab16(__raw_readw(addr))
-#define readl(addr) __fswab32(__raw_readl(addr))
-#define readq(addr) __fswab64(__raw_readq(addr))
+#define readw(addr) le16_to_cpu(__raw_readw(addr))
+#define readl(addr) le32_to_cpu(__raw_readl(addr))
+#define readq(addr) le64_to_cpu(__raw_readq(addr))
 #define writeb(b, addr) __raw_writeb(b, addr)
 #define writew(b, addr) __raw_writew(cpu_to_le16(b), addr)
 #define writel(b, addr) __raw_writel(cpu_to_le32(b), addr)
index 85856c74ad1ddddecc344b4fdda31008b0e1d6b6..354b2aca990e2701c7149c35df4b6a06131988bc 100644 (file)
@@ -34,16 +34,21 @@ destroy_context(struct mm_struct *mm)
        mm->context = 0;
 }
 
-static inline void load_context(mm_context_t context)
+static inline unsigned long __space_to_prot(mm_context_t context)
 {
-       mtsp(context, 3);
 #if SPACEID_SHIFT == 0
-       mtctl(context << 1,8);
+       return context << 1;
 #else
-       mtctl(context >> (SPACEID_SHIFT - 1),8);
+       return context >> (SPACEID_SHIFT - 1);
 #endif
 }
 
+static inline void load_context(mm_context_t context)
+{
+       mtsp(context, 3);
+       mtctl(__space_to_prot(context), 8);
+}
+
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
 {
 
index 3c9d34844c83e0f6f4939ed01d54887ce16b7b5d..9d64df8754ba5203c9a7299038ab5c01ea68185b 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/system.h>
+#include <asm/percpu.h>
 #endif /* __ASSEMBLY__ */
 
 #define KERNEL_STACK_SIZE      (4*PAGE_SIZE)
@@ -109,8 +110,7 @@ struct cpuinfo_parisc {
 };
 
 extern struct system_cpuinfo_parisc boot_cpu_data;
-extern struct cpuinfo_parisc cpu_data[NR_CPUS];
-#define current_cpu_data cpu_data[smp_processor_id()]
+DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
 
index 4878b9501f245ec77bb826a800d2878e34c7cc0e..1c6dbb6f6e5663988ed5def13f4a776f51dd3e62 100644 (file)
@@ -241,4 +241,6 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
+int fixup_exception(struct pt_regs *regs);
+
 #endif /* __PARISC_UACCESS_H */
index 884b7ce16a3bd3cee27ba80e01e7bd3755d43cbd..994bcd980909d62d2d1d3c474e22cf2c0b744ce4 100644 (file)
@@ -549,6 +549,38 @@ static int parisc_generic_match(struct device *dev, struct device_driver *drv)
        return match_device(to_parisc_driver(drv), to_parisc_device(dev));
 }
 
+static ssize_t make_modalias(struct device *dev, char *buf)
+{
+       const struct parisc_device *padev = to_parisc_device(dev);
+       const struct parisc_device_id *id = &padev->id;
+
+       return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
+               (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
+               (u32)id->sversion);
+}
+
+static int parisc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       const struct parisc_device *padev;
+       char modalias[40];
+
+       if (!dev)
+               return -ENODEV;
+
+       padev = to_parisc_device(dev);
+       if (!padev)
+               return -ENODEV;
+
+       if (add_uevent_var(env, "PARISC_NAME=%s", padev->name))
+               return -ENOMEM;
+
+       make_modalias(dev, modalias);
+       if (add_uevent_var(env, "MODALIAS=%s", modalias))
+               return -ENOMEM;
+
+       return 0;
+}
+
 #define pa_dev_attr(name, field, format_string)                                \
 static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf)               \
 {                                                                      \
@@ -566,12 +598,7 @@ pa_dev_attr_id(sversion, "0x%05x\n");
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct parisc_device *padev = to_parisc_device(dev);
-       struct parisc_device_id *id = &padev->id;
-
-       return sprintf(buf, "parisc:t%02Xhv%04Xrev%02Xsv%08X\n",
-               (u8)id->hw_type, (u16)id->hversion, (u8)id->hversion_rev,
-               (u32)id->sversion);
+       return make_modalias(dev, buf);
 }
 
 static struct device_attribute parisc_device_attrs[] = {
@@ -587,6 +614,7 @@ static struct device_attribute parisc_device_attrs[] = {
 struct bus_type parisc_bus_type = {
        .name = "parisc",
        .match = parisc_generic_match,
+       .uevent = parisc_uevent,
        .dev_attrs = parisc_device_attrs,
        .probe = parisc_driver_probe,
        .remove = parisc_driver_remove,
index 2cbf13b3ef11d0a1ff62f7eb5cec9b1c96333371..5595a2f311816d3d5e24420e5598563c4815d3a8 100644 (file)
@@ -80,6 +80,7 @@ END(hpmc_pim_data)
 
        .import intr_save, code
 ENTRY(os_hpmc)
+.os_hpmc:
 
        /*
         * registers modified:
@@ -295,5 +296,10 @@ os_hpmc_6:
        b .
        nop
 ENDPROC(os_hpmc)
-ENTRY(os_hpmc_end)     /* this label used to compute os_hpmc checksum */
+.os_hpmc_end:
        nop
+.data
+.align 4
+       .export os_hpmc_size
+os_hpmc_size:
+       .word .os_hpmc_end-.os_hpmc
index 4cea935e2f99d1d51436fdb815e357dea54a4e31..ac2c822928c75e3ade5d9e6d278c08804703c3d8 100644 (file)
@@ -298,7 +298,7 @@ unsigned long txn_affinity_addr(unsigned int irq, int cpu)
        irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 #endif
 
-       return cpu_data[cpu].txn_addr;
+       return per_cpu(cpu_data, cpu).txn_addr;
 }
 
 
@@ -309,8 +309,9 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
        next_cpu++; /* assign to "next" CPU we want this bugger on */
 
        /* validate entry */
-       while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr || 
-               !cpu_online(next_cpu)))
+       while ((next_cpu < NR_CPUS) &&
+               (!per_cpu(cpu_data, next_cpu).txn_addr ||
+                !cpu_online(next_cpu)))
                next_cpu++;
 
        if (next_cpu >= NR_CPUS) 
@@ -359,7 +360,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
                printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
                       irq, smp_processor_id(), cpu);
                gsc_writel(irq + CPU_IRQ_BASE,
-                          cpu_data[cpu].hpa);
+                          per_cpu(cpu_data, cpu).hpa);
                goto set_out;
        }
 #endif
@@ -421,5 +422,5 @@ void __init init_IRQ(void)
 
 void ack_bad_irq(unsigned int irq)
 {
-       printk("unexpected IRQ %d\n", irq);
+       printk(KERN_WARNING "unexpected IRQ %d\n", irq);
 }
index ccb68090781efdc24b553ce9e6162899ed349c03..1ff366cb9685b9e211d2279a74f2177c4535b329 100644 (file)
@@ -52,7 +52,7 @@
 #include <linux/tty.h>
 #include <asm/pdc.h>           /* for iodc_call() proto and friends */
 
-static spinlock_t pdc_console_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pdc_console_lock);
 
 static void pdc_console_write(struct console *co, const char *s, unsigned count)
 {
index f696f57faa15b58dfd32e2101379f32e27ce70c0..75099efb3bf37bc4c2695611b5f04162b4eb67de 100644 (file)
@@ -541,9 +541,9 @@ static int __init perf_init(void)
        spin_lock_init(&perf_lock);
 
        /* TODO: this only lets us access the first cpu.. what to do for SMP? */
-       cpu_device = cpu_data[0].dev;
+       cpu_device = per_cpu(cpu_data, 0).dev;
        printk("Performance monitoring counters enabled for %s\n",
-               cpu_data[0].dev->name);
+               per_cpu(cpu_data, 0).dev->name);
 
        return 0;
 }
index 370086fb8333a8a1b5b6f9fdb17418759dc9e064..ecb609342feb73164114b4089aafafe3aa2d6f9d 100644 (file)
@@ -3,7 +3,7 @@
  *    Initial setup-routines for HP 9000 based hardware.
  *
  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *    Modifications for PA-RISC (C) 1999 Helge Deller <deller@gmx.de>
+ *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
  *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
  *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
  *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
@@ -46,7 +46,7 @@
 struct system_cpuinfo_parisc boot_cpu_data __read_mostly;
 EXPORT_SYMBOL(boot_cpu_data);
 
-struct cpuinfo_parisc cpu_data[NR_CPUS] __read_mostly;
+DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
 
 extern int update_cr16_clocksource(void);      /* from time.c */
 
@@ -68,6 +68,23 @@ extern int update_cr16_clocksource(void);    /* from time.c */
 ** The initialization of OS data structures is the same (done below).
 */
 
+/**
+ * init_cpu_profiler - enable/setup per cpu profiling hooks.
+ * @cpunum: The processor instance.
+ *
+ * FIXME: doesn't do much yet...
+ */
+static void __cpuinit
+init_percpu_prof(unsigned long cpunum)
+{
+       struct cpuinfo_parisc *p;
+
+       p = &per_cpu(cpu_data, cpunum);
+       p->prof_counter = 1;
+       p->prof_multiplier = 1;
+}
+
+
 /**
  * processor_probe - Determine if processor driver should claim this device.
  * @dev: The device which has been found.
@@ -147,7 +164,7 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
        }
 #endif
 
-       p = &cpu_data[cpuid];
+       p = &per_cpu(cpu_data, cpuid);
        boot_cpu_data.cpu_count++;
 
        /* initialize counters - CPU 0 gets it_value set in time_init() */
@@ -162,12 +179,9 @@ static int __cpuinit processor_probe(struct parisc_device *dev)
 #ifdef CONFIG_SMP
        /*
        ** FIXME: review if any other initialization is clobbered
-       **      for boot_cpu by the above memset().
+       **        for boot_cpu by the above memset().
        */
-
-       /* stolen from init_percpu_prof() */
-       cpu_data[cpuid].prof_counter = 1;
-       cpu_data[cpuid].prof_multiplier = 1;
+       init_percpu_prof(cpuid);
 #endif
 
        /*
@@ -261,19 +275,6 @@ void __init collect_boot_cpu_data(void)
 }
 
 
-/**
- * init_cpu_profiler - enable/setup per cpu profiling hooks.
- * @cpunum: The processor instance.
- *
- * FIXME: doesn't do much yet...
- */
-static inline void __init
-init_percpu_prof(int cpunum)
-{
-       cpu_data[cpunum].prof_counter = 1;
-       cpu_data[cpunum].prof_multiplier = 1;
-}
-
 
 /**
  * init_per_cpu - Handle individual processor initializations.
@@ -293,7 +294,7 @@ init_percpu_prof(int cpunum)
  *
  * o Enable CPU profiling hooks.
  */
-int __init init_per_cpu(int cpunum)
+int __cpuinit init_per_cpu(int cpunum)
 {
        int ret;
        struct pdc_coproc_cfg coproc_cfg;
@@ -307,8 +308,8 @@ int __init init_per_cpu(int cpunum)
                /* FWIW, FP rev/model is a more accurate way to determine
                ** CPU type. CPU rev/model has some ambiguous cases.
                */
-               cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-               cpu_data[cpunum].fp_model = coproc_cfg.model;
+               per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+               per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
                printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
                        cpunum, coproc_cfg.revision, coproc_cfg.model);
@@ -344,16 +345,17 @@ int __init init_per_cpu(int cpunum)
 int
 show_cpuinfo (struct seq_file *m, void *v)
 {
-       int     n;
+       unsigned long cpu;
 
-       for(n=0; n<boot_cpu_data.cpu_count; n++) {
+       for_each_online_cpu(cpu) {
+               const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 #ifdef CONFIG_SMP
-               if (0 == cpu_data[n].hpa)
+               if (0 == cpuinfo->hpa)
                        continue;
 #endif
-               seq_printf(m, "processor\t: %d\n"
+               seq_printf(m, "processor\t: %lu\n"
                                "cpu family\t: PA-RISC %s\n",
-                                n, boot_cpu_data.family_name);
+                                cpu, boot_cpu_data.family_name);
 
                seq_printf(m, "cpu\t\t: %s\n",  boot_cpu_data.cpu_name );
 
@@ -365,8 +367,8 @@ show_cpuinfo (struct seq_file *m, void *v)
                seq_printf(m, "model\t\t: %s\n"
                                "model name\t: %s\n",
                                 boot_cpu_data.pdc.sys_model_name,
-                                cpu_data[n].dev ? 
-                                cpu_data[n].dev->name : "Unknown" );
+                                cpuinfo->dev ?
+                                cpuinfo->dev->name : "Unknown");
 
                seq_printf(m, "hversion\t: 0x%08x\n"
                                "sversion\t: 0x%08x\n",
@@ -377,8 +379,8 @@ show_cpuinfo (struct seq_file *m, void *v)
                show_cache_info(m);
 
                seq_printf(m, "bogomips\t: %lu.%02lu\n",
-                            cpu_data[n].loops_per_jiffy / (500000 / HZ),
-                            (cpu_data[n].loops_per_jiffy / (5000 / HZ)) % 100);
+                            cpuinfo->loops_per_jiffy / (500000 / HZ),
+                            (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);
 
                seq_printf(m, "software id\t: %ld\n\n",
                                boot_cpu_data.pdc.model.sw_id);
index 7d27853ff8c8b6233ec4a0e1f2fe3747fbbf88aa..82131ca8e05c28790bbc133690e2267f1be67034 100644 (file)
@@ -58,11 +58,6 @@ int parisc_bus_is_phys __read_mostly = 1;    /* Assume no IOMMU is present */
 EXPORT_SYMBOL(parisc_bus_is_phys);
 #endif
 
-/* This sets the vmerge boundary and size, it's here because it has to
- * be available on all platforms (zero means no-virtual merging) */
-unsigned long parisc_vmerge_boundary = 0;
-unsigned long parisc_vmerge_max_size = 0;
-
 void __init setup_cmdline(char **cmdline_p)
 {
        extern unsigned int boot_args[];
@@ -321,7 +316,7 @@ static int __init parisc_init(void)
        
        processor_init();
        printk(KERN_INFO "CPU(s): %d x %s at %d.%06d MHz\n",
-                       boot_cpu_data.cpu_count,
+                       num_present_cpus(),
                        boot_cpu_data.cpu_name,
                        boot_cpu_data.cpu_hz / 1000000,
                        boot_cpu_data.cpu_hz % 1000000  );
@@ -387,8 +382,8 @@ void start_parisc(void)
        if (ret >= 0 && coproc_cfg.ccr_functional) {
                mtctl(coproc_cfg.ccr_functional, 10);
 
-               cpu_data[cpunum].fp_rev = coproc_cfg.revision;
-               cpu_data[cpunum].fp_model = coproc_cfg.model;
+               per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
+               per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;
 
                asm volatile ("fstd     %fr0,8(%sp)");
        } else {
index 80bc000523face7176458cf26c452f461ecd6413..9995d7ed58198c42de88223b51ff0d3ac4f75b3b 100644 (file)
@@ -56,16 +56,17 @@ static int smp_debug_lvl = 0;
                if (lvl >= smp_debug_lvl)       \
                        printk(printargs);
 #else
-#define smp_debug(lvl, ...)
+#define smp_debug(lvl, ...)    do { } while(0)
 #endif /* DEBUG_SMP */
 
 DEFINE_SPINLOCK(smp_lock);
 
 volatile struct task_struct *smp_init_current_idle_task;
 
-static volatile int cpu_now_booting __read_mostly = 0; /* track which CPU is booting */
+/* track which CPU is booting */
+static volatile int cpu_now_booting __cpuinitdata;
 
-static int parisc_max_cpus __read_mostly = 1;
+static int parisc_max_cpus __cpuinitdata = 1;
 
 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
 
@@ -123,7 +124,7 @@ irqreturn_t
 ipi_interrupt(int irq, void *dev_id) 
 {
        int this_cpu = smp_processor_id();
-       struct cpuinfo_parisc *p = &cpu_data[this_cpu];
+       struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
        unsigned long ops;
        unsigned long flags;
 
@@ -202,13 +203,13 @@ ipi_interrupt(int irq, void *dev_id)
 static inline void
 ipi_send(int cpu, enum ipi_message_type op)
 {
-       struct cpuinfo_parisc *p = &cpu_data[cpu];
+       struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
        spinlock_t *lock = &per_cpu(ipi_lock, cpu);
        unsigned long flags;
 
        spin_lock_irqsave(lock, flags);
        p->pending_ipi |= 1 << op;
-       gsc_writel(IPI_IRQ - CPU_IRQ_BASE, cpu_data[cpu].hpa);
+       gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
        spin_unlock_irqrestore(lock, flags);
 }
 
@@ -224,10 +225,7 @@ send_IPI_mask(cpumask_t mask, enum ipi_message_type op)
 static inline void
 send_IPI_single(int dest_cpu, enum ipi_message_type op)
 {
-       if (dest_cpu == NO_PROC_ID) {
-               BUG();
-               return;
-       }
+       BUG_ON(dest_cpu == NO_PROC_ID);
 
        ipi_send(dest_cpu, op);
 }
@@ -309,8 +307,7 @@ smp_cpu_init(int cpunum)
        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
-       if(current->mm)
-               BUG();
+       BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
 
        init_IRQ();   /* make sure no IRQs are enabled or pending */
@@ -345,6 +342,7 @@ void __init smp_callin(void)
  */
 int __cpuinit smp_boot_one_cpu(int cpuid)
 {
+       const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
        struct task_struct *idle;
        long timeout;
 
@@ -376,7 +374,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
        smp_init_current_idle_task = idle ;
        mb();
 
-       printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);
+       printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
 
        /*
        ** This gets PDC to release the CPU from a very tight loop.
@@ -387,7 +385,7 @@ int __cpuinit smp_boot_one_cpu(int cpuid)
        ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
        ** contents of memory are valid."
        */
-       gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, cpu_data[cpuid].hpa);
+       gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
        mb();
 
        /* 
@@ -419,12 +417,12 @@ alive:
        return 0;
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
-       int bootstrap_processor=cpu_data[0].cpuid;      /* CPU ID of BSP */
+       int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
 
        /* Setup BSP mappings */
-       printk("SMP: bootstrap CPU ID is %d\n",bootstrap_processor);
+       printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
 
        cpu_set(bootstrap_processor, cpu_online_map);
        cpu_set(bootstrap_processor, cpu_present_map);
index 4d09203bc69307275afcb92f5f3a53d2bdff5057..9d46c43a415291a005fc8ff2ef7331d3ff8dbbb7 100644 (file)
@@ -60,7 +60,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
        unsigned long cycles_elapsed, ticks_elapsed;
        unsigned long cycles_remainder;
        unsigned int cpu = smp_processor_id();
-       struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
+       struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
 
        /* gcc can optimize for "read-only" case with a local clocktick */
        unsigned long cpt = clocktick;
@@ -213,7 +213,7 @@ void __init start_cpu_itimer(void)
 
        mtctl(next_tick, 16);           /* kick off Interval Timer (CR16) */
 
-       cpu_data[cpu].it_value = next_tick;
+       per_cpu(cpu_data, cpu).it_value = next_tick;
 }
 
 struct platform_device rtc_parisc_dev = {
index d71cb018a21e0466a22f30fe25bc7e0cf49850c5..f5159381fdd6f04be4679a0dd66ab474bb78f490 100644 (file)
 #include <linux/cpu.h>
 #include <linux/cache.h>
 
-static struct cpu cpu_devices[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 static int __init topology_init(void)
 {
        int num;
 
        for_each_present_cpu(num) {
-               register_cpu(&cpu_devices[num], num);
+               register_cpu(&per_cpu(cpu_devices, num), num);
        }
        return 0;
 }
index 4c771cd580ecf9abbc4e6d9238accbb0a99bd0d8..ba658d2086f77decbc35404da73287338e27618a 100644 (file)
@@ -745,6 +745,10 @@ void handle_interruption(int code, struct pt_regs *regs)
                /* Fall Through */
        case 27: 
                /* Data memory protection ID trap */
+               if (code == 27 && !user_mode(regs) &&
+                       fixup_exception(regs))
+                       return;
+
                die_if_kernel("Protection id trap", regs, code);
                si.si_code = SEGV_MAPERR;
                si.si_signo = SIGSEGV;
@@ -821,8 +825,8 @@ void handle_interruption(int code, struct pt_regs *regs)
 
 int __init check_ivt(void *iva)
 {
+       extern u32 os_hpmc_size;
        extern const u32 os_hpmc[];
-       extern const u32 os_hpmc_end[];
 
        int i;
        u32 check = 0;
@@ -839,8 +843,7 @@ int __init check_ivt(void *iva)
            *ivap++ = 0;
 
        /* Compute Checksum for HPMC handler */
-
-       length = os_hpmc_end - os_hpmc;
+       length = os_hpmc_size;
        ivap[7] = length;
 
        hpmcp = (u32 *)os_hpmc;
index 6773c582e457a15b3e9ddb5006e462754bddd06e..69dad5a850a8392f5f520dd164f4dda9a4d5445a 100644 (file)
@@ -372,7 +372,7 @@ void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct
        struct pt_regs *r = &t->thread.regs;
        struct pt_regs *r2;
 
-       r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
+       r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
        if (!r2)
                return;
        *r2 = *r;
index 9abed07db7fc0ded2b7e041c5e73f65e1911b211..5069e8b2ca712a85a437b495b1195aefda364a9c 100644 (file)
@@ -261,7 +261,7 @@ static const struct iomap_ops iomem_ops = {
        iomem_write32r,
 };
 
-const struct iomap_ops *iomap_ops[8] = {
+static const struct iomap_ops *iomap_ops[8] = {
        [0] = &ioport_ops,
        [7] = &iomem_ops
 };
index 2d68431fc22e98a818dd3ecaca049c93c9196d5e..bbda909c866e57c1327b6ccd47cf9bab8dc992d4 100644 (file)
@@ -275,7 +275,7 @@ handle_store_error:
 
 
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 {
        register unsigned long src, dst, t1, t2, t3;
        register unsigned char *pcs, *pcd;
index b2e3e9a8cecefce049d43ef1a5d36d61403cc9b9..92c7fa4ecc3f4a27ac6ac79f31f1acda964f967e 100644 (file)
@@ -139,13 +139,41 @@ parisc_acctyp(unsigned long code, unsigned int inst)
                        }
 #endif
 
+int fixup_exception(struct pt_regs *regs)
+{
+       const struct exception_table_entry *fix;
+
+       fix = search_exception_tables(regs->iaoq[0]);
+       if (fix) {
+               struct exception_data *d;
+               d = &__get_cpu_var(exception_data);
+               d->fault_ip = regs->iaoq[0];
+               d->fault_space = regs->isr;
+               d->fault_addr = regs->ior;
+
+               regs->iaoq[0] = ((fix->fixup) & ~3);
+               /*
+                * NOTE: In some cases the faulting instruction
+                * may be in the delay slot of a branch. We
+                * don't want to take the branch, so we don't
+                * increment iaoq[1], instead we set it to be
+                * iaoq[0]+4, and clear the B bit in the PSW
+                */
+               regs->iaoq[1] = regs->iaoq[0] + 4;
+               regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+               return 1;
+       }
+
+       return 0;
+}
+
 void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
        struct vm_area_struct *vma, *prev_vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       const struct exception_table_entry *fix;
        unsigned long acc_type;
        int fault;
 
@@ -229,32 +257,8 @@ bad_area:
 
 no_context:
 
-       if (!user_mode(regs)) {
-               fix = search_exception_tables(regs->iaoq[0]);
-
-               if (fix) {
-                       struct exception_data *d;
-
-                       d = &__get_cpu_var(exception_data);
-                       d->fault_ip = regs->iaoq[0];
-                       d->fault_space = regs->isr;
-                       d->fault_addr = regs->ior;
-
-                       regs->iaoq[0] = ((fix->fixup) & ~3);
-
-                       /*
-                        * NOTE: In some cases the faulting instruction
-                        * may be in the delay slot of a branch. We
-                        * don't want to take the branch, so we don't
-                        * increment iaoq[1], instead we set it to be
-                        * iaoq[0]+4, and clear the B bit in the PSW
-                        */
-
-                       regs->iaoq[1] = regs->iaoq[0] + 4;
-                       regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
-
-                       return;
-               }
+       if (!user_mode(regs) && fixup_exception(regs)) {
+               return;
        }
 
        parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
index dcbf1be149f3486e26f38428499d2a07d0ff604e..f21147f3626a628c2fb78e1e5448a0361f9a630d 100644 (file)
 #include <linux/async_tx.h>
 
 #ifdef CONFIG_DMA_ENGINE
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-       struct dma_chan *chan, enum dma_state state);
-
-static struct dma_client async_tx_dma = {
-       .event_callback = dma_channel_add_remove,
-       /* .cap_mask == 0 defaults to all channels */
-};
-
-/**
- * dma_cap_mask_all - enable iteration over all operation types
- */
-static dma_cap_mask_t dma_cap_mask_all;
-
-/**
- * chan_ref_percpu - tracks channel allocations per core/opertion
- */
-struct chan_ref_percpu {
-       struct dma_chan_ref *ref;
-};
-
-static int channel_table_initialized;
-static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
-
-/**
- * async_tx_lock - protect modification of async_tx_master_list and serialize
- *     rebalance operations
- */
-static spinlock_t async_tx_lock;
-
-static LIST_HEAD(async_tx_master_list);
-
-/* async_tx_issue_pending_all - start all transactions on all channels */
-void async_tx_issue_pending_all(void)
-{
-       struct dma_chan_ref *ref;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-               ref->chan->device->device_issue_pending(ref->chan);
-       rcu_read_unlock();
-}
-EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
-
-/* dma_wait_for_async_tx - spin wait for a transcation to complete
- * @tx: transaction to wait on
- */
-enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-       enum dma_status status;
-       struct dma_async_tx_descriptor *iter;
-       struct dma_async_tx_descriptor *parent;
-
-       if (!tx)
-               return DMA_SUCCESS;
-
-       /* poll through the dependency chain, return when tx is complete */
-       do {
-               iter = tx;
-
-               /* find the root of the unsubmitted dependency chain */
-               do {
-                       parent = iter->parent;
-                       if (!parent)
-                               break;
-                       else
-                               iter = parent;
-               } while (parent);
-
-               /* there is a small window for ->parent == NULL and
-                * ->cookie == -EBUSY
-                */
-               while (iter->cookie == -EBUSY)
-                       cpu_relax();
-
-               status = dma_sync_wait(iter->chan, iter->cookie);
-       } while (status == DMA_IN_PROGRESS || (iter != tx));
-
-       return status;
-}
-EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
-
-/* async_tx_run_dependencies - helper routine for dma drivers to process
- *     (start) dependent operations on their target channel
- * @tx: transaction with dependencies
- */
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
-{
-       struct dma_async_tx_descriptor *dep = tx->next;
-       struct dma_async_tx_descriptor *dep_next;
-       struct dma_chan *chan;
-
-       if (!dep)
-               return;
-
-       chan = dep->chan;
-
-       /* keep submitting up until a channel switch is detected
-        * in that case we will be called again as a result of
-        * processing the interrupt from async_tx_channel_switch
-        */
-       for (; dep; dep = dep_next) {
-               spin_lock_bh(&dep->lock);
-               dep->parent = NULL;
-               dep_next = dep->next;
-               if (dep_next && dep_next->chan == chan)
-                       dep->next = NULL; /* ->next will be submitted */
-               else
-                       dep_next = NULL; /* submit current dep and terminate */
-               spin_unlock_bh(&dep->lock);
-
-               dep->tx_submit(dep);
-       }
-
-       chan->device->device_issue_pending(chan);
-}
-EXPORT_SYMBOL_GPL(async_tx_run_dependencies);
-
-static void
-free_dma_chan_ref(struct rcu_head *rcu)
-{
-       struct dma_chan_ref *ref;
-       ref = container_of(rcu, struct dma_chan_ref, rcu);
-       kfree(ref);
-}
-
-static void
-init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
-{
-       INIT_LIST_HEAD(&ref->node);
-       INIT_RCU_HEAD(&ref->rcu);
-       ref->chan = chan;
-       atomic_set(&ref->count, 0);
-}
-
-/**
- * get_chan_ref_by_cap - returns the nth channel of the given capability
- *     defaults to returning the channel with the desired capability and the
- *     lowest reference count if the index can not be satisfied
- * @cap: capability to match
- * @index: nth channel desired, passing -1 has the effect of forcing the
- *  default return value
- */
-static struct dma_chan_ref *
-get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
-{
-       struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-               if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
-                       if (!min_ref)
-                               min_ref = ref;
-                       else if (atomic_read(&ref->count) <
-                               atomic_read(&min_ref->count))
-                               min_ref = ref;
-
-                       if (index-- == 0) {
-                               ret_ref = ref;
-                               break;
-                       }
-               }
-       rcu_read_unlock();
-
-       if (!ret_ref)
-               ret_ref = min_ref;
-
-       if (ret_ref)
-               atomic_inc(&ret_ref->count);
-
-       return ret_ref;
-}
-
-/**
- * async_tx_rebalance - redistribute the available channels, optimize
- * for cpu isolation in the SMP case, and opertaion isolation in the
- * uniprocessor case
- */
-static void async_tx_rebalance(void)
-{
-       int cpu, cap, cpu_idx = 0;
-       unsigned long flags;
-
-       if (!channel_table_initialized)
-               return;
-
-       spin_lock_irqsave(&async_tx_lock, flags);
-
-       /* undo the last distribution */
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_possible_cpu(cpu) {
-                       struct dma_chan_ref *ref =
-                               per_cpu_ptr(channel_table[cap], cpu)->ref;
-                       if (ref) {
-                               atomic_set(&ref->count, 0);
-                               per_cpu_ptr(channel_table[cap], cpu)->ref =
-                                                                       NULL;
-                       }
-               }
-
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               for_each_online_cpu(cpu) {
-                       struct dma_chan_ref *new;
-                       if (NR_CPUS > 1)
-                               new = get_chan_ref_by_cap(cap, cpu_idx++);
-                       else
-                               new = get_chan_ref_by_cap(cap, -1);
-
-                       per_cpu_ptr(channel_table[cap], cpu)->ref = new;
-               }
-
-       spin_unlock_irqrestore(&async_tx_lock, flags);
-}
-
-static enum dma_state_client
-dma_channel_add_remove(struct dma_client *client,
-       struct dma_chan *chan, enum dma_state state)
-{
-       unsigned long found, flags;
-       struct dma_chan_ref *master_ref, *ref;
-       enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               found = 0;
-               rcu_read_lock();
-               list_for_each_entry_rcu(ref, &async_tx_master_list, node)
-                       if (ref->chan == chan) {
-                               found = 1;
-                               break;
-                       }
-               rcu_read_unlock();
-
-               pr_debug("async_tx: dma resource available [%s]\n",
-                       found ? "old" : "new");
-
-               if (!found)
-                       ack = DMA_ACK;
-               else
-                       break;
-
-               /* add the channel to the generic management list */
-               master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
-               if (master_ref) {
-                       /* keep a reference until async_tx is unloaded */
-                       dma_chan_get(chan);
-                       init_dma_chan_ref(master_ref, chan);
-                       spin_lock_irqsave(&async_tx_lock, flags);
-                       list_add_tail_rcu(&master_ref->node,
-                               &async_tx_master_list);
-                       spin_unlock_irqrestore(&async_tx_lock,
-                               flags);
-               } else {
-                       printk(KERN_WARNING "async_tx: unable to create"
-                               " new master entry in response to"
-                               " a DMA_RESOURCE_ADDED event"
-                               " (-ENOMEM)\n");
-                       return 0;
-               }
-
-               async_tx_rebalance();
-               break;
-       case DMA_RESOURCE_REMOVED:
-               found = 0;
-               spin_lock_irqsave(&async_tx_lock, flags);
-               list_for_each_entry(ref, &async_tx_master_list, node)
-                       if (ref->chan == chan) {
-                               /* permit backing devices to go away */
-                               dma_chan_put(ref->chan);
-                               list_del_rcu(&ref->node);
-                               call_rcu(&ref->rcu, free_dma_chan_ref);
-                               found = 1;
-                               break;
-                       }
-               spin_unlock_irqrestore(&async_tx_lock, flags);
-
-               pr_debug("async_tx: dma resource removed [%s]\n",
-                       found ? "ours" : "not ours");
-
-               if (found)
-                       ack = DMA_ACK;
-               else
-                       break;
-
-               async_tx_rebalance();
-               break;
-       case DMA_RESOURCE_SUSPEND:
-       case DMA_RESOURCE_RESUME:
-               printk(KERN_WARNING "async_tx: does not support dma channel"
-                       " suspend/resume\n");
-               break;
-       default:
-               BUG();
-       }
-
-       return ack;
-}
-
-static int __init
-async_tx_init(void)
+static int __init async_tx_init(void)
 {
-       enum dma_transaction_type cap;
-
-       spin_lock_init(&async_tx_lock);
-       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
-
-       /* an interrupt will never be an explicit operation type.
-        * clearing this bit prevents allocation to a slot in 'channel_table'
-        */
-       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
-
-       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
-               channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
-               if (!channel_table[cap])
-                       goto err;
-       }
-
-       channel_table_initialized = 1;
-       dma_async_client_register(&async_tx_dma);
-       dma_async_client_chan_request(&async_tx_dma);
+       dmaengine_get();
 
        printk(KERN_INFO "async_tx: api initialized (async)\n");
 
        return 0;
-err:
-       printk(KERN_ERR "async_tx: initialization failure\n");
-
-       while (--cap >= 0)
-               free_percpu(channel_table[cap]);
-
-       return 1;
 }
 
 static void __exit async_tx_exit(void)
 {
-       enum dma_transaction_type cap;
-
-       channel_table_initialized = 0;
-
-       for_each_dma_cap_mask(cap, dma_cap_mask_all)
-               if (channel_table[cap])
-                       free_percpu(channel_table[cap]);
-
-       dma_async_client_unregister(&async_tx_dma);
+       dmaengine_put();
 }
 
 /**
@@ -387,16 +54,9 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 {
        /* see if we can keep the chain on one channel */
        if (depend_tx &&
-               dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
+           dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
                return depend_tx->chan;
-       else if (likely(channel_table_initialized)) {
-               struct dma_chan_ref *ref;
-               int cpu = get_cpu();
-               ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
-               put_cpu();
-               return ref ? ref->chan : NULL;
-       } else
-               return NULL;
+       return dma_find_channel(tx_type);
 }
 EXPORT_SYMBOL_GPL(__async_tx_find_channel);
 #else
index 175df54eb6646c595cc23f3410d80a8eaa61eab4..c507a9ac78f475e8569d920f0b5404553fff7b90 100644 (file)
@@ -4556,7 +4556,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
        struct scatterlist *sg = qc->sg;
        int dir = qc->dma_dir;
 
-       WARN_ON(sg == NULL);
+       WARN_ON_ONCE(sg == NULL);
 
        VPRINTK("unmapping %u sg elements\n", qc->n_elem);
 
@@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        unsigned int tag;
 
-       WARN_ON(qc == NULL);    /* ata_qc_from_tag _might_ return NULL */
+       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
 
        qc->flags = 0;
        tag = qc->tag;
@@ -4791,8 +4791,8 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
        struct ata_port *ap = qc->ap;
        struct ata_link *link = qc->dev->link;
 
-       WARN_ON(qc == NULL);    /* ata_qc_from_tag _might_ return NULL */
-       WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+       WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+       WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
 
        if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
                ata_sg_clean(qc);
@@ -4878,7 +4878,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
                struct ata_device *dev = qc->dev;
                struct ata_eh_info *ehi = &dev->link->eh_info;
 
-               WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
+               WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
 
                if (unlikely(qc->err_mask))
                        qc->flags |= ATA_QCFLAG_FAILED;
@@ -5000,16 +5000,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
         * check is skipped for old EH because it reuses active qc to
         * request ATAPI sense.
         */
-       WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+       WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
 
        if (ata_is_ncq(prot)) {
-               WARN_ON(link->sactive & (1 << qc->tag));
+               WARN_ON_ONCE(link->sactive & (1 << qc->tag));
 
                if (!link->sactive)
                        ap->nr_active_links++;
                link->sactive |= 1 << qc->tag;
        } else {
-               WARN_ON(link->sactive);
+               WARN_ON_ONCE(link->sactive);
 
                ap->nr_active_links++;
                link->active_tag = qc->tag;
index c59ad76c84b1ebe2e3e28b7a08166ffb5f0313fd..0eae9b4535564ce8a64be5428faf9382683cf985 100644 (file)
@@ -578,7 +578,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
        }
 
        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-               WARN_ON(!ioaddr->ctl_addr);
+               WARN_ON_ONCE(!ioaddr->ctl_addr);
                iowrite8(tf->hob_feature, ioaddr->feature_addr);
                iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
                iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
@@ -651,7 +651,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
                        iowrite8(tf->ctl, ioaddr->ctl_addr);
                        ap->last_ctl = tf->ctl;
                } else
-                       WARN_ON(1);
+                       WARN_ON_ONCE(1);
        }
 }
 EXPORT_SYMBOL_GPL(ata_sff_tf_read);
@@ -891,7 +891,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
                /* READ/WRITE MULTIPLE */
                unsigned int nsect;
 
-               WARN_ON(qc->dev->multi_count == 0);
+               WARN_ON_ONCE(qc->dev->multi_count == 0);
 
                nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
                            qc->dev->multi_count);
@@ -918,7 +918,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
        /* send SCSI cdb */
        DPRINTK("send cdb\n");
-       WARN_ON(qc->dev->cdb_len < 12);
+       WARN_ON_ONCE(qc->dev->cdb_len < 12);
 
        ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
        ata_sff_sync(ap);
@@ -1014,7 +1014,7 @@ next_sg:
        }
 
        /* consumed can be larger than count only for the last transfer */
-       WARN_ON(qc->cursg && count != consumed);
+       WARN_ON_ONCE(qc->cursg && count != consumed);
 
        if (bytes)
                goto next_sg;
@@ -1172,13 +1172,13 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
        unsigned long flags = 0;
        int poll_next;
 
-       WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+       WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
        /* Make sure ata_sff_qc_issue() does not throw things
         * like DMA polling into the workqueue. Notice that
         * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
         */
-       WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
+       WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
 
 fsm_start:
        DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
@@ -1387,7 +1387,7 @@ fsm_start:
                DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
                        ap->print_id, qc->dev->devno, status);
 
-               WARN_ON(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
+               WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
 
                ap->hsm_task_state = HSM_ST_IDLE;
 
@@ -1423,7 +1423,7 @@ void ata_pio_task(struct work_struct *work)
        int poll_next;
 
 fsm_start:
-       WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
+       WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);
 
        /*
         * This is purely heuristic.  This is a fast path.
@@ -1512,7 +1512,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                break;
 
        case ATA_PROT_DMA:
-               WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+               WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
                ap->ops->bmdma_setup(qc);           /* set up bmdma */
@@ -1564,7 +1564,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                break;
 
        case ATAPI_PROT_DMA:
-               WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+               WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
 
                ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
                ap->ops->bmdma_setup(qc);           /* set up bmdma */
@@ -1576,7 +1576,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
                break;
 
        default:
-               WARN_ON(1);
+               WARN_ON_ONCE(1);
                return AC_ERR_SYSTEM;
        }
 
index d883e1b8bb8c2ec09ec71b5eb64c660a7b3c571c..55433849bfa67a30a1cbddd5724f076057f476a4 100644 (file)
@@ -270,6 +270,6 @@ static void __exit dca_exit(void)
        dca_sysfs_exit();
 }
 
-subsys_initcall(dca_init);
+arch_initcall(dca_init);
 module_exit(dca_exit);
 
index 904e57558bb5ea9f34dae3d3df84f5070b36744f..e34b06420816bb83d7b3187d703455fbc680ebe9 100644 (file)
@@ -33,7 +33,6 @@ config INTEL_IOATDMA
 config INTEL_IOP_ADMA
        tristate "Intel IOP ADMA support"
        depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
-       select ASYNC_CORE
        select DMA_ENGINE
        help
          Enable support for the Intel(R) IOP Series RAID engines.
@@ -59,7 +58,6 @@ config FSL_DMA
 config MV_XOR
        bool "Marvell XOR engine support"
        depends on PLAT_ORION
-       select ASYNC_CORE
        select DMA_ENGINE
        ---help---
          Enable support for the Marvell XOR engine.
index 657996517374f30f51f10257dd0ef0ee40ae66b9..403dbe781122660267ebf0919f9447a7679d72a5 100644 (file)
  *
  * LOCKING:
  *
- * The subsystem keeps two global lists, dma_device_list and dma_client_list.
- * Both of these are protected by a mutex, dma_list_mutex.
+ * The subsystem keeps a global list of dma_device structs; it is protected by
+ * a mutex, dma_list_mutex.
+ *
+ * A subsystem can get access to a channel by calling dmaengine_get() followed
+ * by dma_find_channel(), or if it has need for an exclusive channel it can call
+ * dma_request_channel().  Once a channel is allocated a reference is taken
+ * against its corresponding driver to disable removal.
  *
  * Each device has a channels list, which runs unlocked but is never modified
  * once the device is registered, it's just setup by the driver.
  *
- * Each client is responsible for keeping track of the channels it uses.  See
- * the definition of dma_event_callback in dmaengine.h.
- *
- * Each device has a kref, which is initialized to 1 when the device is
- * registered. A kref_get is done for each device registered.  When the
- * device is released, the corresponding kref_put is done in the release
- * method. Every time one of the device's channels is allocated to a client,
- * a kref_get occurs.  When the channel is freed, the corresponding kref_put
- * happens. The device's release function does a completion, so
- * unregister_device does a remove event, device_unregister, a kref_put
- * for the first reference, then waits on the completion for all other
- * references to finish.
- *
- * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
- * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
- * signals that it wants to use a channel, and dma_chan_put is called when
- * a channel is removed or a client using it is unregistered.  A client can
- * take extra references per outstanding transaction, as is the case with
- * the NET DMA client.  The release function does a kref_put on the device.
- *     -ChrisL, DanW
+ * See Documentation/dmaengine.txt for more details
  */
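The comment above describes the new opt-in model: clients register interest with dmaengine_get(), look up a channel per operation type, and drop their interest with dmaengine_put(). A minimal, illustrative sketch of such a client follows; it is not part of this patch, it assumes the buffers are DMA-able, it skips most error handling, and a real client would normally take its dmaengine_get() reference once at init rather than per call.

	/* Hypothetical client sketch -- illustrative only, not in this patch. */
	#include <linux/dmaengine.h>

	static int example_offload_copy(void *dest, void *src, size_t len)
	{
		struct dma_chan *chan;
		dma_cookie_t cookie;

		dmaengine_get();                     /* register interest in public channels */

		chan = dma_find_channel(DMA_MEMCPY); /* per-cpu lookup; may return NULL */
		if (!chan) {
			dmaengine_put();
			return -ENODEV;              /* fall back to plain memcpy() here */
		}

		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
		if (cookie < 0) {
			dmaengine_put();
			return cookie;
		}

		dma_issue_pending_all();             /* flush the submitted operation */

		if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
			pr_err("example: offloaded copy failed\n");

		dmaengine_put();                     /* drop the reference when done */
		return 0;
	}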
 
 #include <linux/init.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
 #include <linux/jiffies.h>
+#include <linux/rculist.h>
+#include <linux/idr.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static LIST_HEAD(dma_device_list);
-static LIST_HEAD(dma_client_list);
+static long dmaengine_ref_count;
+static struct idr dma_idr;
 
 /* --- sysfs implementation --- */
 
+/**
+ * dev_to_dma_chan - convert a device pointer to its sysfs container object
+ * @dev - device node
+ *
+ * Must be called under dma_list_mutex
+ */
+static struct dma_chan *dev_to_dma_chan(struct device *dev)
+{
+       struct dma_chan_dev *chan_dev;
+
+       chan_dev = container_of(dev, typeof(*chan_dev), device);
+       return chan_dev->chan;
+}
+
 static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
+       struct dma_chan *chan;
        unsigned long count = 0;
        int i;
+       int err;
 
-       for_each_possible_cpu(i)
-               count += per_cpu_ptr(chan->local, i)->memcpy_count;
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan) {
+               for_each_possible_cpu(i)
+                       count += per_cpu_ptr(chan->local, i)->memcpy_count;
+               err = sprintf(buf, "%lu\n", count);
+       } else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
 
-       return sprintf(buf, "%lu\n", count);
+       return err;
 }
 
 static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
+       struct dma_chan *chan;
        unsigned long count = 0;
        int i;
+       int err;
 
-       for_each_possible_cpu(i)
-               count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan) {
+               for_each_possible_cpu(i)
+                       count += per_cpu_ptr(chan->local, i)->bytes_transferred;
+               err = sprintf(buf, "%lu\n", count);
+       } else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
 
-       return sprintf(buf, "%lu\n", count);
+       return err;
 }
 
 static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
-       int in_use = 0;
-
-       if (unlikely(chan->slow_ref) &&
-               atomic_read(&chan->refcount.refcount) > 1)
-               in_use = 1;
-       else {
-               if (local_read(&(per_cpu_ptr(chan->local,
-                       get_cpu())->refcount)) > 0)
-                       in_use = 1;
-               put_cpu();
-       }
+       struct dma_chan *chan;
+       int err;
 
-       return sprintf(buf, "%d\n", in_use);
+       mutex_lock(&dma_list_mutex);
+       chan = dev_to_dma_chan(dev);
+       if (chan)
+               err = sprintf(buf, "%d\n", chan->client_count);
+       else
+               err = -ENODEV;
+       mutex_unlock(&dma_list_mutex);
+
+       return err;
 }
 
 static struct device_attribute dma_attrs[] = {
@@ -127,76 +144,110 @@ static struct device_attribute dma_attrs[] = {
        __ATTR_NULL
 };
 
-static void dma_async_device_cleanup(struct kref *kref);
-
-static void dma_dev_release(struct device *dev)
+static void chan_dev_release(struct device *dev)
 {
-       struct dma_chan *chan = to_dma_chan(dev);
-       kref_put(&chan->device->refcount, dma_async_device_cleanup);
+       struct dma_chan_dev *chan_dev;
+
+       chan_dev = container_of(dev, typeof(*chan_dev), device);
+       if (atomic_dec_and_test(chan_dev->idr_ref)) {
+               mutex_lock(&dma_list_mutex);
+               idr_remove(&dma_idr, chan_dev->dev_id);
+               mutex_unlock(&dma_list_mutex);
+               kfree(chan_dev->idr_ref);
+       }
+       kfree(chan_dev);
 }
 
 static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
-       .dev_release    = dma_dev_release,
+       .dev_release    = chan_dev_release,
 };
 
 /* --- client and device registration --- */
 
-#define dma_chan_satisfies_mask(chan, mask) \
-       __dma_chan_satisfies_mask((chan), &(mask))
+#define dma_device_satisfies_mask(device, mask) \
+       __dma_device_satisfies_mask((device), &(mask))
 static int
-__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
+__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 {
        dma_cap_mask_t has;
 
-       bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
+       bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 }
 
+static struct module *dma_chan_to_owner(struct dma_chan *chan)
+{
+       return chan->device->dev->driver->owner;
+}
+
 /**
- * dma_client_chan_alloc - try to allocate channels to a client
- * @client: &dma_client
+ * balance_ref_count - catch up the channel reference count
+ * @chan - channel to balance ->client_count versus dmaengine_ref_count
  *
- * Called with dma_list_mutex held.
+ * balance_ref_count must be called under dma_list_mutex
  */
-static void dma_client_chan_alloc(struct dma_client *client)
+static void balance_ref_count(struct dma_chan *chan)
 {
-       struct dma_device *device;
-       struct dma_chan *chan;
-       int desc;       /* allocated descriptor count */
-       enum dma_state_client ack;
+       struct module *owner = dma_chan_to_owner(chan);
 
-       /* Find a channel */
-       list_for_each_entry(device, &dma_device_list, global_node) {
-               /* Does the client require a specific DMA controller? */
-               if (client->slave && client->slave->dma_dev
-                               && client->slave->dma_dev != device->dev)
-                       continue;
+       while (chan->client_count < dmaengine_ref_count) {
+               __module_get(owner);
+               chan->client_count++;
+       }
+}
 
-               list_for_each_entry(chan, &device->channels, device_node) {
-                       if (!dma_chan_satisfies_mask(chan, client->cap_mask))
-                               continue;
+/**
+ * dma_chan_get - try to grab a dma channel's parent driver module
+ * @chan - channel to grab
+ *
+ * Must be called under dma_list_mutex
+ */
+static int dma_chan_get(struct dma_chan *chan)
+{
+       int err = -ENODEV;
+       struct module *owner = dma_chan_to_owner(chan);
+
+       if (chan->client_count) {
+               __module_get(owner);
+               err = 0;
+       } else if (try_module_get(owner))
+               err = 0;
+
+       if (err == 0)
+               chan->client_count++;
+
+       /* allocate upon first client reference */
+       if (chan->client_count == 1 && err == 0) {
+               int desc_cnt = chan->device->device_alloc_chan_resources(chan);
+
+               if (desc_cnt < 0) {
+                       err = desc_cnt;
+                       chan->client_count = 0;
+                       module_put(owner);
+               } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
+                       balance_ref_count(chan);
+       }
 
-                       desc = chan->device->device_alloc_chan_resources(
-                                       chan, client);
-                       if (desc >= 0) {
-                               ack = client->event_callback(client,
-                                               chan,
-                                               DMA_RESOURCE_AVAILABLE);
+       return err;
+}
 
-                               /* we are done once this client rejects
-                                * an available resource
-                                */
-                               if (ack == DMA_ACK) {
-                                       dma_chan_get(chan);
-                                       chan->client_count++;
-                               } else if (ack == DMA_NAK)
-                                       return;
-                       }
-               }
-       }
+/**
+ * dma_chan_put - drop a reference to a dma channel's parent driver module
+ * @chan - channel to release
+ *
+ * Must be called under dma_list_mutex
+ */
+static void dma_chan_put(struct dma_chan *chan)
+{
+       if (!chan->client_count)
+               return; /* this channel failed alloc_chan_resources */
+       chan->client_count--;
+       module_put(dma_chan_to_owner(chan));
+       if (chan->client_count == 0)
+               chan->device->device_free_chan_resources(chan);
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
@@ -218,138 +269,342 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 EXPORT_SYMBOL(dma_sync_wait);
 
 /**
- * dma_chan_cleanup - release a DMA channel's resources
- * @kref: kernel reference structure that contains the DMA channel device
+ * dma_cap_mask_all - enable iteration over all operation types
+ */
+static dma_cap_mask_t dma_cap_mask_all;
+
+/**
+ * dma_chan_tbl_ent - tracks channel allocations per core/operation
+ * @chan - associated channel for this entry
+ */
+struct dma_chan_tbl_ent {
+       struct dma_chan *chan;
+};
+
+/**
+ * channel_table - percpu lookup table for memory-to-memory offload providers
  */
-void dma_chan_cleanup(struct kref *kref)
+static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+
+static int __init dma_channel_table_init(void)
 {
-       struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
-       chan->device->device_free_chan_resources(chan);
-       kref_put(&chan->device->refcount, dma_async_device_cleanup);
+       enum dma_transaction_type cap;
+       int err = 0;
+
+       bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
+
+       /* 'interrupt', 'private', and 'slave' are channel capabilities,
+        * but are not associated with an operation so they do not need
+        * an entry in the channel_table
+        */
+       clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
+       clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
+       clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
+
+       for_each_dma_cap_mask(cap, dma_cap_mask_all) {
+               channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
+               if (!channel_table[cap]) {
+                       err = -ENOMEM;
+                       break;
+               }
+       }
+
+       if (err) {
+               pr_err("dmaengine: initialization failure\n");
+               for_each_dma_cap_mask(cap, dma_cap_mask_all)
+                       if (channel_table[cap])
+                               free_percpu(channel_table[cap]);
+       }
+
+       return err;
 }
-EXPORT_SYMBOL(dma_chan_cleanup);
+arch_initcall(dma_channel_table_init);
 
-static void dma_chan_free_rcu(struct rcu_head *rcu)
+/**
+ * dma_find_channel - find a channel to carry out the operation
+ * @tx_type: transaction type
+ */
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {
-       struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
-       int bias = 0x7FFFFFFF;
-       int i;
-       for_each_possible_cpu(i)
-               bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
-       atomic_sub(bias, &chan->refcount.refcount);
-       kref_put(&chan->refcount, dma_chan_cleanup);
+       struct dma_chan *chan;
+       int cpu;
+
+       WARN_ONCE(dmaengine_ref_count == 0,
+                 "client called %s without a reference", __func__);
+
+       cpu = get_cpu();
+       chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
+       put_cpu();
+
+       return chan;
 }
+EXPORT_SYMBOL(dma_find_channel);
 
-static void dma_chan_release(struct dma_chan *chan)
+/**
+ * dma_issue_pending_all - flush all pending operations across all channels
+ */
+void dma_issue_pending_all(void)
 {
-       atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
-       chan->slow_ref = 1;
-       call_rcu(&chan->rcu, dma_chan_free_rcu);
+       struct dma_device *device;
+       struct dma_chan *chan;
+
+       WARN_ONCE(dmaengine_ref_count == 0,
+                 "client called %s without a reference", __func__);
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node)
+                       if (chan->client_count)
+                               device->device_issue_pending(chan);
+       }
+       rcu_read_unlock();
 }
+EXPORT_SYMBOL(dma_issue_pending_all);
 
 /**
- * dma_chans_notify_available - broadcast available channels to the clients
+ * nth_chan - returns the nth channel of the given capability
+ * @cap: capability to match
+ * @n: nth channel desired
+ *
+ * Defaults to returning the channel with the desired capability and the
+ * lowest reference count when 'n' cannot be satisfied.  Must be called
+ * under dma_list_mutex.
  */
-static void dma_clients_notify_available(void)
+static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 {
-       struct dma_client *client;
+       struct dma_device *device;
+       struct dma_chan *chan;
+       struct dma_chan *ret = NULL;
+       struct dma_chan *min = NULL;
 
-       mutex_lock(&dma_list_mutex);
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (!dma_has_cap(cap, device->cap_mask) ||
+                   dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       if (!chan->client_count)
+                               continue;
+                       if (!min)
+                               min = chan;
+                       else if (chan->table_count < min->table_count)
+                               min = chan;
+
+                       if (n-- == 0) {
+                               ret = chan;
+                               break; /* done */
+                       }
+               }
+               if (ret)
+                       break; /* done */
+       }
 
-       list_for_each_entry(client, &dma_client_list, global_node)
-               dma_client_chan_alloc(client);
+       if (!ret)
+               ret = min;
 
-       mutex_unlock(&dma_list_mutex);
+       if (ret)
+               ret->table_count++;
+
+       return ret;
 }
 
 /**
- * dma_chans_notify_available - tell the clients that a channel is going away
- * @chan: channel on its way out
+ * dma_channel_rebalance - redistribute the available channels
+ *
+ * Optimize for cpu isolation (each cpu gets a dedicated channel for an
+ * operation type) in the SMP case,  and operation isolation (avoid
+ * multi-tasking channels) in the non-SMP case.  Must be called under
+ * dma_list_mutex.
  */
-static void dma_clients_notify_removed(struct dma_chan *chan)
+static void dma_channel_rebalance(void)
 {
-       struct dma_client *client;
-       enum dma_state_client ack;
+       struct dma_chan *chan;
+       struct dma_device *device;
+       int cpu;
+       int cap;
+       int n;
 
-       mutex_lock(&dma_list_mutex);
+       /* undo the last distribution */
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_possible_cpu(cpu)
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
+
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node)
+                       chan->table_count = 0;
+       }
 
-       list_for_each_entry(client, &dma_client_list, global_node) {
-               ack = client->event_callback(client, chan,
-                               DMA_RESOURCE_REMOVED);
+       /* don't populate the channel_table if no clients are available */
+       if (!dmaengine_ref_count)
+               return;
 
-               /* client was holding resources for this channel so
-                * free it
-                */
-               if (ack == DMA_ACK) {
-                       dma_chan_put(chan);
-                       chan->client_count--;
+       /* redistribute available channels */
+       n = 0;
+       for_each_dma_cap_mask(cap, dma_cap_mask_all)
+               for_each_online_cpu(cpu) {
+                       if (num_possible_cpus() > 1)
+                               chan = nth_chan(cap, n++);
+                       else
+                               chan = nth_chan(cap, -1);
+
+                       per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
+               }
+}
+
+static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
+                                         dma_filter_fn fn, void *fn_param)
+{
+       struct dma_chan *chan;
+
+       if (!__dma_device_satisfies_mask(dev, mask)) {
+               pr_debug("%s: wrong capabilities\n", __func__);
+               return NULL;
+       }
+       /* devices with multiple channels need special handling as we need to
+        * ensure that all channels are either private or public.
+        */
+       if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
+               list_for_each_entry(chan, &dev->channels, device_node) {
+                       /* some channels are already publicly allocated */
+                       if (chan->client_count)
+                               return NULL;
                }
+
+       list_for_each_entry(chan, &dev->channels, device_node) {
+               if (chan->client_count) {
+                       pr_debug("%s: %s busy\n",
+                                __func__, dma_chan_name(chan));
+                       continue;
+               }
+               if (fn && !fn(chan, fn_param)) {
+                       pr_debug("%s: %s filter said false\n",
+                                __func__, dma_chan_name(chan));
+                       continue;
+               }
+               return chan;
        }
 
-       mutex_unlock(&dma_list_mutex);
+       return NULL;
 }
 
 /**
- * dma_async_client_register - register a &dma_client
- * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
+ * dma_request_channel - try to allocate an exclusive channel
+ * @mask: capabilities that the channel must satisfy
+ * @fn: optional callback to disposition available channels
+ * @fn_param: opaque parameter to pass to dma_filter_fn
  */
-void dma_async_client_register(struct dma_client *client)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
 {
-       /* validate client data */
-       BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
-               !client->slave);
+       struct dma_device *device, *_d;
+       struct dma_chan *chan = NULL;
+       int err;
 
+       /* Find a channel */
+       mutex_lock(&dma_list_mutex);
+       list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+               chan = private_candidate(mask, device, fn, fn_param);
+               if (chan) {
+                       /* Found a suitable channel, try to grab, prep, and
+                        * return it.  We first set DMA_PRIVATE to disable
+                        * balance_ref_count as this channel will not be
+                        * published in the general-purpose allocator
+                        */
+                       dma_cap_set(DMA_PRIVATE, device->cap_mask);
+                       err = dma_chan_get(chan);
+
+                       if (err == -ENODEV) {
+                               pr_debug("%s: %s module removed\n", __func__,
+                                        dma_chan_name(chan));
+                               list_del_rcu(&device->global_node);
+                       } else if (err)
+                               pr_err("dmaengine: failed to get %s: (%d)\n",
+                                      dma_chan_name(chan), err);
+                       else
+                               break;
+                       chan = NULL;
+               }
+       }
+       mutex_unlock(&dma_list_mutex);
+
+       pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+                chan ? dma_chan_name(chan) : NULL);
+
+       return chan;
+}
+EXPORT_SYMBOL_GPL(__dma_request_channel);
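An illustrative consumer of this exclusive-channel path (not part of the patch, modelled on the dmatest conversion further down in this commit): a filter callback plus the dma_request_channel()/dma_release_channel() pair. Matching on a controller's struct device is just one hypothetical filter criterion.

	/* Hypothetical exclusive-channel consumer -- illustrative only. */
	static bool example_filter(struct dma_chan *chan, void *param)
	{
		struct device *wanted_dev = param;   /* fn_param passed straight through */

		/* accept only channels belonging to the requested controller */
		return chan->device->dev == wanted_dev;
	}

	static struct dma_chan *example_grab_channel(struct device *wanted_dev)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* NULL if no free channel satisfies both the mask and the filter */
		return dma_request_channel(mask, example_filter, wanted_dev);
	}

	/* ...and when the consumer is done with it: dma_release_channel(chan); */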
+
+void dma_release_channel(struct dma_chan *chan)
+{
        mutex_lock(&dma_list_mutex);
-       list_add_tail(&client->global_node, &dma_client_list);
+       WARN_ONCE(chan->client_count != 1,
+                 "chan reference count %d != 1\n", chan->client_count);
+       dma_chan_put(chan);
        mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_register);
+EXPORT_SYMBOL_GPL(dma_release_channel);
 
 /**
- * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client: &dma_client to free
- *
- * Force frees any allocated DMA channels, frees the &dma_client memory
+ * dmaengine_get - register interest in dma_channels
  */
-void dma_async_client_unregister(struct dma_client *client)
+void dmaengine_get(void)
 {
-       struct dma_device *device;
+       struct dma_device *device, *_d;
        struct dma_chan *chan;
-       enum dma_state_client ack;
-
-       if (!client)
-               return;
+       int err;
 
        mutex_lock(&dma_list_mutex);
-       /* free all channels the client is holding */
-       list_for_each_entry(device, &dma_device_list, global_node)
-               list_for_each_entry(chan, &device->channels, device_node) {
-                       ack = client->event_callback(client, chan,
-                               DMA_RESOURCE_REMOVED);
+       dmaengine_ref_count++;
 
-                       if (ack == DMA_ACK) {
-                               dma_chan_put(chan);
-                               chan->client_count--;
-                       }
+       /* try to grab channels */
+       list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       err = dma_chan_get(chan);
+                       if (err == -ENODEV) {
+                               /* module removed before we could use it */
+                               list_del_rcu(&device->global_node);
+                               break;
+                       } else if (err)
+                               pr_err("dmaengine: failed to get %s: (%d)\n",
+                                      dma_chan_name(chan), err);
                }
+       }
 
-       list_del(&client->global_node);
+       /* if this is the first reference and there were channels
+        * waiting we need to rebalance to get those channels
+        * incorporated into the channel table
+        */
+       if (dmaengine_ref_count == 1)
+               dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_unregister);
+EXPORT_SYMBOL(dmaengine_get);
 
 /**
- * dma_async_client_chan_request - send all available channels to the
- * client that satisfy the capability mask
- * @client - requester
+ * dmaengine_put - let dma drivers be removed when ref_count == 0
  */
-void dma_async_client_chan_request(struct dma_client *client)
+void dmaengine_put(void)
 {
+       struct dma_device *device;
+       struct dma_chan *chan;
+
        mutex_lock(&dma_list_mutex);
-       dma_client_chan_alloc(client);
+       dmaengine_ref_count--;
+       BUG_ON(dmaengine_ref_count < 0);
+       /* drop channel references */
+       list_for_each_entry(device, &dma_device_list, global_node) {
+               if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
+                       continue;
+               list_for_each_entry(chan, &device->channels, device_node)
+                       dma_chan_put(chan);
+       }
        mutex_unlock(&dma_list_mutex);
 }
-EXPORT_SYMBOL(dma_async_client_chan_request);
+EXPORT_SYMBOL(dmaengine_put);
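The expected pairing for in-kernel clients is one dmaengine_get() when the client starts caring about channels and one dmaengine_put() when it stops, typically at module init and exit; a hypothetical sketch:

	/* Hypothetical client lifetime -- illustrative only. */
	static int __init example_client_init(void)
	{
		dmaengine_get();        /* pin public channels and their parent drivers */
		return 0;
	}

	static void __exit example_client_exit(void)
	{
		dmaengine_put();        /* let dma drivers be unloaded again */
	}

	module_init(example_client_init);
	module_exit(example_client_exit);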
 
 /**
  * dma_async_device_register - registers DMA devices found
@@ -357,9 +612,9 @@ EXPORT_SYMBOL(dma_async_client_chan_request);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       static int id;
        int chancnt = 0, rc;
        struct dma_chan* chan;
+       atomic_t *idr_ref;
 
        if (!device)
                return -ENODEV;
@@ -386,57 +641,83 @@ int dma_async_device_register(struct dma_device *device)
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);
 
-       init_completion(&device->done);
-       kref_init(&device->refcount);
-
+       idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
+       if (!idr_ref)
+               return -ENOMEM;
+       atomic_set(idr_ref, 0);
+ idr_retry:
+       if (!idr_pre_get(&dma_idr, GFP_KERNEL))
+               return -ENOMEM;
        mutex_lock(&dma_list_mutex);
-       device->dev_id = id++;
+       rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
+       if (rc == -EAGAIN)
+               goto idr_retry;
+       else if (rc != 0)
+               return rc;
 
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;
+               chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
+               if (chan->dev == NULL) {
+                       free_percpu(chan->local);
+                       continue;
+               }
 
                chan->chan_id = chancnt++;
-               chan->dev.class = &dma_devclass;
-               chan->dev.parent = device->dev;
-               dev_set_name(&chan->dev, "dma%dchan%d",
+               chan->dev->device.class = &dma_devclass;
+               chan->dev->device.parent = device->dev;
+               chan->dev->chan = chan;
+               chan->dev->idr_ref = idr_ref;
+               chan->dev->dev_id = device->dev_id;
+               atomic_inc(idr_ref);
+               dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);
 
-               rc = device_register(&chan->dev);
+               rc = device_register(&chan->dev->device);
                if (rc) {
-                       chancnt--;
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }
-
-               /* One for the channel, one of the class device */
-               kref_get(&device->refcount);
-               kref_get(&device->refcount);
-               kref_init(&chan->refcount);
                chan->client_count = 0;
-               chan->slow_ref = 0;
-               INIT_RCU_HEAD(&chan->rcu);
        }
+       device->chancnt = chancnt;
 
        mutex_lock(&dma_list_mutex);
-       list_add_tail(&device->global_node, &dma_device_list);
+       /* take references on public channels */
+       if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
+               list_for_each_entry(chan, &device->channels, device_node) {
+                       /* if clients are already waiting for channels we need
+                        * to take references on their behalf
+                        */
+                       if (dma_chan_get(chan) == -ENODEV) {
+                               /* note we can only get here for the first
+                                * channel as the remaining channels are
+                                * guaranteed to get a reference
+                                */
+                               rc = -ENODEV;
+                               mutex_unlock(&dma_list_mutex);
+                               goto err_out;
+                       }
+               }
+       list_add_tail_rcu(&device->global_node, &dma_device_list);
+       dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 
-       dma_clients_notify_available();
-
        return 0;
 
 err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
-               kref_put(&device->refcount, dma_async_device_cleanup);
-               device_unregister(&chan->dev);
-               chancnt--;
+               mutex_lock(&dma_list_mutex);
+               chan->dev->chan = NULL;
+               mutex_unlock(&dma_list_mutex);
+               device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
@@ -444,37 +725,30 @@ err_out:
 EXPORT_SYMBOL(dma_async_device_register);
 
 /**
- * dma_async_device_cleanup - function called when all references are released
- * @kref: kernel reference object
- */
-static void dma_async_device_cleanup(struct kref *kref)
-{
-       struct dma_device *device;
-
-       device = container_of(kref, struct dma_device, refcount);
-       complete(&device->done);
-}
-
-/**
- * dma_async_device_unregister - unregisters DMA devices
+ * dma_async_device_unregister - unregister a DMA device
  * @device: &dma_device
+ *
+ * This routine is called by dma driver exit routines; dmaengine holds module
+ * references to prevent it from being called while channels are in use.
  */
 void dma_async_device_unregister(struct dma_device *device)
 {
        struct dma_chan *chan;
 
        mutex_lock(&dma_list_mutex);
-       list_del(&device->global_node);
+       list_del_rcu(&device->global_node);
+       dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
 
        list_for_each_entry(chan, &device->channels, device_node) {
-               dma_clients_notify_removed(chan);
-               device_unregister(&chan->dev);
-               dma_chan_release(chan);
+               WARN_ONCE(chan->client_count,
+                         "%s called while %d clients hold a reference\n",
+                         __func__, chan->client_count);
+               mutex_lock(&dma_list_mutex);
+               chan->dev->chan = NULL;
+               mutex_unlock(&dma_list_mutex);
+               device_unregister(&chan->dev->device);
        }
-
-       kref_put(&device->refcount, dma_async_device_cleanup);
-       wait_for_completion(&device->done);
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
@@ -626,10 +900,96 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 }
 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 
+/* dma_wait_for_async_tx - spin wait for a transaction to complete
+ * @tx: in-flight transaction to wait on
+ *
+ * This routine assumes that tx was obtained from a call to async_memcpy,
+ * async_xor, async_memset, etc., which ensures that tx is "in-flight" (prepped
+ * and submitted).  Walking the parent chain is only meant to cover for DMA
+ * drivers that do not implement the DMA_INTERRUPT capability and may race with
+ * the driver's descriptor cleanup routine.
+ */
+enum dma_status
+dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+       enum dma_status status;
+       struct dma_async_tx_descriptor *iter;
+       struct dma_async_tx_descriptor *parent;
+
+       if (!tx)
+               return DMA_SUCCESS;
+
+       WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
+                 " %s\n", __func__, dma_chan_name(tx->chan));
+
+       /* poll through the dependency chain, return when tx is complete */
+       do {
+               iter = tx;
+
+               /* find the root of the unsubmitted dependency chain */
+               do {
+                       parent = iter->parent;
+                       if (!parent)
+                               break;
+                       else
+                               iter = parent;
+               } while (parent);
+
+               /* there is a small window for ->parent == NULL and
+                * ->cookie == -EBUSY
+                */
+               while (iter->cookie == -EBUSY)
+                       cpu_relax();
+
+               status = dma_sync_wait(iter->chan, iter->cookie);
+       } while (status == DMA_IN_PROGRESS || (iter != tx));
+
+       return status;
+}
+EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
+
+/* dma_run_dependencies - helper routine for dma drivers to process
+ *     (start) dependent operations on their target channel
+ * @tx: transaction with dependencies
+ */
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
+{
+       struct dma_async_tx_descriptor *dep = tx->next;
+       struct dma_async_tx_descriptor *dep_next;
+       struct dma_chan *chan;
+
+       if (!dep)
+               return;
+
+       chan = dep->chan;
+
+       /* keep submitting up until a channel switch is detected
+        * in that case we will be called again as a result of
+        * processing the interrupt from async_tx_channel_switch
+        */
+       for (; dep; dep = dep_next) {
+               spin_lock_bh(&dep->lock);
+               dep->parent = NULL;
+               dep_next = dep->next;
+               if (dep_next && dep_next->chan == chan)
+                       dep->next = NULL; /* ->next will be submitted */
+               else
+                       dep_next = NULL; /* submit current dep and terminate */
+               spin_unlock_bh(&dep->lock);
+
+               dep->tx_submit(dep);
+       }
+
+       chan->device->device_issue_pending(chan);
+}
+EXPORT_SYMBOL_GPL(dma_run_dependencies);
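For driver writers, the helper above is meant to be called from the descriptor cleanup path once a transaction has completed; a hypothetical sketch of that call site (the function name is made up, the fields follow struct dma_async_tx_descriptor):

	/* Hypothetical driver cleanup path -- illustrative only. */
	static void example_complete_txd(struct dma_async_tx_descriptor *txd)
	{
		/* invoke the client's completion callback, if any */
		if (txd->callback)
			txd->callback(txd->callback_param);

		/* then start any operations that were queued behind this one */
		dma_run_dependencies(txd);
	}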
+
 static int __init dma_bus_init(void)
 {
+       idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
 }
-subsys_initcall(dma_bus_init);
+arch_initcall(dma_bus_init);
+
 
index ed9636bfb54af0e1805ed3ab4dc039b59e434213..3603f1ea5b2806141e6d01006f27a8c213a5f244 100644 (file)
@@ -35,7 +35,7 @@ MODULE_PARM_DESC(threads_per_chan,
 
 static unsigned int max_channels;
 module_param(max_channels, uint, S_IRUGO);
-MODULE_PARM_DESC(nr_channels,
+MODULE_PARM_DESC(max_channels,
                "Maximum number of channels to use (default: all)");
 
 /*
@@ -71,7 +71,7 @@ struct dmatest_chan {
 
 /*
  * These are protected by dma_list_mutex since they're only used by
- * the DMA client event callback
+ * the DMA filter function callback
  */
 static LIST_HEAD(dmatest_channels);
 static unsigned int nr_channels;
@@ -80,7 +80,7 @@ static bool dmatest_match_channel(struct dma_chan *chan)
 {
        if (test_channel[0] == '\0')
                return true;
-       return strcmp(dev_name(&chan->dev), test_channel) == 0;
+       return strcmp(dma_chan_name(chan), test_channel) == 0;
 }
 
 static bool dmatest_match_device(struct dma_device *device)
@@ -215,7 +215,6 @@ static int dmatest_func(void *data)
 
        smp_rmb();
        chan = thread->chan;
-       dma_chan_get(chan);
 
        while (!kthread_should_stop()) {
                total_tests++;
@@ -293,7 +292,6 @@ static int dmatest_func(void *data)
        }
 
        ret = 0;
-       dma_chan_put(chan);
        kfree(thread->dstbuf);
 err_dstbuf:
        kfree(thread->srcbuf);
@@ -319,21 +317,16 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
        kfree(dtc);
 }
 
-static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
+static int dmatest_add_channel(struct dma_chan *chan)
 {
        struct dmatest_chan     *dtc;
        struct dmatest_thread   *thread;
        unsigned int            i;
 
-       /* Have we already been told about this channel? */
-       list_for_each_entry(dtc, &dmatest_channels, node)
-               if (dtc->chan == chan)
-                       return DMA_DUP;
-
        dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
        if (!dtc) {
-               pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
-               return DMA_NAK;
+               pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
+               return -ENOMEM;
        }
 
        dtc->chan = chan;
@@ -343,16 +336,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
                thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
                if (!thread) {
                        pr_warning("dmatest: No memory for %s-test%u\n",
-                                  dev_name(&chan->dev), i);
+                                  dma_chan_name(chan), i);
                        break;
                }
                thread->chan = dtc->chan;
                smp_wmb();
                thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
-                               dev_name(&chan->dev), i);
+                               dma_chan_name(chan), i);
                if (IS_ERR(thread->task)) {
                        pr_warning("dmatest: Failed to run thread %s-test%u\n",
-                                       dev_name(&chan->dev), i);
+                                       dma_chan_name(chan), i);
                        kfree(thread);
                        break;
                }
@@ -362,86 +355,62 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
                list_add_tail(&thread->node, &dtc->threads);
        }
 
-       pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
+       pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan));
 
        list_add_tail(&dtc->node, &dmatest_channels);
        nr_channels++;
 
-       return DMA_ACK;
-}
-
-static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
-{
-       struct dmatest_chan     *dtc, *_dtc;
-
-       list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
-               if (dtc->chan == chan) {
-                       list_del(&dtc->node);
-                       dmatest_cleanup_channel(dtc);
-                       pr_debug("dmatest: lost channel %s\n",
-                                       dev_name(&chan->dev));
-                       return DMA_ACK;
-               }
-       }
-
-       return DMA_DUP;
+       return 0;
 }
 
-/*
- * Start testing threads as new channels are assigned to us, and kill
- * them when the channels go away.
- *
- * When we unregister the client, all channels are removed so this
- * will also take care of cleaning things up when the module is
- * unloaded.
- */
-static enum dma_state_client
-dmatest_event(struct dma_client *client, struct dma_chan *chan,
-               enum dma_state state)
+static bool filter(struct dma_chan *chan, void *param)
 {
-       enum dma_state_client   ack = DMA_NAK;
-
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               if (!dmatest_match_channel(chan)
-                               || !dmatest_match_device(chan->device))
-                       ack = DMA_DUP;
-               else if (max_channels && nr_channels >= max_channels)
-                       ack = DMA_NAK;
-               else
-                       ack = dmatest_add_channel(chan);
-               break;
-
-       case DMA_RESOURCE_REMOVED:
-               ack = dmatest_remove_channel(chan);
-               break;
-
-       default:
-               pr_info("dmatest: Unhandled event %u (%s)\n",
-                               state, dev_name(&chan->dev));
-               break;
-       }
-
-       return ack;
+       if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device))
+               return false;
+       else
+               return true;
 }
 
-static struct dma_client dmatest_client = {
-       .event_callback = dmatest_event,
-};
-
 static int __init dmatest_init(void)
 {
-       dma_cap_set(DMA_MEMCPY, dmatest_client.cap_mask);
-       dma_async_client_register(&dmatest_client);
-       dma_async_client_chan_request(&dmatest_client);
+       dma_cap_mask_t mask;
+       struct dma_chan *chan;
+       int err = 0;
+
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_MEMCPY, mask);
+       for (;;) {
+               chan = dma_request_channel(mask, filter, NULL);
+               if (chan) {
+                       err = dmatest_add_channel(chan);
+                       if (err == 0)
+                               continue;
+                       else {
+                               dma_release_channel(chan);
+                               break; /* add_channel failed, punt */
+                       }
+               } else
+                       break; /* no more channels available */
+               if (max_channels && nr_channels >= max_channels)
+                       break; /* we have all we need */
+       }
 
-       return 0;
+       return err;
 }
-module_init(dmatest_init);
+/* when compiled-in wait for drivers to load first */
+late_initcall(dmatest_init);
 
 static void __exit dmatest_exit(void)
 {
-       dma_async_client_unregister(&dmatest_client);
+       struct dmatest_chan *dtc, *_dtc;
+
+       list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) {
+               list_del(&dtc->node);
+               dmatest_cleanup_channel(dtc);
+               pr_debug("dmatest: dropped channel %s\n",
+                        dma_chan_name(dtc->chan));
+               dma_release_channel(dtc->chan);
+       }
 }
 module_exit(dmatest_exit);
 
index 0778d99aea7c3e6c53e56cdb92b6f11320a6c9f5..6b702cc46b3d1c36d4a7bdd19228f7c253f35ca8 100644 (file)
  * the controller, though.
  */
 
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+static struct device *chan2parent(struct dma_chan *chan)
+{
+       return chan->dev->device.parent;
+}
+
 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 {
        return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
@@ -93,12 +102,12 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
                        ret = desc;
                        break;
                }
-               dev_dbg(&dwc->chan.dev, "desc %p not ACKed\n", desc);
+               dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
                i++;
        }
        spin_unlock_bh(&dwc->lock);
 
-       dev_vdbg(&dwc->chan.dev, "scanned %u descriptors on freelist\n", i);
+       dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
        return ret;
 }
@@ -108,10 +117,10 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
        struct dw_desc  *child;
 
        list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-               dma_sync_single_for_cpu(dwc->chan.dev.parent,
+               dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                                child->txd.phys, sizeof(child->lli),
                                DMA_TO_DEVICE);
-       dma_sync_single_for_cpu(dwc->chan.dev.parent,
+       dma_sync_single_for_cpu(chan2parent(&dwc->chan),
                        desc->txd.phys, sizeof(desc->lli),
                        DMA_TO_DEVICE);
 }
@@ -129,11 +138,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 
                spin_lock_bh(&dwc->lock);
                list_for_each_entry(child, &desc->txd.tx_list, desc_node)
-                       dev_vdbg(&dwc->chan.dev,
+                       dev_vdbg(chan2dev(&dwc->chan),
                                        "moving child desc %p to freelist\n",
                                        child);
                list_splice_init(&desc->txd.tx_list, &dwc->free_list);
-               dev_vdbg(&dwc->chan.dev, "moving desc %p to freelist\n", desc);
+               dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &dwc->free_list);
                spin_unlock_bh(&dwc->lock);
        }
@@ -163,9 +172,9 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 
        /* ASSERT:  channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
-               dev_err(&dwc->chan.dev,
+               dev_err(chan2dev(&dwc->chan),
                        "BUG: Attempted to start non-idle channel\n");
-               dev_err(&dwc->chan.dev,
+               dev_err(chan2dev(&dwc->chan),
                        "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
                        channel_readl(dwc, SAR),
                        channel_readl(dwc, DAR),
@@ -193,7 +202,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
        void                            *param;
        struct dma_async_tx_descriptor  *txd = &desc->txd;
 
-       dev_vdbg(&dwc->chan.dev, "descriptor %u complete\n", txd->cookie);
+       dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
        dwc->completed = txd->cookie;
        callback = txd->callback;
@@ -208,11 +217,11 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
         * mapped before they were submitted...
         */
        if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP))
-               dma_unmap_page(dwc->chan.dev.parent, desc->lli.dar, desc->len,
-                               DMA_FROM_DEVICE);
+               dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar,
+                              desc->len, DMA_FROM_DEVICE);
        if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
-               dma_unmap_page(dwc->chan.dev.parent, desc->lli.sar, desc->len,
-                               DMA_TO_DEVICE);
+               dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar,
+                              desc->len, DMA_TO_DEVICE);
 
        /*
         * The API requires that no submissions are done from a
@@ -228,7 +237,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
        LIST_HEAD(list);
 
        if (dma_readl(dw, CH_EN) & dwc->mask) {
-               dev_err(&dwc->chan.dev,
+               dev_err(chan2dev(&dwc->chan),
                        "BUG: XFER bit set, but channel not idle!\n");
 
                /* Try to continue after resetting the channel... */
@@ -273,7 +282,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                return;
        }
 
-       dev_vdbg(&dwc->chan.dev, "scan_descriptors: llp=0x%x\n", llp);
+       dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                if (desc->lli.llp == llp)
@@ -292,7 +301,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                dwc_descriptor_complete(dwc, desc);
        }
 
-       dev_err(&dwc->chan.dev,
+       dev_err(chan2dev(&dwc->chan),
                "BUG: All descriptors done, but channel not idle!\n");
 
        /* Try to continue after resetting the channel... */
@@ -308,7 +317,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 {
-       dev_printk(KERN_CRIT, &dwc->chan.dev,
+       dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
                        lli->sar, lli->dar, lli->llp,
                        lli->ctlhi, lli->ctllo);
@@ -342,9 +351,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
-       dev_printk(KERN_CRIT, &dwc->chan.dev,
+       dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "Bad descriptor submitted for DMA!\n");
-       dev_printk(KERN_CRIT, &dwc->chan.dev,
+       dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
                        "  cookie: %d\n", bad_desc->txd.cookie);
        dwc_dump_lli(dwc, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node)
@@ -442,12 +451,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
         * for DMA. But this is hard to do in a race-free manner.
         */
        if (list_empty(&dwc->active_list)) {
-               dev_vdbg(&tx->chan->dev, "tx_submit: started %u\n",
+               dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
                dwc_dostart(dwc, desc);
                list_add_tail(&desc->desc_node, &dwc->active_list);
        } else {
-               dev_vdbg(&tx->chan->dev, "tx_submit: queued %u\n",
+               dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);
 
                list_add_tail(&desc->desc_node, &dwc->queue);
@@ -472,11 +481,11 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        unsigned int            dst_width;
        u32                     ctllo;
 
-       dev_vdbg(&chan->dev, "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
+       dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
                        dest, src, len, flags);
 
        if (unlikely(!len)) {
-               dev_dbg(&chan->dev, "prep_dma_memcpy: length is zero!\n");
+               dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }
 
@@ -516,7 +525,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                        first = desc;
                } else {
                        prev->lli.llp = desc->txd.phys;
-                       dma_sync_single_for_device(chan->dev.parent,
+                       dma_sync_single_for_device(chan2parent(chan),
                                        prev->txd.phys, sizeof(prev->lli),
                                        DMA_TO_DEVICE);
                        list_add_tail(&desc->desc_node,
@@ -531,7 +540,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
        prev->lli.llp = 0;
-       dma_sync_single_for_device(chan->dev.parent,
+       dma_sync_single_for_device(chan2parent(chan),
                        prev->txd.phys, sizeof(prev->lli),
                        DMA_TO_DEVICE);
 
@@ -562,15 +571,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct scatterlist      *sg;
        size_t                  total_len = 0;
 
-       dev_vdbg(&chan->dev, "prep_dma_slave\n");
+       dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
 
        if (unlikely(!dws || !sg_len))
                return NULL;
 
-       reg_width = dws->slave.reg_width;
+       reg_width = dws->reg_width;
        prev = first = NULL;
 
-       sg_len = dma_map_sg(chan->dev.parent, sgl, sg_len, direction);
+       sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction);
 
        switch (direction) {
        case DMA_TO_DEVICE:
@@ -579,7 +588,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                | DWC_CTLL_DST_FIX
                                | DWC_CTLL_SRC_INC
                                | DWC_CTLL_FC_M2P);
-               reg = dws->slave.tx_reg;
+               reg = dws->tx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len;
@@ -587,7 +596,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
-                               dev_err(&chan->dev,
+                               dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }
@@ -607,7 +616,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
-                               dma_sync_single_for_device(chan->dev.parent,
+                               dma_sync_single_for_device(chan2parent(chan),
                                                prev->txd.phys,
                                                sizeof(prev->lli),
                                                DMA_TO_DEVICE);
@@ -625,7 +634,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                | DWC_CTLL_SRC_FIX
                                | DWC_CTLL_FC_P2M);
 
-               reg = dws->slave.rx_reg;
+               reg = dws->rx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
                        u32             len;
@@ -633,7 +642,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
                        desc = dwc_desc_get(dwc);
                        if (!desc) {
-                               dev_err(&chan->dev,
+                               dev_err(chan2dev(chan),
                                        "not enough descriptors available\n");
                                goto err_desc_get;
                        }
@@ -653,7 +662,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                first = desc;
                        } else {
                                prev->lli.llp = desc->txd.phys;
-                               dma_sync_single_for_device(chan->dev.parent,
+                               dma_sync_single_for_device(chan2parent(chan),
                                                prev->txd.phys,
                                                sizeof(prev->lli),
                                                DMA_TO_DEVICE);
@@ -673,7 +682,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                prev->lli.ctllo |= DWC_CTLL_INT_EN;
 
        prev->lli.llp = 0;
-       dma_sync_single_for_device(chan->dev.parent,
+       dma_sync_single_for_device(chan2parent(chan),
                        prev->txd.phys, sizeof(prev->lli),
                        DMA_TO_DEVICE);
 
@@ -758,29 +767,21 @@ static void dwc_issue_pending(struct dma_chan *chan)
        spin_unlock_bh(&dwc->lock);
 }
 
-static int dwc_alloc_chan_resources(struct dma_chan *chan,
-               struct dma_client *client)
+static int dwc_alloc_chan_resources(struct dma_chan *chan)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc;
-       struct dma_slave        *slave;
        struct dw_dma_slave     *dws;
        int                     i;
        u32                     cfghi;
        u32                     cfglo;
 
-       dev_vdbg(&chan->dev, "alloc_chan_resources\n");
-
-       /* Channels doing slave DMA can only handle one client. */
-       if (dwc->dws || client->slave) {
-               if (chan->client_count)
-                       return -EBUSY;
-       }
+       dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
        /* ASSERT:  channel is idle */
        if (dma_readl(dw, CH_EN) & dwc->mask) {
-               dev_dbg(&chan->dev, "DMA channel not idle?\n");
+               dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
                return -EIO;
        }
 
@@ -789,23 +790,17 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
        cfghi = DWC_CFGH_FIFO_MODE;
        cfglo = 0;
 
-       slave = client->slave;
-       if (slave) {
+       dws = dwc->dws;
+       if (dws) {
                /*
                 * We need controller-specific data to set up slave
                 * transfers.
                 */
-               BUG_ON(!slave->dma_dev || slave->dma_dev != dw->dma.dev);
-
-               dws = container_of(slave, struct dw_dma_slave, slave);
+               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 
-               dwc->dws = dws;
                cfghi = dws->cfg_hi;
                cfglo = dws->cfg_lo;
-       } else {
-               dwc->dws = NULL;
        }
-
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
 
@@ -822,7 +817,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 
                desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
                if (!desc) {
-                       dev_info(&chan->dev,
+                       dev_info(chan2dev(chan),
                                "only allocated %d descriptors\n", i);
                        spin_lock_bh(&dwc->lock);
                        break;
@@ -832,7 +827,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
                desc->txd.tx_submit = dwc_tx_submit;
                desc->txd.flags = DMA_CTRL_ACK;
                INIT_LIST_HEAD(&desc->txd.tx_list);
-               desc->txd.phys = dma_map_single(chan->dev.parent, &desc->lli,
+               desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
                                sizeof(desc->lli), DMA_TO_DEVICE);
                dwc_desc_put(dwc, desc);
 
@@ -847,7 +842,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan,
 
        spin_unlock_bh(&dwc->lock);
 
-       dev_dbg(&chan->dev,
+       dev_dbg(chan2dev(chan),
                "alloc_chan_resources allocated %d descriptors\n", i);
 
        return i;
@@ -860,7 +855,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        struct dw_desc          *desc, *_desc;
        LIST_HEAD(list);
 
-       dev_dbg(&chan->dev, "free_chan_resources (descs allocated=%u)\n",
+       dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
                        dwc->descs_allocated);
 
        /* ASSERT:  channel is idle */
@@ -881,13 +876,13 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        spin_unlock_bh(&dwc->lock);
 
        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
-               dev_vdbg(&chan->dev, "  freeing descriptor %p\n", desc);
-               dma_unmap_single(chan->dev.parent, desc->txd.phys,
+               dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
+               dma_unmap_single(chan2parent(chan), desc->txd.phys,
                                sizeof(desc->lli), DMA_TO_DEVICE);
                kfree(desc);
        }
 
-       dev_vdbg(&chan->dev, "free_chan_resources done\n");
+       dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
 }
 
 /*----------------------------------------------------------------------*/
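The chan2dev()/chan2parent() helpers used throughout the dw_dmac.c hunks above are introduced by this patch outside the quoted context. Given that struct dma_chan now carries a struct dma_chan_dev pointer instead of an embedded struct device (see the include/linux/dmaengine.h changes further down), they presumably reduce to something like this sketch:

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;		/* the channel's sysfs device */
}

static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;	/* DMA-capable parent, used for dma_map_*() */
}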
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 0b95dcce447e91baef611d769026386ac7c252c3..ca70a21afc681ec04c1c230cf1a6ba3667aa1e11 100644 (file)
@@ -366,8 +366,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan,
-                                       struct dma_client *client)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 
@@ -823,7 +822,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
         */
        WARN_ON(fdev->feature != new_fsl_chan->feature);
 
-       new_fsl_chan->dev = &new_fsl_chan->common.dev;
+       new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
        new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
                        new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
 
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index 9b16a3af9a0af6a9a377d0f10048216ac0f86510..4105d6575b645dc7674c35ea5bca0c60bde89d9f 100644 (file)
@@ -75,60 +75,10 @@ static int ioat_dca_enabled = 1;
 module_param(ioat_dca_enabled, int, 0644);
 MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
 
-static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
-{
-       struct ioat_device *device = pci_get_drvdata(pdev);
-       u8 version;
-       int err = 0;
-
-       version = readb(iobase + IOAT_VER_OFFSET);
-       switch (version) {
-       case IOAT_VER_1_2:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat_dca_init(pdev, iobase);
-               break;
-       case IOAT_VER_2_0:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat2_dca_init(pdev, iobase);
-               break;
-       case IOAT_VER_3_0:
-               device->dma = ioat_dma_probe(pdev, iobase);
-               if (device->dma && ioat_dca_enabled)
-                       device->dca = ioat3_dca_init(pdev, iobase);
-               break;
-       default:
-               err = -ENODEV;
-               break;
-       }
-       if (!device->dma)
-               err = -ENODEV;
-       return err;
-}
-
-static void ioat_shutdown_functionality(struct pci_dev *pdev)
-{
-       struct ioat_device *device = pci_get_drvdata(pdev);
-
-       dev_err(&pdev->dev, "Removing dma and dca services\n");
-       if (device->dca) {
-               unregister_dca_provider(device->dca);
-               free_dca_provider(device->dca);
-               device->dca = NULL;
-       }
-
-       if (device->dma) {
-               ioat_dma_remove(device->dma);
-               device->dma = NULL;
-       }
-}
-
 static struct pci_driver ioat_pci_driver = {
        .name           = "ioatdma",
        .id_table       = ioat_pci_tbl,
        .probe          = ioat_probe,
-       .shutdown       = ioat_shutdown_functionality,
        .remove         = __devexit_p(ioat_remove),
 };
 
@@ -179,7 +129,29 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = ioat_setup_functionality(pdev, iobase);
+       switch (readb(iobase + IOAT_VER_OFFSET)) {
+       case IOAT_VER_1_2:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat_dca_init(pdev, iobase);
+               break;
+       case IOAT_VER_2_0:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat2_dca_init(pdev, iobase);
+               break;
+       case IOAT_VER_3_0:
+               device->dma = ioat_dma_probe(pdev, iobase);
+               if (device->dma && ioat_dca_enabled)
+                       device->dca = ioat3_dca_init(pdev, iobase);
+               break;
+       default:
+               err = -ENODEV;
+               break;
+       }
+       if (!device->dma)
+               err = -ENODEV;
+
        if (err)
                goto err_version;
 
@@ -198,17 +170,21 @@ err_enable_device:
        return err;
 }
 
-/*
- * It is unsafe to remove this module: if removed while a requested
- * dma is outstanding, esp. from tcp, it is possible to hang while
- * waiting for something that will never finish.  However, if you're
- * feeling lucky, this usually works just fine.
- */
 static void __devexit ioat_remove(struct pci_dev *pdev)
 {
        struct ioat_device *device = pci_get_drvdata(pdev);
 
-       ioat_shutdown_functionality(pdev);
+       dev_err(&pdev->dev, "Removing dma and dca services\n");
+       if (device->dca) {
+               unregister_dca_provider(device->dca);
+               free_dca_provider(device->dca);
+               device->dca = NULL;
+       }
+
+       if (device->dma) {
+               ioat_dma_remove(device->dma);
+               device->dma = NULL;
+       }
 
        kfree(device);
 }
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 6607fdd00b1cd3a31655cc6b53fca580919f3354..b3759c4b65360c0c3b72664c0a675887474f041e 100644 (file)
@@ -734,8 +734,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
  * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
  * @chan: the channel to be filled out
  */
-static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
-                                        struct dma_client *client)
+static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
        struct ioat_desc_sw *desc;
@@ -1341,12 +1340,11 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
-DECLARE_COMPLETION(test_completion);
 static void ioat_dma_test_callback(void *dma_async_param)
 {
-       printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
-               dma_async_param);
-       complete(&test_completion);
+       struct completion *cmp = dma_async_param;
+
+       complete(cmp);
 }
 
 /**
@@ -1363,6 +1361,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int err = 0;
+       struct completion cmp;
 
        src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
        if (!src)
@@ -1381,7 +1380,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
+       if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
                dev_err(&device->pdev->dev,
                        "selftest cannot allocate chan resource\n");
                err = -ENODEV;
@@ -1402,8 +1401,9 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        }
 
        async_tx_ack(tx);
+       init_completion(&cmp);
        tx->callback = ioat_dma_test_callback;
-       tx->callback_param = (void *)0x8086;
+       tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(&device->pdev->dev,
@@ -1413,7 +1413,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
        }
        device->common.device_issue_pending(dma_chan);
 
-       wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
+       wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
        if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
                                        != DMA_SUCCESS) {
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 6be3172622009ff67ec502b049f167d6ccebe1cd..ea5440dd10dc1ffb56dda5393d51f388e02ca87b 100644 (file)
@@ -24,7 +24,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -116,7 +115,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
        }
 
        /* run dependent operations */
-       async_tx_run_dependencies(&desc->async_tx);
+       dma_run_dependencies(&desc->async_tx);
 
        return cookie;
 }
@@ -270,8 +269,6 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
                        break;
        }
 
-       BUG_ON(!seen_current);
-
        if (cookie > 0) {
                iop_chan->completed_cookie = cookie;
                pr_debug("\tcompleted cookie %d\n", cookie);
@@ -471,8 +468,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
  * greater than 2x the number slots needed to satisfy a device->max_xor
  * request.
  * */
-static int iop_adma_alloc_chan_resources(struct dma_chan *chan,
-                                        struct dma_client *client)
+static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
 {
        char *hw_desc;
        int idx;
@@ -866,7 +862,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+       if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -964,7 +960,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (iop_adma_alloc_chan_resources(dma_chan, NULL) < 1) {
+       if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -1115,26 +1111,13 @@ static int __devexit iop_adma_remove(struct platform_device *dev)
        struct iop_adma_device *device = platform_get_drvdata(dev);
        struct dma_chan *chan, *_chan;
        struct iop_adma_chan *iop_chan;
-       int i;
        struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
 
        dma_async_device_unregister(&device->common);
 
-       for (i = 0; i < 3; i++) {
-               unsigned int irq;
-               irq = platform_get_irq(dev, i);
-               free_irq(irq, device);
-       }
-
        dma_free_coherent(&dev->dev, plat_data->pool_size,
                        device->dma_desc_pool_virt, device->dma_desc_pool);
 
-       do {
-               struct resource *res;
-               res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-               release_mem_region(res->start, res->end - res->start);
-       } while (0);
-
        list_for_each_entry_safe(chan, _chan, &device->common.channels,
                                device_node) {
                iop_chan = to_iop_adma_chan(chan);
@@ -1255,7 +1238,6 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
        spin_lock_init(&iop_chan->lock);
        INIT_LIST_HEAD(&iop_chan->chain);
        INIT_LIST_HEAD(&iop_chan->all_slots);
-       INIT_RCU_HEAD(&iop_chan->common.rcu);
        iop_chan->common.device = dma_dev;
        list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
 
@@ -1431,16 +1413,12 @@ static int __init iop_adma_init (void)
        return platform_driver_register(&iop_adma_driver);
 }
 
-/* it's currently unsafe to unload this module */
-#if 0
 static void __exit iop_adma_exit (void)
 {
        platform_driver_unregister(&iop_adma_driver);
        return;
 }
 module_exit(iop_adma_exit);
-#endif
-
 module_init(iop_adma_init);
 
 MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index bcda17426411ea6f4c311aa5b077a1e91e906b63..d35cbd1ff0b31e6635dd803eeae708cf75d68772 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/async_tx.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
@@ -340,7 +339,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
        }
 
        /* run dependent operations */
-       async_tx_run_dependencies(&desc->async_tx);
+       dma_run_dependencies(&desc->async_tx);
 
        return cookie;
 }
@@ -607,8 +606,7 @@ submit_done:
 }
 
 /* returns the number of allocated descriptors */
-static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
-                                      struct dma_client *client)
+static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 {
        char *hw_desc;
        int idx;
@@ -958,7 +956,7 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+       if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -1053,7 +1051,7 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
-       if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
+       if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }
@@ -1221,7 +1219,6 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
        INIT_LIST_HEAD(&mv_chan->chain);
        INIT_LIST_HEAD(&mv_chan->completed_slots);
        INIT_LIST_HEAD(&mv_chan->all_slots);
-       INIT_RCU_HEAD(&mv_chan->common.rcu);
        mv_chan->common.device = dma_dev;
 
        list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 1e97916914adf9b596f7dc17795979cf47b3ceff..76bfe16c09b1e3e159a8016b479408a906c5bebd 100644 (file)
@@ -55,7 +55,6 @@ enum atmel_mci_state {
 
 struct atmel_mci_dma {
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-       struct dma_client               client;
        struct dma_chan                 *chan;
        struct dma_async_tx_descriptor  *data_desc;
 #endif
@@ -593,10 +592,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
 
        /* If we don't have a channel, we can't do DMA */
        chan = host->dma.chan;
-       if (chan) {
-               dma_chan_get(chan);
+       if (chan)
                host->data_chan = chan;
-       }
 
        if (!chan)
                return -ENODEV;
@@ -1443,60 +1440,6 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_MMC_ATMELMCI_DMA
-
-static inline struct atmel_mci *
-dma_client_to_atmel_mci(struct dma_client *client)
-{
-       return container_of(client, struct atmel_mci, dma.client);
-}
-
-static enum dma_state_client atmci_dma_event(struct dma_client *client,
-               struct dma_chan *chan, enum dma_state state)
-{
-       struct atmel_mci        *host;
-       enum dma_state_client   ret = DMA_NAK;
-
-       host = dma_client_to_atmel_mci(client);
-
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               spin_lock_bh(&host->lock);
-               if (!host->dma.chan) {
-                       host->dma.chan = chan;
-                       ret = DMA_ACK;
-               }
-               spin_unlock_bh(&host->lock);
-
-               if (ret == DMA_ACK)
-                       dev_info(&host->pdev->dev,
-                                       "Using %s for DMA transfers\n",
-                                       chan->dev.bus_id);
-               break;
-
-       case DMA_RESOURCE_REMOVED:
-               spin_lock_bh(&host->lock);
-               if (host->dma.chan == chan) {
-                       host->dma.chan = NULL;
-                       ret = DMA_ACK;
-               }
-               spin_unlock_bh(&host->lock);
-
-               if (ret == DMA_ACK)
-                       dev_info(&host->pdev->dev,
-                                       "Lost %s, falling back to PIO\n",
-                                       chan->dev.bus_id);
-               break;
-
-       default:
-               break;
-       }
-
-
-       return ret;
-}
-#endif /* CONFIG_MMC_ATMELMCI_DMA */
-
 static int __init atmci_init_slot(struct atmel_mci *host,
                struct mci_slot_pdata *slot_data, unsigned int id,
                u32 sdc_reg)
@@ -1600,6 +1543,18 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,
        mmc_free_host(slot->mmc);
 }
 
+#ifdef CONFIG_MMC_ATMELMCI_DMA
+static bool filter(struct dma_chan *chan, void *slave)
+{
+       struct dw_dma_slave *dws = slave;
+
+       if (dws->dma_dev == chan->device->dev)
+               return true;
+       else
+               return false;
+}
+#endif
+
 static int __init atmci_probe(struct platform_device *pdev)
 {
        struct mci_platform_data        *pdata;
@@ -1652,22 +1607,20 @@ static int __init atmci_probe(struct platform_device *pdev)
                goto err_request_irq;
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-       if (pdata->dma_slave) {
-               struct dma_slave *slave = pdata->dma_slave;
+       if (pdata->dma_slave.dma_dev) {
+               struct dw_dma_slave *dws = &pdata->dma_slave;
+               dma_cap_mask_t mask;
 
-               slave->tx_reg = regs->start + MCI_TDR;
-               slave->rx_reg = regs->start + MCI_RDR;
+               dws->tx_reg = regs->start + MCI_TDR;
+               dws->rx_reg = regs->start + MCI_RDR;
 
                /* Try to grab a DMA channel */
-               host->dma.client.event_callback = atmci_dma_event;
-               dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask);
-               host->dma.client.slave = slave;
-
-               dma_async_client_register(&host->dma.client);
-               dma_async_client_chan_request(&host->dma.client);
-       } else {
-               dev_notice(&pdev->dev, "DMA not available, using PIO\n");
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+               host->dma.chan = dma_request_channel(mask, filter, dws);
        }
+       if (!host->dma.chan)
+               dev_notice(&pdev->dev, "DMA not available, using PIO\n");
 #endif /* CONFIG_MMC_ATMELMCI_DMA */
 
        platform_set_drvdata(pdev, host);
@@ -1699,8 +1652,8 @@ static int __init atmci_probe(struct platform_device *pdev)
 
 err_init_slot:
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-       if (pdata->dma_slave)
-               dma_async_client_unregister(&host->dma.client);
+       if (host->dma.chan)
+               dma_release_channel(host->dma.chan);
 #endif
        free_irq(irq, host);
 err_request_irq:
@@ -1731,8 +1684,8 @@ static int __exit atmci_remove(struct platform_device *pdev)
        clk_disable(host->mck);
 
 #ifdef CONFIG_MMC_ATMELMCI_DMA
-       if (host->dma.client.slave)
-               dma_async_client_unregister(&host->dma.client);
+       if (host->dma.chan)
+               dma_release_channel(host->dma.chan);
 #endif
 
        free_irq(platform_get_irq(pdev, 0), host);
@@ -1761,7 +1714,7 @@ static void __exit atmci_exit(void)
        platform_driver_unregister(&atmci_driver);
 }
 
-module_init(atmci_init);
+late_initcall(atmci_init); /* try to load after dma driver when built-in */
 module_exit(atmci_exit);
 
 MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver");
diff --git a/drivers/parisc/asp.c b/drivers/parisc/asp.c
index 821369135369c52c0e141a10867e3233b655293f..7931133526c48dd57438e9bfb2cda2ef6e784824 100644 (file)
@@ -71,8 +71,7 @@ static void asp_choose_irq(struct parisc_device *dev, void *ctrl)
  */
 #define ASP_INTERRUPT_ADDR 0xf0800000
 
-int __init
-asp_init_chip(struct parisc_device *dev)
+static int __init asp_init_chip(struct parisc_device *dev)
 {
        struct gsc_irq gsc_irq;
        int ret;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index dcc1e9958d2f9bb0b0fd3625c43ed9eaf806bfd2..cd4dd7ed2c06b60fd471319fc6ef78fca8a91184 100644 (file)
@@ -555,7 +555,7 @@ static u32 hint_lookup[] = {
  * (Load Coherence Index) instruction.  The 8 bits used for the virtual
  * index are bits 12:19 of the value returned by LCI.
  */ 
-void CCIO_INLINE
+static void CCIO_INLINE
 ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
                   unsigned long hints)
 {
@@ -1578,8 +1578,6 @@ static int __init ccio_probe(struct parisc_device *dev)
 
        ioc_count++;
 
-       parisc_vmerge_boundary = IOVP_SIZE;
-       parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
        parisc_has_iommu();
        return 0;
 }
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 77cc8bfef8c9aca1bf9eb141db38fa3626588675..d539d9df88e7648cca17415ac6ce1a8ceffd2513 100644 (file)
@@ -287,7 +287,7 @@ DINO_PORT_OUT(b,  8, 3)
 DINO_PORT_OUT(w, 16, 2)
 DINO_PORT_OUT(l, 32, 0)
 
-struct pci_port_ops dino_port_ops = {
+static struct pci_port_ops dino_port_ops = {
        .inb    = dino_in8,
        .inw    = dino_in16,
        .inl    = dino_in32,
@@ -690,7 +690,7 @@ dino_fixup_bus(struct pci_bus *bus)
 }
 
 
-struct pci_bios_ops dino_bios_ops = {
+static struct pci_bios_ops dino_bios_ops = {
        .init           = dino_bios_init,
        .fixup_bus      = dino_fixup_bus
 };
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index 65eee67aa2ae66d9ac9130c9ee7efb6fab896a74..13856415b4326aace5ceacec61962e94e59f0306 100644 (file)
@@ -29,7 +29,7 @@ struct hppb_card {
        struct hppb_card *next;
 };
 
-struct hppb_card hppb_card_head = {
+static struct hppb_card hppb_card_head = {
        .hpa = 0,
        .next = NULL,
 };
diff --git a/drivers/parisc/lasi.c b/drivers/parisc/lasi.c
index bee510098ce86ce23963dbf442b084fa02400d94..e65727ca9fc070ed2233289a33498b403d3e229c 100644 (file)
@@ -107,7 +107,7 @@ lasi_init_irq(struct gsc_asic *this_lasi)
 
 #else
 
-void __init lasi_led_init(unsigned long lasi_hpa)
+static void __init lasi_led_init(unsigned long lasi_hpa)
 {
        unsigned long datareg;
 
@@ -163,8 +163,7 @@ static void lasi_power_off(void)
        gsc_writel(0x02, datareg);
 }
 
-int __init
-lasi_init_chip(struct parisc_device *dev)
+static int __init lasi_init_chip(struct parisc_device *dev)
 {
        extern void (*chassis_power_off)(void);
        struct gsc_asic *lasi;
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index a28c8946deaa1602cc83483fcb8156846443e3e2..d8233de8c75d92584694ecfa45b5655880f02518 100644 (file)
@@ -824,7 +824,7 @@ lba_fixup_bus(struct pci_bus *bus)
 }
 
 
-struct pci_bios_ops lba_bios_ops = {
+static struct pci_bios_ops lba_bios_ops = {
        .init =         lba_bios_init,
        .fixup_bus =    lba_fixup_bus,
 };
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index bc73b96346ff1be8b20130a88d9b9b8a7591e68a..3fac8f81d59d4df607efb7871e9bce0f595c182a 100644 (file)
@@ -561,7 +561,7 @@ typedef unsigned long space_t;
  * IOMMU uses little endian for the pdir.
  */
 
-void SBA_INLINE
+static void SBA_INLINE
 sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
                  unsigned long hint)
 {
@@ -1874,7 +1874,7 @@ static struct parisc_device_id sba_tbl[] = {
        { 0, }
 };
 
-int sba_driver_callback(struct parisc_device *);
+static int sba_driver_callback(struct parisc_device *);
 
 static struct parisc_driver sba_driver = {
        .name =         MODULE_NAME,
@@ -1887,8 +1887,7 @@ static struct parisc_driver sba_driver = {
 ** If so, initialize the chip and tell other partners in crime they
 ** have work to do.
 */
-int
-sba_driver_callback(struct parisc_device *dev)
+static int sba_driver_callback(struct parisc_device *dev)
 {
        struct sba_device *sba_dev;
        u32 func_class;
@@ -1979,8 +1978,6 @@ sba_driver_callback(struct parisc_device *dev)
        proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
 #endif
 
-       parisc_vmerge_boundary = IOVP_SIZE;
-       parisc_vmerge_max_size = IOVP_SIZE * BITS_PER_LONG;
        parisc_has_iommu();
        return 0;
 }
diff --git a/drivers/parisc/wax.c b/drivers/parisc/wax.c
index 892a83bbe73d52d977e980ad5a18fca44914cd66..da9d5ad1353cb64056eb4f2c20190030d803fdbd 100644 (file)
@@ -68,8 +68,7 @@ wax_init_irq(struct gsc_asic *wax)
 //     gsc_writel(0xFFFFFFFF, base+0x2000); /* RS232-B on Wax */
 }
 
-int __init
-wax_init_chip(struct parisc_device *dev)
+static int __init wax_init_chip(struct parisc_device *dev)
 {
        struct gsc_asic *wax;
        struct parisc_device *parent;
diff --git a/drivers/rtc/rtc-parisc.c b/drivers/rtc/rtc-parisc.c
index 346d633655e79d1841fe61df46c11a6ae8b39207..c6bfa6fe1a2a289dddc0748ebd660df195087724 100644 (file)
@@ -34,7 +34,8 @@ static int parisc_get_time(struct device *dev, struct rtc_time *tm)
 static int parisc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct parisc_rtc *p = dev_get_drvdata(dev);
-       unsigned long flags, ret;
+       unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&p->lock, flags);
        ret = set_rtc_time(tm);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 0f50d4cc4360e65d3453704aa400448f99cb95a6..45f6297821bd6c7afee48e247d8a25b027bc7445 100644 (file)
@@ -59,9 +59,7 @@ enum async_tx_flags {
 };
 
 #ifdef CONFIG_DMA_ENGINE
-void async_tx_issue_pending_all(void);
-enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
-void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 #include <asm/async_tx.h>
 #else
@@ -77,19 +75,6 @@ static inline void async_tx_issue_pending_all(void)
        do { } while (0);
 }
 
-static inline enum dma_status
-dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
-{
-       return DMA_SUCCESS;
-}
-
-static inline void
-async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
-       struct dma_chan *host_chan)
-{
-       do { } while (0);
-}
-
 static inline struct dma_chan *
 async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
        enum dma_transaction_type tx_type, struct page **dst, int dst_count,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2a2213eefd859ed1e6e15354c522d18ac5ae83da..2f1f95737acb5d7e9e6df044a7190363522038e6 100644 (file)
@@ -3,7 +3,7 @@
 
 #define ATMEL_MCI_MAX_NR_SLOTS 2
 
-struct dma_slave;
+#include <linux/dw_dmac.h>
 
 /**
  * struct mci_slot_pdata - board-specific per-slot configuration
@@ -28,11 +28,11 @@ struct mci_slot_pdata {
 
 /**
  * struct mci_platform_data - board-specific MMC/SDcard configuration
- * @dma_slave: DMA slave interface to use in data transfers, or NULL.
+ * @dma_slave: DMA slave interface to use in data transfers.
  * @slot: Per-slot configuration data.
  */
 struct mci_platform_data {
-       struct dma_slave        *dma_slave;
+       struct dw_dma_slave     dma_slave;
        struct mci_slot_pdata   slot[ATMEL_MCI_MAX_NR_SLOTS];
 };
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index adb0b084eb5a413c601d4a8489e006e061906840..64dea2ab326c29a22a2069b02c00a46d39706604 100644 (file)
 #include <linux/rcupdate.h>
 #include <linux/dma-mapping.h>
 
-/**
- * enum dma_state - resource PNP/power management state
- * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
- * @DMA_RESOURCE_RESUME: DMA device returning to full power
- * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
- * @DMA_RESOURCE_REMOVED: DMA device removed from the system
- */
-enum dma_state {
-       DMA_RESOURCE_SUSPEND,
-       DMA_RESOURCE_RESUME,
-       DMA_RESOURCE_AVAILABLE,
-       DMA_RESOURCE_REMOVED,
-};
-
-/**
- * enum dma_state_client - state of the channel in the client
- * @DMA_ACK: client would like to use, or was using this channel
- * @DMA_DUP: client has already seen this channel, or is not using this channel
- * @DMA_NAK: client does not want to see any more channels
- */
-enum dma_state_client {
-       DMA_ACK,
-       DMA_DUP,
-       DMA_NAK,
-};
-
 /**
  * typedef dma_cookie_t - an opaque DMA cookie
  *
@@ -89,23 +63,13 @@ enum dma_transaction_type {
        DMA_MEMSET,
        DMA_MEMCPY_CRC32C,
        DMA_INTERRUPT,
+       DMA_PRIVATE,
        DMA_SLAVE,
 };
 
 /* last transaction type for creation of the capabilities mask */
 #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
 
-/**
- * enum dma_slave_width - DMA slave register access width.
- * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum dma_slave_width {
-       DMA_SLAVE_WIDTH_8BIT,
-       DMA_SLAVE_WIDTH_16BIT,
-       DMA_SLAVE_WIDTH_32BIT,
-};
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -131,32 +95,6 @@ enum dma_ctrl_flags {
  */
 typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
-/**
- * struct dma_slave - Information about a DMA slave
- * @dev: device acting as DMA slave
- * @dma_dev: required DMA master device. If non-NULL, the client can not be
- *     bound to other masters than this.
- * @tx_reg: physical address of data register used for
- *     memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- *     peripheral-to-memory transfers
- * @reg_width: peripheral register width
- *
- * If dma_dev is non-NULL, the client can not be bound to other DMA
- * masters than the one corresponding to this device. The DMA master
- * driver may use this to determine if there is controller-specific
- * data wrapped around this struct. Drivers of platform code that sets
- * the dma_dev field must therefore make sure to use an appropriate
- * controller-specific dma slave structure wrapping this struct.
- */
-struct dma_slave {
-       struct device           *dev;
-       struct device           *dma_dev;
-       dma_addr_t              tx_reg;
-       dma_addr_t              rx_reg;
-       enum dma_slave_width    reg_width;
-};
-
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
  * @refcount: local_t used for open-coded "bigref" counting
@@ -165,7 +103,6 @@ struct dma_slave {
  */
 
 struct dma_chan_percpu {
-       local_t refcount;
        /* stats */
        unsigned long memcpy_count;
        unsigned long bytes_transferred;
@@ -176,13 +113,14 @@ struct dma_chan_percpu {
  * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
- * @class_dev: class device for sysfs
+ * @dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
  * @slow_ref: indicates that the DMA channel is free
  * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
  */
 struct dma_chan {
        struct dma_device *device;
@@ -190,73 +128,47 @@ struct dma_chan {
 
        /* sysfs */
        int chan_id;
-       struct device dev;
-
-       struct kref refcount;
-       int slow_ref;
-       struct rcu_head rcu;
+       struct dma_chan_dev *dev;
 
        struct list_head device_node;
        struct dma_chan_percpu *local;
        int client_count;
+       int table_count;
 };
 
-#define to_dma_chan(p) container_of(p, struct dma_chan, dev)
-
-void dma_chan_cleanup(struct kref *kref);
-
-static inline void dma_chan_get(struct dma_chan *chan)
-{
-       if (unlikely(chan->slow_ref))
-               kref_get(&chan->refcount);
-       else {
-               local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-               put_cpu();
-       }
-}
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan - driver channel device
+ * @device - sysfs device
+ * @dev_id - parent dma_device dev_id
+ * @idr_ref - reference count to gate release of dma_device dev_id
+ */
+struct dma_chan_dev {
+       struct dma_chan *chan;
+       struct device device;
+       int dev_id;
+       atomic_t *idr_ref;
+};
 
-static inline void dma_chan_put(struct dma_chan *chan)
+static inline const char *dma_chan_name(struct dma_chan *chan)
 {
-       if (unlikely(chan->slow_ref))
-               kref_put(&chan->refcount, dma_chan_cleanup);
-       else {
-               local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
-               put_cpu();
-       }
+       return dev_name(&chan->dev->device);
 }
 
-/*
- * typedef dma_event_callback - function pointer to a DMA event callback
- * For each channel added to the system this routine is called for each client.
- * If the client would like to use the channel it returns '1' to signal (ack)
- * the dmaengine core to take out a reference on the channel and its
- * corresponding device.  A client must not 'ack' an available channel more
- * than once.  When a channel is removed all clients are notified.  If a client
- * is using the channel it must 'ack' the removal.  A client must not 'ack' a
- * removed channel more than once.
- * @client - 'this' pointer for the client context
- * @chan - channel to be acted upon
- * @state - available or removed
- */
-struct dma_client;
-typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
-               struct dma_chan *chan, enum dma_state state);
+void dma_chan_cleanup(struct kref *kref);
 
 /**
- * struct dma_client - info on the entity making use of DMA services
- * @event_callback: func ptr to call when something happens
- * @cap_mask: only return channels that satisfy the requested capabilities
- *  a value of zero corresponds to any capability
- * @slave: data for preparing slave transfer. Must be non-NULL iff the
- *  DMA_SLAVE capability is requested.
- * @global_node: list_head for global dma_client_list
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned.  Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask.  It returns 'true' to indicate that the
+ * channel is suitable.
  */
-struct dma_client {
-       dma_event_callback      event_callback;
-       dma_cap_mask_t          cap_mask;
-       struct dma_slave        *slave;
-       struct list_head        global_node;
-};
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 /**
@@ -323,14 +235,10 @@ struct dma_device {
        dma_cap_mask_t  cap_mask;
        int max_xor;
 
-       struct kref refcount;
-       struct completion done;
-
        int dev_id;
        struct device *dev;
 
-       int (*device_alloc_chan_resources)(struct dma_chan *chan,
-                       struct dma_client *client);
+       int (*device_alloc_chan_resources)(struct dma_chan *chan);
        void (*device_free_chan_resources)(struct dma_chan *chan);
 
        struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
@@ -362,9 +270,8 @@ struct dma_device {
 
 /* --- public DMA engine API --- */
 
-void dma_async_client_register(struct dma_client *client);
-void dma_async_client_unregister(struct dma_client *client);
-void dma_async_client_chan_request(struct dma_client *client);
+void dmaengine_get(void);
+void dmaengine_put(void);
 dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
        void *dest, void *src, size_t len);
 dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -406,6 +313,12 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
        set_bit(tx_type, dstp->bits);
 }
 
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+       bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
 static inline int
 __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
@@ -475,11 +388,25 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 }
 
 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+#ifdef CONFIG_DMA_ENGINE
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+#else
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+       return DMA_SUCCESS;
+}
+#endif
 
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+void dma_issue_pending_all(void);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param);
+void dma_release_channel(struct dma_chan *chan);
 
 /* --- Helper iov-locking functions --- */
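A condensed sketch of the two client models that replace struct dma_client in the header above (dst, src, len, my_filter and my_filter_param are placeholders; error handling omitted):

	struct dma_chan *chan;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	/* Model 1: opportunistic mem-to-mem offload, which is what net_dma becomes below. */
	dmaengine_get();			/* subsystem declares it may use channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* cheap per-cpu lookup; may return NULL */
	if (chan)
		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	dma_issue_pending_all();		/* flush pending copies at a convenient point */
	dmaengine_put();			/* subsystem is done with mem-to-mem channels */

	/* Model 2: exclusive ("private") channel, as in the atmel-mci conversion above. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, my_filter_param);
	if (chan) {
		/* ... prepare and submit transfers on chan ... */
		dma_release_channel(chan);
	}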
 
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 04d217b442bf86725c2cbe51efa7012cb07b1550..d797dde247f763392427cac3d9cad4b1ba5cc595 100644 (file)
@@ -21,15 +21,35 @@ struct dw_dma_platform_data {
        unsigned int    nr_channels;
 };
 
+/**
+ * enum dw_dma_slave_width - DMA slave register access width.
+ * @DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
+ * @DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
+ */
+enum dw_dma_slave_width {
+       DW_DMA_SLAVE_WIDTH_8BIT,
+       DW_DMA_SLAVE_WIDTH_16BIT,
+       DW_DMA_SLAVE_WIDTH_32BIT,
+};
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
- * @slave: Generic information about the slave
- * @ctl_lo: Platform-specific initializer for the CTL_LO register
+ *
+ * @dma_dev: required DMA master device
+ * @tx_reg: physical address of data register used for
+ *     memory-to-peripheral transfers
+ * @rx_reg: physical address of data register used for
+ *     peripheral-to-memory transfers
+ * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
  */
 struct dw_dma_slave {
-       struct dma_slave        slave;
+       struct device           *dma_dev;
+       dma_addr_t              tx_reg;
+       dma_addr_t              rx_reg;
+       enum dw_dma_slave_width reg_width;
        u32                     cfg_hi;
        u32                     cfg_lo;
 };
@@ -54,9 +74,4 @@ struct dw_dma_slave {
 #define DWC_CFGL_HS_DST_POL    (1 << 18)       /* dst handshake active low */
 #define DWC_CFGL_HS_SRC_POL    (1 << 19)       /* src handshake active low */
 
-static inline struct dw_dma_slave *to_dw_dma_slave(struct dma_slave *slave)
-{
-       return container_of(slave, struct dw_dma_slave, slave);
-}
-
 #endif /* DW_DMAC_H */
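On the platform side, board code now fills in struct dw_dma_slave directly; for atmel-mci it is embedded in struct mci_platform_data (see the atmel-mci.h change above), and tx_reg/rx_reg are filled in later by the MMC driver itself. A hypothetical board-file fragment, with the controller device name invented for illustration:

static struct mci_platform_data mci0_data = {
	.dma_slave = {
		.dma_dev	= &dw_dmac0_device.dev,	/* hypothetical dw_dmac platform device */
		.reg_width	= DW_DMA_SLAVE_WIDTH_32BIT,
		.cfg_hi		= 0,			/* board-specific DWC_CFGH_* handshake setup */
		.cfg_lo		= 0,
	},
	/* .slot[] initialisation omitted */
};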
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 114091be88721b1e9183a401aea93f4ec1bca6a1..f24556813375f0b54ec13066f22c10a40775010a 100644 (file)
@@ -1125,9 +1125,6 @@ struct softnet_data
        struct sk_buff          *completion_queue;
 
        struct napi_struct      backlog;
-#ifdef CONFIG_NET_DMA
-       struct dma_chan         *net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
diff --git a/include/net/netdma.h b/include/net/netdma.h
index f28c6e064e8f9d7ba5ebbf9ba5b928b05236d15f..8ba8ce284eeb8d36fa83f146897e7482ca66db02 100644 (file)
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-static inline struct dma_chan *get_softnet_dma(void)
-{
-       struct dma_chan *chan;
-       rcu_read_lock();
-       chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-       if (chan)
-               dma_chan_get(chan);
-       rcu_read_unlock();
-       return chan;
-}
-
 int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
                struct sk_buff *skb, int offset, struct iovec *to,
                size_t len, struct dma_pinned_list *pinned_list);
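With get_softnet_dma() and its dma_chan_get()/dma_chan_put() pairing removed above, the TCP receive paths further down obtain a channel on demand and never take a reference; the recurring pattern is simply:

	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);

The channel remains usable for as long as the networking core holds dmaengine_get(), which net_dev_init() takes below.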
diff --git a/kernel/cred.c b/kernel/cred.c
index ff7bc071991c3dd008f5d67078bfde1cefb46bf4..043f78c133c46ccbfe4a56bcbf92b35e57f92018 100644 (file)
@@ -506,6 +506,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
        else
                old = get_cred(&init_cred);
 
+       *new = *old;
        get_uid(new->user);
        get_group_info(new->group_info);
 
@@ -529,6 +530,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 
 error:
        put_cred(new);
+       put_cred(old);
        return NULL;
 }
 EXPORT_SYMBOL(prepare_kernel_cred);
diff --git a/net/core/dev.c b/net/core/dev.c
index bab8bcedd62eaed745eee9aba085fd437b4bd2cf..5f736f1ceeae0200ba77371d6373384e398233f1 100644 (file)
@@ -170,25 +170,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
 static struct list_head ptype_all __read_mostly;       /* Taps */
 
-#ifdef CONFIG_NET_DMA
-struct net_dma {
-       struct dma_client client;
-       spinlock_t lock;
-       cpumask_t channel_mask;
-       struct dma_chan **channels;
-};
-
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-       enum dma_state state);
-
-static struct net_dma net_dma = {
-       .client = {
-               .event_callback = netdev_dma_event,
-       },
-};
-#endif
-
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -2754,14 +2735,7 @@ out:
         * There may not be any more sk_buffs coming right now, so push
         * any pending DMA copies to hardware
         */
-       if (!cpus_empty(net_dma.channel_mask)) {
-               int chan_idx;
-               for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-                       struct dma_chan *chan = net_dma.channels[chan_idx];
-                       if (chan)
-                               dma_async_memcpy_issue_pending(chan);
-               }
-       }
+       dma_issue_pending_all();
 #endif
 
        return;
@@ -4952,122 +4926,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-#ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-       unsigned int cpu, i, n, chan_idx;
-       struct dma_chan *chan;
-
-       if (cpus_empty(net_dma->channel_mask)) {
-               for_each_online_cpu(cpu)
-                       rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-               return;
-       }
-
-       i = 0;
-       cpu = first_cpu(cpu_online_map);
-
-       for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-               chan = net_dma->channels[chan_idx];
-
-               n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-                  + (i < (num_online_cpus() %
-                       cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-               while(n) {
-                       per_cpu(softnet_data, cpu).net_dma = chan;
-                       cpu = next_cpu(cpu, cpu_online_map);
-                       n--;
-               }
-               i++;
-       }
-}
-
-/**
- * netdev_dma_event - event callback for the net_dma_client
- * @client: should always be net_dma_client
- * @chan: DMA channel for the event
- * @state: DMA state to be handled
- */
-static enum dma_state_client
-netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-       enum dma_state state)
-{
-       int i, found = 0, pos = -1;
-       struct net_dma *net_dma =
-               container_of(client, struct net_dma, client);
-       enum dma_state_client ack = DMA_DUP; /* default: take no action */
-
-       spin_lock(&net_dma->lock);
-       switch (state) {
-       case DMA_RESOURCE_AVAILABLE:
-               for (i = 0; i < nr_cpu_ids; i++)
-                       if (net_dma->channels[i] == chan) {
-                               found = 1;
-                               break;
-                       } else if (net_dma->channels[i] == NULL && pos < 0)
-                               pos = i;
-
-               if (!found && pos >= 0) {
-                       ack = DMA_ACK;
-                       net_dma->channels[pos] = chan;
-                       cpu_set(pos, net_dma->channel_mask);
-                       net_dma_rebalance(net_dma);
-               }
-               break;
-       case DMA_RESOURCE_REMOVED:
-               for (i = 0; i < nr_cpu_ids; i++)
-                       if (net_dma->channels[i] == chan) {
-                               found = 1;
-                               pos = i;
-                               break;
-                       }
-
-               if (found) {
-                       ack = DMA_ACK;
-                       cpu_clear(pos, net_dma->channel_mask);
-                       net_dma->channels[i] = NULL;
-                       net_dma_rebalance(net_dma);
-               }
-               break;
-       default:
-               break;
-       }
-       spin_unlock(&net_dma->lock);
-
-       return ack;
-}
-
-/**
- * netdev_dma_register - register the networking subsystem as a DMA client
- */
-static int __init netdev_dma_register(void)
-{
-       net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-                                                               GFP_KERNEL);
-       if (unlikely(!net_dma.channels)) {
-               printk(KERN_NOTICE
-                               "netdev_dma: no memory for net_dma.channels\n");
-               return -ENOMEM;
-       }
-       spin_lock_init(&net_dma.lock);
-       dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-       dma_async_client_register(&net_dma.client);
-       dma_async_client_chan_request(&net_dma.client);
-       return 0;
-}
-
-#else
-static int __init netdev_dma_register(void) { return -ENODEV; }
-#endif /* CONFIG_NET_DMA */
 
 /**
  *     netdev_increment_features - increment feature set by one
@@ -5287,14 +5145,15 @@ static int __init net_dev_init(void)
        if (register_pernet_device(&default_device_ops))
                goto out;
 
-       netdev_dma_register();
-
        open_softirq(NET_TX_SOFTIRQ, net_tx_action);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
        hotcpu_notifier(dev_cpu_callback, 0);
        dst_init();
        dev_mcast_init();
+       #ifdef CONFIG_NET_DMA
+       dmaengine_get();
+       #endif
        rc = 0;
 out:
        return rc;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bd6ff907d9e4bc18d548e2379d73f796e0fe5e83..ce572f9dff023529e6aaea9dcea9aa9476a9746d 100644 (file)
@@ -1313,7 +1313,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                if ((available < target) &&
                    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
                    !sysctl_tcp_low_latency &&
-                   __get_cpu_var(softnet_data).net_dma) {
+                   dma_find_channel(DMA_MEMCPY)) {
                        preempt_enable_no_resched();
                        tp->ucopy.pinned_list =
                                        dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1523,7 +1523,7 @@ do_prequeue:
                if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
                        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                               tp->ucopy.dma_chan = get_softnet_dma();
+                               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
                        if (tp->ucopy.dma_chan) {
                                tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -1628,7 +1628,6 @@ skip_copy:
 
                /* Safe to free early-copied skbs now */
                __skb_queue_purge(&sk->sk_async_wait_queue);
-               dma_chan_put(tp->ucopy.dma_chan);
                tp->ucopy.dma_chan = NULL;
        }
        if (tp->ucopy.pinned_list) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 99b7ecbe88934a5f5cd4bc6b6a0a676677c7b12a..a6961d75c7ea6b744d699602bfbdf547694764fe 100644 (file)
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
                return 0;
 
        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-               tp->ucopy.dma_chan = get_softnet_dma();
+               tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
        if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9d839fa9331e5845c39b22d07d9d3a93f39634d3..19d7b429a2625b8ecc3b7fc35392d3e0af99ffd2 100644 (file)
@@ -1594,7 +1594,7 @@ process:
 #ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                       tp->ucopy.dma_chan = get_softnet_dma();
+                       tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
                if (tp->ucopy.dma_chan)
                        ret = tcp_v4_do_rcv(sk, skb);
                else
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 1297306d729cb0f231dba5bd0d150485c925d07a..e5b85d45bee837153cd6c07d94dc4a128ed0cb16 100644 (file)
@@ -1675,7 +1675,7 @@ process:
 #ifdef CONFIG_NET_DMA
                struct tcp_sock *tp = tcp_sk(sk);
                if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                       tp->ucopy.dma_chan = get_softnet_dma();
+                       tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
                if (tp->ucopy.dma_chan)
                        ret = tcp_v6_do_rcv(sk, skb);
                else