www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
USB: Add MUSB and TUSB support
author Felipe Balbi <felipe.balbi@nokia.com>
Thu, 24 Jul 2008 09:27:36 +0000 (12:27 +0300)
committer Greg Kroah-Hartman <gregkh@suse.de>
Thu, 14 Aug 2008 00:33:00 +0000 (17:33 -0700)
This patch adds support for MUSB and TUSB controllers
integrated into omap2430 and davinci. It also adds support
for the external tusb6010 controller.

Cc: David Brownell <dbrownell@users.sourceforge.net>
Cc: Tony Lindgren <tony@atomide.com>
Signed-off-by: Felipe Balbi <felipe.balbi@nokia.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
30 files changed:
MAINTAINERS
drivers/Makefile
drivers/usb/Kconfig
drivers/usb/gadget/Kconfig
drivers/usb/musb/Kconfig [new file with mode: 0644]
drivers/usb/musb/Makefile [new file with mode: 0644]
drivers/usb/musb/cppi_dma.c [new file with mode: 0644]
drivers/usb/musb/cppi_dma.h [new file with mode: 0644]
drivers/usb/musb/davinci.c [new file with mode: 0644]
drivers/usb/musb/davinci.h [new file with mode: 0644]
drivers/usb/musb/musb_core.c [new file with mode: 0644]
drivers/usb/musb/musb_core.h [new file with mode: 0644]
drivers/usb/musb/musb_debug.h [new file with mode: 0644]
drivers/usb/musb/musb_dma.h [new file with mode: 0644]
drivers/usb/musb/musb_gadget.c [new file with mode: 0644]
drivers/usb/musb/musb_gadget.h [new file with mode: 0644]
drivers/usb/musb/musb_gadget_ep0.c [new file with mode: 0644]
drivers/usb/musb/musb_host.c [new file with mode: 0644]
drivers/usb/musb/musb_host.h [new file with mode: 0644]
drivers/usb/musb/musb_io.h [new file with mode: 0644]
drivers/usb/musb/musb_procfs.c [new file with mode: 0644]
drivers/usb/musb/musb_regs.h [new file with mode: 0644]
drivers/usb/musb/musb_virthub.c [new file with mode: 0644]
drivers/usb/musb/musbhsdma.c [new file with mode: 0644]
drivers/usb/musb/omap2430.c [new file with mode: 0644]
drivers/usb/musb/omap2430.h [new file with mode: 0644]
drivers/usb/musb/tusb6010.c [new file with mode: 0644]
drivers/usb/musb/tusb6010.h [new file with mode: 0644]
drivers/usb/musb/tusb6010_omap.c [new file with mode: 0644]
include/linux/usb/musb.h [new file with mode: 0644]

index 0c42dc25e0e735f80d91a825e64e37d2a173526d..773d6bc3a9a18bb6e5caeed1c08db610ae168d1d 100644 (file)
@@ -2928,6 +2928,12 @@ M:       jirislaby@gmail.com
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
+P:     Felipe Balbi
+M:     felipe.balbi@nokia.com
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 P:     Andrew Gallatin
 M:     gallatin@myri.com
index a280ab3d0833fa29e8ae031bbb53bfdba37c4191..2735bde73475c5f840c2d52266fd47e54b730bcd 100644 (file)
@@ -57,6 +57,7 @@ obj-$(CONFIG_ATA_OVER_ETH)    += block/aoe/
 obj-$(CONFIG_PARIDE)           += block/paride/
 obj-$(CONFIG_TC)               += tc/
 obj-$(CONFIG_USB)              += usb/
+obj-$(CONFIG_USB_MUSB_HDRC)    += usb/musb/
 obj-$(CONFIG_PCI)              += usb/
 obj-$(CONFIG_USB_GADGET)       += usb/gadget/
 obj-$(CONFIG_SERIO)            += input/serio/
index 4f9b5ecfb7212dc16120ca8f24d99dbd8b705cae..bcefbddeba5099877981a1717cfd22edfd35e159 100644 (file)
@@ -99,6 +99,8 @@ source "drivers/usb/mon/Kconfig"
 
 source "drivers/usb/host/Kconfig"
 
+source "drivers/usb/musb/Kconfig"
+
 source "drivers/usb/class/Kconfig"
 
 source "drivers/usb/storage/Kconfig"
index c6a8c6b1116a45381d5ee4acb98bbda541d36ae8..acc95b2ac6f85ad24c9fc067bdc511a3e50a9589 100644 (file)
@@ -284,6 +284,16 @@ config USB_LH7A40X
        default USB_GADGET
        select USB_GADGET_SELECTED
 
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+       boolean "Inventra HDRC USB Peripheral (TI, ...)"
+       depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+       select USB_GADGET_DUALSPEED
+       select USB_GADGET_SELECTED
+       help
+         This OTG-capable silicon IP is used in dual designs including
+         the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
 config USB_GADGET_OMAP
        boolean "OMAP USB Device Controller"
        depends on ARCH_OMAP
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644 (file)
index 0000000..faca433
--- /dev/null
@@ -0,0 +1,176 @@
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+       depends on !USB && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+       depends on (USB || USB_GADGET) && HAVE_CLK
+       select TWL4030_USB if MACH_OMAP_3430SDP
+       tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+       help
+         Say Y here if your system has a dual role high speed USB
+         controller based on the Mentor Graphics silicon IP.  Then
+         configure options to match your silicon and the board
+         it's being used with, including the USB peripheral role,
+         or the USB host role, or both.
+
+         Texas Instruments parts using this IP include DaVinci 644x,
+         OMAP 243x, OMAP 343x, and TUSB 6010.
+
+         If you do not know what this is, please say N.
+
+         To compile this driver as a module, choose M here; the
+         module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+       boolean
+       depends on USB_MUSB_HDRC
+       default y if ARCH_DAVINCI
+       default y if ARCH_OMAP2430
+       default y if ARCH_OMAP34XX
+       help
+         Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+         controller is configured (endpoints, mechanisms, etc) on the
+         current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+       depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+       depends on USB_MUSB_HDRC && ARCH_OMAP2430
+
+comment "OMAP 343x high speed USB support"
+       depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+
+config USB_TUSB6010
+       boolean "TUSB 6010 support"
+       depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+       default y
+       help
+         The TUSB 6010 chip, from Texas Instruments, connects a discrete
+         HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+         (a high speed serial link).  It can use system-specific external
+         DMA controllers.
+
+choice
+       prompt "Driver Mode"
+       depends on USB_MUSB_HDRC
+       help
+         Dual-Role devices can support both host and peripheral roles,
+         as well as the special "OTG Device" role which can switch
+         between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+       depends on USB
+       bool "USB Host"
+       help
+         Say Y here if your system supports the USB host role.
+         If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+         or "Mini-AB" connector, it supports the host role.
+         (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+       depends on USB_GADGET
+       bool "USB Peripheral (gadget stack)"
+       select USB_GADGET_MUSB_HDRC
+       help
+         Say Y here if your system supports the USB peripheral role.
+         If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+         connector, it supports the peripheral role.
+         (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+       depends on USB && USB_GADGET && PM && EXPERIMENTAL
+       bool "Both host and peripheral:  USB OTG (On The Go) Device"
+       select USB_GADGET_MUSB_HDRC
+       select USB_OTG
+       help
+          The most notable feature of USB OTG is support for a
+          "Dual-Role" device, which can act as either a device
+          or a host.  The initial role choice can be changed
+          later, when two dual-role devices talk to each other.
+
+          At this writing, the OTG support in this driver is incomplete,
+          omitting the mandatory HNP or SRP protocols.  However, some
+          of the cable based role switching works.  (That is, grounding
+          the ID pin switches the controller to host mode, while leaving
+          it floating leaves it in peripheral mode.)
+
+          Select this if your system has a Mini-AB connector, or
+          to simplify certain kinds of configuration.
+
+          To implement your OTG Targeted Peripherals List (TPL), enable
+          USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+          to match your requirements.
+
+endchoice
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+       bool
+       depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+#      default y
+#      select USB_GADGET_DUALSPEED
+#      select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+       bool
+       depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+       select USB_OTG if USB_GADGET_MUSB_HDRC
+       default y
+
+
+config MUSB_PIO_ONLY
+       bool 'Disable DMA (always use PIO)'
+       depends on USB_MUSB_HDRC
+       default y if USB_TUSB6010
+       help
+         All data is copied between memory and FIFO by the CPU.
+         DMA controllers are ignored.
+
+         Do not select 'y' here unless DMA support for your SOC or board
+         is unavailable (or unstable).  When DMA is enabled at compile time,
+         you can still disable it at run time using the "use_dma=n" module
+         parameter.
+
+config USB_INVENTRA_DMA
+       bool
+       depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+       default ARCH_OMAP2430 || ARCH_OMAP34XX
+       help
+         Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+       bool
+       depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+       default ARCH_DAVINCI
+       help
+         Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+       bool
+       depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+       depends on USB_TUSB6010
+       depends on ARCH_OMAP
+       default y
+       help
+         Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config USB_MUSB_LOGLEVEL
+       depends on USB_MUSB_HDRC
+       int  'Logging Level (0 - none / 3 - annoying / ... )'
+       default 0
+       help
+         Set the logging level. 0 disables the debugging altogether,
+         although when USB_DEBUG is set the value is at least 1.
+         Starting at level 3, per-transfer (urb, usb_request, packet,
+         or dma transfer) tracing may kick in.
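The comments in the Kconfig above spell out which symbols the C sources are meant to #ifdef on: USB_MUSB_HDRC_HCD for host side support and USB_GADGET_MUSB_HDRC for peripheral side support, each of which also covers OTG. A minimal illustration of that convention (not code from this patch):

/* role-specific code keys off the combined symbols, never the
 * single-role USB_MUSB_HOST / USB_MUSB_PERIPHERAL choices
 */
#ifdef CONFIG_USB_MUSB_HDRC_HCD
/* host side support is built (USB_MUSB_HOST or USB_MUSB_OTG) */
#endif

#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* peripheral side support is built (USB_MUSB_PERIPHERAL or USB_MUSB_OTG) */
#endif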
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644 (file)
index 0000000..88eb67d
--- /dev/null
@@ -0,0 +1,86 @@
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := musb_core.o
+
+obj-$(CONFIG_USB_MUSB_HDRC)    += musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+       musb_hdrc-objs  += davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+       musb_hdrc-objs  += tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP2430),y)
+       musb_hdrc-objs  += omap2430.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP3430),y)
+       musb_hdrc-objs  += omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+       musb_hdrc-objs          += musb_gadget_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+       musb_hdrc-objs          += musb_virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+
+  ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+    musb_hdrc-objs             += musbhsdma.o
+
+  else
+    ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+      musb_hdrc-objs           += cppi_dma.o
+
+    else
+      ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+        musb_hdrc-objs         += tusb6010_omap.o
+
+      endif
+    endif
+  endif
+endif
+
+
+################################################################################
+
+# FIXME remove all these extra -DMUSB_* things, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+       EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+MUSB_DEBUG:=$(CONFIG_USB_MUSB_LOGLEVEL)
+
+ifeq ("$(strip $(MUSB_DEBUG))","")
+    ifdef CONFIG_USB_DEBUG
+       MUSB_DEBUG:=1
+    else
+       MUSB_DEBUG:=0
+    endif
+endif
+
+ifneq ($(MUSB_DEBUG),0)
+    EXTRA_CFLAGS += -DDEBUG
+
+    ifeq ($(CONFIG_PROC_FS),y)
+       musb_hdrc-objs          += musb_procfs.o
+    endif
+
+endif
+
+EXTRA_CFLAGS += -DMUSB_DEBUG=$(MUSB_DEBUG)
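musb_debug.h is added by this patch but not shown in this excerpt. As a rough standalone sketch of how a level-gated DBG() macro can consume the MUSB_DEBUG value that the Makefile above passes in via -DMUSB_DEBUG=$(MUSB_DEBUG) (the macro internals here are assumptions, not the real header):

#include <stdio.h>

#ifndef MUSB_DEBUG
#define MUSB_DEBUG 2	/* as if built with CONFIG_USB_MUSB_LOGLEVEL=2 */
#endif

#define _dbg_level(level)	(MUSB_DEBUG >= (level))

/* hypothetical stand-in for the real DBG() in musb_debug.h */
#define DBG(level, fmt, ...) do {				\
	if (_dbg_level(level))					\
		fprintf(stderr, "musb: " fmt, ##__VA_ARGS__);	\
} while (0)

int main(void)
{
	DBG(1, "printed: compile-time level is %d\n", MUSB_DEBUG);
	DBG(5, "suppressed: per-transfer traces need a higher level\n");
	return 0;
}

With CONFIG_USB_MUSB_LOGLEVEL=0 and USB_DEBUG off, the Makefile leaves MUSB_DEBUG at 0, so every such level check is constant false and the messages cost nothing at run time.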
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644 (file)
index 0000000..5ad6d08
--- /dev/null
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ *   which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ *   evidently after the 1 byte packet was received and acked, the queue
+ *   of BDs got garbaged so it wouldn't empty the fifo.  (rxcsr 0x2003,
+ *   and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ *   004001ff 00000001 .. 8feff860)  Host was just getting NAKed on tx
+ *   of its next (512 byte) packet.  IRQ issues?
+ *
+ * REVISIT:  the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD       64
+#define NUM_RXCHAN_BD       64
+
+static inline void cpu_drain_writebuffer(void)
+{
+       wmb();
+#ifdef CONFIG_CPU_ARM926T
+       /* REVISIT this "should not be needed",
+        * but lack of it sure seemed to hurt ...
+        */
+       asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+       struct cppi_descriptor  *bd = c->freelist;
+
+       if (bd)
+               c->freelist = bd->next;
+       return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+       if (!bd)
+               return;
+       bd->next = c->freelist;
+       c->freelist = bd;
+}
+
+/*
+ *  Start DMA controller
+ *
+ *  Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+       musb_writel(&rx->rx_skipbytes, 0, 0);
+       musb_writel(&rx->rx_head, 0, 0);
+       musb_writel(&rx->rx_sop, 0, 0);
+       musb_writel(&rx->rx_current, 0, 0);
+       musb_writel(&rx->rx_buf_current, 0, 0);
+       musb_writel(&rx->rx_len_len, 0, 0);
+       musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+       musb_writel(&tx->tx_head, 0, 0);
+       musb_writel(&tx->tx_buf, 0, 0);
+       musb_writel(&tx->tx_current, 0, 0);
+       musb_writel(&tx->tx_buf_current, 0, 0);
+       musb_writel(&tx->tx_info, 0, 0);
+       musb_writel(&tx->tx_rem_len, 0, 0);
+       /* musb_writel(&tx->tx_dummy, 0, 0); */
+       musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+       int     j;
+
+       /* initialize channel fields */
+       c->head = NULL;
+       c->tail = NULL;
+       c->last_processed = NULL;
+       c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+       c->controller = cppi;
+       c->is_rndis = 0;
+       c->freelist = NULL;
+
+       /* build the BD Free list for the channel */
+       for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+               struct cppi_descriptor  *bd;
+               dma_addr_t              dma;
+
+               bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+               bd->dma = dma;
+               cppi_bd_free(c, bd);
+       }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+       struct cppi             *cppi = c->controller;
+       struct cppi_descriptor  *bd;
+
+       (void) cppi_channel_abort(&c->channel);
+       c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+       c->controller = NULL;
+
+       /* free all its bds */
+       bd = c->last_processed;
+       do {
+               if (bd)
+                       dma_pool_free(cppi->pool, bd, bd->dma);
+               bd = cppi_bd_alloc(c);
+       } while (bd);
+       c->last_processed = NULL;
+}
+
+static int __init cppi_controller_start(struct dma_controller *c)
+{
+       struct cppi     *controller;
+       void __iomem    *tibase;
+       int             i;
+
+       controller = container_of(c, struct cppi, controller);
+
+       /* do whatever is necessary to start controller */
+       for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+               controller->tx[i].transmit = true;
+               controller->tx[i].index = i;
+       }
+       for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+               controller->rx[i].transmit = false;
+               controller->rx[i].index = i;
+       }
+
+       /* setup BD list on a per channel basis */
+       for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+               cppi_pool_init(controller, controller->tx + i);
+       for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+               cppi_pool_init(controller, controller->rx + i);
+
+       tibase =  controller->tibase;
+       INIT_LIST_HEAD(&controller->tx_complete);
+
+       /* initialise tx/rx channel head pointers to zero */
+       for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+               struct cppi_channel     *tx_ch = controller->tx + i;
+               struct cppi_tx_stateram __iomem *tx;
+
+               INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+               tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+               tx_ch->state_ram = tx;
+               cppi_reset_tx(tx, 0);
+       }
+       for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+               struct cppi_channel     *rx_ch = controller->rx + i;
+               struct cppi_rx_stateram __iomem *rx;
+
+               INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+               rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+               rx_ch->state_ram = rx;
+               cppi_reset_rx(rx);
+       }
+
+       /* enable individual cppi channels */
+       musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+                       DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+       musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+                       DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+       /* enable tx/rx CPPI control */
+       musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+       musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+       /* disable RNDIS mode, also host rx RNDIS autorequest */
+       musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+       musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+
+       return 0;
+}
+
+/*
+ *  Stop DMA controller
+ *
+ *  De-Init the DMA controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+       struct cppi             *controller;
+       void __iomem            *tibase;
+       int                     i;
+
+       controller = container_of(c, struct cppi, controller);
+
+       tibase = controller->tibase;
+       /* DISABLE INDIVIDUAL CHANNEL Interrupts */
+       musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+                       DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+       musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+                       DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+       DBG(1, "Tearing down RX and TX Channels\n");
+       for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+               /* FIXME restructure of txdma to use bds like rxdma */
+               controller->tx[i].last_processed = NULL;
+               cppi_pool_free(controller->tx + i);
+       }
+       for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+               cppi_pool_free(controller->rx + i);
+
+       /* in Tx Case proper teardown is supported. We resort to disabling
+        * Tx/Rx CPPI after cleanup of Tx channels. Before TX teardown is
+        * complete TX CPPI cannot be disabled.
+        */
+       /*disable tx/rx cppi */
+       musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+       musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+       return 0;
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+       musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+       musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA.  With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side.  We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+               struct musb_hw_ep *ep, u8 transmit)
+{
+       struct cppi             *controller;
+       u8                      index;
+       struct cppi_channel     *cppi_ch;
+       void __iomem            *tibase;
+
+       controller = container_of(c, struct cppi, controller);
+       tibase = controller->tibase;
+
+       /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+       index = ep->epnum - 1;
+
+       /* return the corresponding CPPI Channel Handle, and
+        * probably disable the non-CPPI irq until we need it.
+        */
+       if (transmit) {
+               if (index >= ARRAY_SIZE(controller->tx)) {
+                       DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+                       return NULL;
+               }
+               cppi_ch = controller->tx + index;
+       } else {
+               if (index >= ARRAY_SIZE(controller->rx)) {
+                       DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+                       return NULL;
+               }
+               cppi_ch = controller->rx + index;
+               core_rxirq_disable(tibase, ep->epnum);
+       }
+
+       /* REVISIT make this an error later once the same driver code works
+        * with the other DMA engine too
+        */
+       if (cppi_ch->hw_ep)
+               DBG(1, "re-allocating DMA%d %cX channel %p\n",
+                               index, transmit ? 'T' : 'R', cppi_ch);
+       cppi_ch->hw_ep = ep;
+       cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+       DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+       return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel.  */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+       struct cppi_channel     *c;
+       void __iomem            *tibase;
+
+       /* REVISIT:  for paranoia, check state and abort if needed... */
+
+       c = container_of(channel, struct cppi_channel, channel);
+       tibase = c->controller->tibase;
+       if (!c->hw_ep)
+               DBG(1, "releasing idle DMA channel %p\n", c);
+       else if (!c->transmit)
+               core_rxirq_enable(tibase, c->index + 1);
+
+       /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+       c->hw_ep = NULL;
+       channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+       void __iomem                    *base = c->controller->mregs;
+       struct cppi_rx_stateram __iomem *rx = c->state_ram;
+
+       musb_ep_select(base, c->index + 1);
+
+       DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+                       "%08x H%08x S%08x C%08x, "
+                       "B%08x L%08x %08x .. %08x"
+                       "\n",
+               c->index, tag,
+               musb_readl(c->controller->tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+               musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+               musb_readl(&rx->rx_skipbytes, 0),
+               musb_readl(&rx->rx_head, 0),
+               musb_readl(&rx->rx_sop, 0),
+               musb_readl(&rx->rx_current, 0),
+
+               musb_readl(&rx->rx_buf_current, 0),
+               musb_readl(&rx->rx_len_len, 0),
+               musb_readl(&rx->rx_cnt_cnt, 0),
+               musb_readl(&rx->rx_complete, 0)
+               );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+       void __iomem                    *base = c->controller->mregs;
+       struct cppi_tx_stateram __iomem *tx = c->state_ram;
+
+       musb_ep_select(base, c->index + 1);
+
+       DBG(level, "TX DMA%d%s: csr %04x, "
+                       "H%08x S%08x C%08x %08x, "
+                       "F%08x L%08x .. %08x"
+                       "\n",
+               c->index, tag,
+               musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+               musb_readl(&tx->tx_head, 0),
+               musb_readl(&tx->tx_buf, 0),
+               musb_readl(&tx->tx_current, 0),
+               musb_readl(&tx->tx_buf_current, 0),
+
+               musb_readl(&tx->tx_info, 0),
+               musb_readl(&tx->tx_rem_len, 0),
+               /* dummy/unused word 6 */
+               musb_readl(&tx->tx_complete, 0)
+               );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+               void __iomem *tibase, int is_rndis)
+{
+       /* we may need to change the rndis flag for this cppi channel */
+       if (c->is_rndis != is_rndis) {
+               u32     value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+               u32     temp = 1 << (c->index);
+
+               if (is_rx)
+                       temp <<= 16;
+               if (is_rndis)
+                       value |= temp;
+               else
+                       value &= ~temp;
+               musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+               c->is_rndis = is_rndis;
+       }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+       pr_debug("RXBD/%s %08x: "
+                       "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+                       tag, bd->dma,
+                       bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+                       bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+       struct cppi_descriptor  *bd;
+
+       if (!_dbg_level(level))
+               return;
+       cppi_dump_rx(level, rx, tag);
+       if (rx->last_processed)
+               cppi_dump_rxbd("last", rx->last_processed);
+       for (bd = rx->head; bd; bd = bd->next)
+               cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE:  DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+               void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+       u32     val;
+
+#ifdef RNDIS_RX_IS_USABLE
+       u32     tmp;
+       /* assert(is_host_active(musb)) */
+
+       /* start from "AutoReq never" */
+       tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+       val = tmp & ~((0x3) << (rx->index * 2));
+
+       /* HCD arranged reqpkt for packet #1.  we arrange int
+        * for all but the last one, maybe in two segments.
+        */
+       if (!onepacket) {
+#if 0
+               /* use two segments, autoreq "all" then the last "never" */
+               val |= ((0x3) << (rx->index * 2));
+               n_bds--;
+#else
+               /* one segment, autoreq "all-but-last" */
+               val |= ((0x1) << (rx->index * 2));
+#endif
+       }
+
+       if (val != tmp) {
+               int n = 100;
+
+               /* make sure that autoreq is updated before continuing */
+               musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+               do {
+                       tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+                       if (tmp == val)
+                               break;
+                       cpu_relax();
+               } while (n-- > 0);
+       }
+#endif
+
+       /* REQPKT is turned off after each segment */
+       if (n_bds && rx->channel.actual_len) {
+               void __iomem    *regs = rx->hw_ep->regs;
+
+               val = musb_readw(regs, MUSB_RXCSR);
+               if (!(val & MUSB_RXCSR_H_REQPKT)) {
+                       val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+                       musb_writew(regs, MUSB_RXCSR, val);
+                       /* flush writebuffer */
+                       val = musb_readw(regs, MUSB_RXCSR);
+               }
+       }
+       return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ *  - RX builds new queues each time, to help handle routine "early
+ *    termination" cases (faults, including errors and short reads)
+ *    more correctly.
+ *
+ *  - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma).  Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths.  (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often.  RNDIS mode seems to behave too
+ * (except how it handles the exactly-N-packets case).  Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length.  It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ.  (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+       unsigned                maxpacket = tx->maxpacket;
+       dma_addr_t              addr = tx->buf_dma + tx->offset;
+       size_t                  length = tx->buf_len - tx->offset;
+       struct cppi_descriptor  *bd;
+       unsigned                n_bds;
+       unsigned                i;
+       struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
+       int                     rndis;
+
+       /* TX can use the CPPI "rndis" mode, where we can probably fit this
+        * transfer in one BD and one IRQ.  The only time we would NOT want
+        * to use it is when hardware constraints prevent it, or if we'd
+        * trigger the "send a ZLP?" confusion.
+        */
+       rndis = (maxpacket & 0x3f) == 0
+               && length < 0xffff
+               && (length % maxpacket) != 0;
+
+       if (rndis) {
+               maxpacket = length;
+               n_bds = 1;
+       } else {
+               n_bds = length / maxpacket;
+               if (!length || (length % maxpacket))
+                       n_bds++;
+               n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+               length = min(n_bds * maxpacket, length);
+       }
+
+       DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+                       tx->index,
+                       maxpacket,
+                       rndis ? "rndis" : "transparent",
+                       n_bds,
+                       addr, length);
+
+       cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+       /* assuming here that channel_program is called during
+        * transfer initiation ... current code maintains state
+        * for one outstanding request only (no queues, not even
+        * the implicit ones of an iso urb).
+        */
+
+       bd = tx->freelist;
+       tx->head = bd;
+       tx->last_processed = NULL;
+
+       /* FIXME use BD pool like RX side does, and just queue
+        * the minimum number for this request.
+        */
+
+       /* Prepare queue of BDs first, then hand it to hardware.
+        * All BDs except maybe the last should be of full packet
+        * size; for RNDIS there _is_ only that last packet.
+        */
+       for (i = 0; i < n_bds; ) {
+               if (++i < n_bds && bd->next)
+                       bd->hw_next = bd->next->dma;
+               else
+                       bd->hw_next = 0;
+
+               bd->hw_bufp = tx->buf_dma + tx->offset;
+
+               /* FIXME set EOP only on the last packet,
+                * SOP only on the first ... avoid IRQs
+                */
+               if ((tx->offset + maxpacket) <= tx->buf_len) {
+                       tx->offset += maxpacket;
+                       bd->hw_off_len = maxpacket;
+                       bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+                               | CPPI_OWN_SET | maxpacket;
+               } else {
+                       /* only this one may be a partial USB Packet */
+                       u32             partial_len;
+
+                       partial_len = tx->buf_len - tx->offset;
+                       tx->offset = tx->buf_len;
+                       bd->hw_off_len = partial_len;
+
+                       bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+                               | CPPI_OWN_SET | partial_len;
+                       if (partial_len == 0)
+                               bd->hw_options |= CPPI_ZERO_SET;
+               }
+
+               DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+                               bd, bd->hw_next, bd->hw_bufp,
+                               bd->hw_off_len, bd->hw_options);
+
+               /* update the last BD enqueued to the list */
+               tx->tail = bd;
+               bd = bd->next;
+       }
+
+       /* BDs live in DMA-coherent memory, but writes might be pending */
+       cpu_drain_writebuffer();
+
+       /* Write to the HeadPtr in state RAM to trigger */
+       musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+       cppi_dump_tx(5, tx, "/S");
+}
+
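To make the sizing logic in cppi_next_tx_segment() above concrete, here is a small userspace sketch (illustration only, not driver code) that applies the same rndis/transparent test to two example transfers on a hypothetical 512 byte high speed bulk endpoint:

#include <stdio.h>

#define NUM_TXCHAN_BD	64	/* same per-channel BD limit as above */

static void show_tx_plan(unsigned maxpacket, unsigned length)
{
	/* same test as cppi_next_tx_segment() */
	int rndis = (maxpacket & 0x3f) == 0
			&& length < 0xffff
			&& (length % maxpacket) != 0;
	unsigned n_bds;

	if (rndis) {
		n_bds = 1;			/* whole transfer: one BD, one IRQ */
	} else {
		n_bds = length / maxpacket;
		if (!length || (length % maxpacket))
			n_bds++;		/* short (or zero length) tail packet */
		if (n_bds > NUM_TXCHAN_BD)
			n_bds = NUM_TXCHAN_BD;	/* remainder goes in a later segment */
	}
	printf("maxp %u len %u -> %s, %u BD(s)\n", maxpacket, length,
			rndis ? "rndis" : "transparent", n_bds);
}

int main(void)
{
	show_tx_plan(512, 3000);	/* not a packet multiple: rndis, 1 BD */
	show_tx_plan(512, 4096);	/* exact multiple: transparent, 8 BDs */
	return 0;
}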
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios:  (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes.  How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ *   (b) loses **BADLY** because nothing (!) happens when that second packet
+ *   fills the buffer, much less when a third one arrives.  (Which makes this
+ *   not a "true" RNDIS mode.  In the RNDIS protocol short-packet termination
+ *   is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ *   out to end-of-buffer.  Standard PCI host controller DMA descriptors
+ *   implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ *   converse problems:  (b) is handled right, but (a) loses badly.  CPPI RX
+ *   ignores SOP/EOP markings and processes both of those BDs; so both packets
+ *   are loaded into the buffer (with a 212 byte gap between them), and the next
+ *   buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ *   are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ *   reliably make both cases work, with software handling both cases correctly
+ *   and at the significant penalty of needing an IRQ per packet.  (The lack of
+ *   I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet?  The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class.  Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ *  (a)        peripheral mode ... since rndis peripherals could pad their
+ *     writes to hosts, causing i/o failure; or we'd have to cope with
+ *     a largely unknowable variety of host side protocol variants
+ *  (b)        and short reads are NOT errors ... since full reads would
+ *     cause those same i/o failures
+ *  (c)        and read length is
+ *     - less than 64KB (max per cppi descriptor)
+ *     - not a multiple of 4096 (g_zero default, full reads typical)
+ *     - N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ *   try rx rndis mode
+ *
+ * Cost of heuristic failing:  RXDMA wedges at the end of transfers that
+ * fill out the whole buffer.  Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled.  When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static int cppi_rx_rndis = 1;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ *     performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+       unsigned                maxpacket = rx->maxpacket;
+       dma_addr_t              addr = rx->buf_dma + rx->offset;
+       size_t                  length = rx->buf_len - rx->offset;
+       struct cppi_descriptor  *bd, *tail;
+       unsigned                n_bds;
+       unsigned                i;
+       void __iomem            *tibase = musb->ctrl_base;
+       int                     is_rndis = 0;
+       struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+
+       if (onepacket) {
+               /* almost every USB driver, host or peripheral side */
+               n_bds = 1;
+
+               /* maybe apply the heuristic above */
+               if (cppi_rx_rndis
+                               && is_peripheral_active(musb)
+                               && length > maxpacket
+                               && (length & ~0xffff) == 0
+                               && (length & 0x0fff) != 0
+                               && (length & (maxpacket - 1)) == 0) {
+                       maxpacket = length;
+                       is_rndis = 1;
+               }
+       } else {
+               /* virtually nothing except mass storage class */
+               if (length > 0xffff) {
+                       n_bds = 0xffff / maxpacket;
+                       length = n_bds * maxpacket;
+               } else {
+                       n_bds = length / maxpacket;
+                       if (length % maxpacket)
+                               n_bds++;
+               }
+               if (n_bds == 1)
+                       onepacket = 1;
+               else
+                       n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+       }
+
+       /* In host mode, autorequest logic can generate some IN tokens; it's
+        * tricky since we can't leave REQPKT set in RXCSR after the transfer
+        * finishes. So:  multipacket transfers involve two or more segments.
+        * And always at least two IRQs ... RNDIS mode is not an option.
+        */
+       if (is_host_active(musb))
+               n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+       cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+       length = min(n_bds * maxpacket, length);
+
+       DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+                       "dma 0x%x len %u %u/%u\n",
+                       rx->index, maxpacket,
+                       onepacket
+                               ? (is_rndis ? "rndis" : "onepacket")
+                               : "multipacket",
+                       n_bds,
+                       musb_readl(tibase,
+                               DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+                                       & 0xffff,
+                       addr, length, rx->channel.actual_len, rx->buf_len);
+
+       /* only queue one segment at a time, since the hardware prevents
+        * correct queue shutdown after unexpected short packets
+        */
+       bd = cppi_bd_alloc(rx);
+       rx->head = bd;
+
+       /* Build BDs for all packets in this segment */
+       for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+               u32     bd_len;
+
+               if (i) {
+                       bd = cppi_bd_alloc(rx);
+                       if (!bd)
+                               break;
+                       tail->next = bd;
+                       tail->hw_next = bd->dma;
+               }
+               bd->hw_next = 0;
+
+               /* all but the last packet will be maxpacket size */
+               if (maxpacket < length)
+                       bd_len = maxpacket;
+               else
+                       bd_len = length;
+
+               bd->hw_bufp = addr;
+               addr += bd_len;
+               rx->offset += bd_len;
+
+               bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+               bd->buflen = bd_len;
+
+               bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
+               length -= bd_len;
+       }
+
+       /* we always expect at least one reusable BD! */
+       if (!tail) {
+               WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+               return;
+       } else if (i < n_bds)
+               WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+       tail->next = NULL;
+       tail->hw_next = 0;
+
+       bd = rx->head;
+       rx->tail = tail;
+
+       /* short reads and other faults should terminate this entire
+        * dma segment.  we want one "dma packet" per dma segment, not
+        * one per USB packet, terminating the whole queue at once...
+        * NOTE that current hardware seems to ignore SOP and EOP.
+        */
+       bd->hw_options |= CPPI_SOP_SET;
+       tail->hw_options |= CPPI_EOP_SET;
+
+       if (debug >= 5) {
+               struct cppi_descriptor  *d;
+
+               for (d = rx->head; d; d = d->next)
+                       cppi_dump_rxbd("S", d);
+       }
+
+       /* in case the preceding transfer left some state... */
+       tail = rx->last_processed;
+       if (tail) {
+               tail->next = bd;
+               tail->hw_next = bd->dma;
+       }
+
+       core_rxirq_enable(tibase, rx->index + 1);
+
+       /* BDs live in DMA-coherent memory, but writes might be pending */
+       cpu_drain_writebuffer();
+
+       /* REVISIT specs say to write this AFTER the BUFCNT register
+        * below ... but that loses badly.
+        */
+       musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+       /* bufferCount must be at least 3, and zeroes on completion
+        * unless it underflows below zero, or stops at two, or keeps
+        * growing ... grr.
+        */
+       i = musb_readl(tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+                       & 0xffff;
+
+       if (!i)
+               musb_writel(tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+                       n_bds + 2);
+       else if (n_bds > (i - 3))
+               musb_writel(tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+                       n_bds - (i - 3));
+
+       i = musb_readl(tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+                       & 0xffff;
+       if (i < (2 + n_bds)) {
+               DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+                                       rx->index, i, n_bds);
+               musb_writel(tibase,
+                       DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+                       n_bds + 2);
+       }
+
+       cppi_dump_rx(4, rx, "/S");
+}
+
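The RX RNDIS heuristic described above boils down to a few arithmetic tests on the request length; cppi_next_rx_segment() only tries RNDIS mode when all of them pass. A small illustration (not driver code; the cppi_rx_rndis and is_peripheral_active() gates are taken as given) for a hypothetical 512 byte bulk endpoint:

#include <stdio.h>

/* same length tests as the peripheral-side heuristic above */
static int rx_rndis_ok(unsigned maxpacket, unsigned length)
{
	return length > maxpacket			/* more than one packet */
		&& (length & ~0xffff) == 0		/* under the 64KB descriptor limit */
		&& (length & 0x0fff) != 0		/* not a multiple of 4096 (g_zero default) */
		&& (length & (maxpacket - 1)) == 0;	/* exact multiple of maxpacket */
}

int main(void)
{
	printf("len 2048: %s\n", rx_rndis_ok(512, 2048) ? "rndis" : "onepacket");
	printf("len 4096: %s\n", rx_rndis_ok(512, 4096) ? "rndis" : "onepacket");
	printf("len 3000: %s\n", rx_rndis_ok(512, 3000) ? "rndis" : "onepacket");
	return 0;
}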
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ *     all short reads as errors and kick in high level fault recovery.
+ *     For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+               u16 maxpacket, u8 mode,
+               dma_addr_t dma_addr, u32 len)
+{
+       struct cppi_channel     *cppi_ch;
+       struct cppi             *controller;
+       struct musb             *musb;
+
+       cppi_ch = container_of(ch, struct cppi_channel, channel);
+       controller = cppi_ch->controller;
+       musb = controller->musb;
+
+       switch (ch->status) {
+       case MUSB_DMA_STATUS_BUS_ABORT:
+       case MUSB_DMA_STATUS_CORE_ABORT:
+               /* fault irq handler should have handled cleanup */
+               WARNING("%cX DMA%d not cleaned up after abort!\n",
+                               cppi_ch->transmit ? 'T' : 'R',
+                               cppi_ch->index);
+               /* WARN_ON(1); */
+               break;
+       case MUSB_DMA_STATUS_BUSY:
+               WARNING("program active channel?  %cX DMA%d\n",
+                               cppi_ch->transmit ? 'T' : 'R',
+                               cppi_ch->index);
+               /* WARN_ON(1); */
+               break;
+       case MUSB_DMA_STATUS_UNKNOWN:
+               DBG(1, "%cX DMA%d not allocated!\n",
+                               cppi_ch->transmit ? 'T' : 'R',
+                               cppi_ch->index);
+               /* FALLTHROUGH */
+       case MUSB_DMA_STATUS_FREE:
+               break;
+       }
+
+       ch->status = MUSB_DMA_STATUS_BUSY;
+
+       /* set transfer parameters, then queue up its first segment */
+       cppi_ch->buf_dma = dma_addr;
+       cppi_ch->offset = 0;
+       cppi_ch->maxpacket = maxpacket;
+       cppi_ch->buf_len = len;
+
+       /* TX channel? or RX? */
+       if (cppi_ch->transmit)
+               cppi_next_tx_segment(musb, cppi_ch);
+       else
+               cppi_next_rx_segment(musb, cppi_ch, mode);
+
+       return true;
+}
+
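For orientation, a sketch of the lifecycle a caller follows around cppi_channel_program(), written as if it lived in this file. The example_tx_transfer() helper is hypothetical and not part of the patch; real callers in musb_host.c / musb_gadget.c reach these operations through the generic dma_controller hooks rather than calling the statics directly.

/* hypothetical caller, for illustration only */
static void example_tx_transfer(struct dma_controller *c,
		struct musb_hw_ep *hw_ep, u16 maxpacket,
		dma_addr_t buf, u32 len)
{
	struct dma_channel	*ch;

	/* bind the endpoint's TX direction to a CPPI channel */
	ch = cppi_channel_allocate(c, hw_ep, 1);
	if (!ch)
		return;

	/* queue the first DMA segment; the channel becomes BUSY */
	cppi_channel_program(ch, maxpacket, 0, buf, len);

	/* Later, the DMA interrupt calls cppi_completion(), which walks
	 * the finished BDs and marks the channel MUSB_DMA_STATUS_FREE;
	 * once the endpoint is done with DMA it calls
	 * cppi_channel_release(ch).
	 */
}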
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+       struct cppi_channel             *rx = &cppi->rx[ch];
+       struct cppi_rx_stateram __iomem *state = rx->state_ram;
+       struct cppi_descriptor          *bd;
+       struct cppi_descriptor          *last = rx->last_processed;
+       bool                            completed = false;
+       bool                            acked = false;
+       int                             i;
+       dma_addr_t                      safe2ack;
+       void __iomem                    *regs = rx->hw_ep->regs;
+
+       cppi_dump_rx(6, rx, "/K");
+
+       bd = last ? last->next : rx->head;
+       if (!bd)
+               return false;
+
+       /* run through all completed BDs */
+       for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+                       (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+                       i++, bd = bd->next) {
+               u16     len;
+
+               /* catch latest BD writes from CPPI */
+               rmb();
+               if (!completed && (bd->hw_options & CPPI_OWN_SET))
+                       break;
+
+               DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+                       "off.len %08x opt.len %08x (%d)\n",
+                       bd->dma, bd->hw_next, bd->hw_bufp,
+                       bd->hw_off_len, bd->hw_options,
+                       rx->channel.actual_len);
+
+               /* actual packet received length */
+               if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+                       len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+               else
+                       len = 0;
+
+               if (bd->hw_options & CPPI_EOQ_MASK)
+                       completed = true;
+
+               if (!completed && len < bd->buflen) {
+                       /* NOTE:  when we get a short packet, RXCSR_H_REQPKT
+                        * must have been cleared, and no more DMA packets may
+                        * be active in the queue... TI docs didn't say, but
+                        * CPPI ignores those BDs even though OWN is still set.
+                        */
+                       completed = true;
+                       DBG(3, "rx short %d/%d (%d)\n",
+                                       len, bd->buflen,
+                                       rx->channel.actual_len);
+               }
+
+               /* If we got here, we expect to ack at least one BD; meanwhile
+                * CPPI may be completing other BDs while we scan this list...
+                *
+                * RACE: we can notice OWN cleared before CPPI raises the
+                * matching irq by writing that BD as the completion pointer.
+                * In such cases, stop scanning and wait for the irq, avoiding
+                * lost acks and states where BD ownership is unclear.
+                */
+               if (bd->dma == safe2ack) {
+                       musb_writel(&state->rx_complete, 0, safe2ack);
+                       safe2ack = musb_readl(&state->rx_complete, 0);
+                       acked = true;
+                       if (bd->dma == safe2ack)
+                               safe2ack = 0;
+               }
+
+               rx->channel.actual_len += len;
+
+               cppi_bd_free(rx, last);
+               last = bd;
+
+               /* stop scanning on end-of-segment */
+               if (bd->hw_next == 0)
+                       completed = true;
+       }
+       rx->last_processed = last;
+
+       /* dma abort, lost ack, or ... */
+       if (!acked && last) {
+               int     csr;
+
+               if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+                       musb_writel(&state->rx_complete, 0, safe2ack);
+               if (safe2ack == 0) {
+                       cppi_bd_free(rx, last);
+                       rx->last_processed = NULL;
+
+                       /* if we land here on the host side, H_REQPKT will
+                        * be clear and we need to restart the queue...
+                        */
+                       WARN_ON(rx->head);
+               }
+               musb_ep_select(cppi->mregs, rx->index + 1);
+               csr = musb_readw(regs, MUSB_RXCSR);
+               if (csr & MUSB_RXCSR_DMAENAB) {
+                       DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+                               rx->index,
+                               rx->head, rx->tail,
+                               rx->last_processed
+                                       ? rx->last_processed->dma
+                                       : 0,
+                               completed ? ", completed" : "",
+                               csr);
+                       cppi_dump_rxq(4, "/what?", rx);
+               }
+       }
+       if (!completed) {
+               int     csr;
+
+               rx->head = bd;
+
+               /* REVISIT seems like "autoreq all but EOP" doesn't...
+                * setting it here "should" be racey, but seems to work
+                */
+               csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+               if (is_host_active(cppi->musb)
+                               && bd
+                               && !(csr & MUSB_RXCSR_H_REQPKT)) {
+                       csr |= MUSB_RXCSR_H_REQPKT;
+                       musb_writew(regs, MUSB_RXCSR,
+                                       MUSB_RXCSR_H_WZC_BITS | csr);
+                       csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+               }
+       } else {
+               rx->head = NULL;
+               rx->tail = NULL;
+       }
+
+       cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+       return completed;
+}
+
+void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+{
+       void __iomem            *tibase;
+       int                     i, index;
+       struct cppi             *cppi;
+       struct musb_hw_ep       *hw_ep = NULL;
+
+       cppi = container_of(musb->dma_controller, struct cppi, controller);
+
+       tibase = musb->ctrl_base;
+
+       /* process TX channels */
+       for (index = 0; tx; tx = tx >> 1, index++) {
+               struct cppi_channel             *tx_ch;
+               struct cppi_tx_stateram __iomem *tx_ram;
+               bool                            completed = false;
+               struct cppi_descriptor          *bd;
+
+               if (!(tx & 1))
+                       continue;
+
+               tx_ch = cppi->tx + index;
+               tx_ram = tx_ch->state_ram;
+
+               /* FIXME  need a cppi_tx_scan() routine, which
+                * can also be called from abort code
+                */
+
+               cppi_dump_tx(5, tx_ch, "/E");
+
+               bd = tx_ch->head;
+
+               if (NULL == bd) {
+                       DBG(1, "null BD\n");
+                       continue;
+               }
+
+               /* run through all completed BDs */
+               for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+                               i++, bd = bd->next) {
+                       u16     len;
+
+                       /* catch latest BD writes from CPPI */
+                       rmb();
+                       if (bd->hw_options & CPPI_OWN_SET)
+                               break;
+
+                       DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+                                       bd, bd->hw_next, bd->hw_bufp,
+                                       bd->hw_off_len, bd->hw_options);
+
+                       len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+                       tx_ch->channel.actual_len += len;
+
+                       tx_ch->last_processed = bd;
+
+                       /* write completion register to acknowledge
+                        * processing of completed BDs, and possibly
+                        * release the IRQ; EOQ might not be set ...
+                        *
+                        * REVISIT use the same ack strategy as rx
+                        *
+                        * REVISIT have observed bit 18 set; huh??
+                        */
+                       /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+                               musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+                       /* stop scanning on end-of-segment */
+                       if (bd->hw_next == 0)
+                               completed = true;
+               }
+
+               /* on end of segment, maybe go to next one */
+               if (completed) {
+                       /* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+                       /* transfer more, or report completion */
+                       if (tx_ch->offset >= tx_ch->buf_len) {
+                               tx_ch->head = NULL;
+                               tx_ch->tail = NULL;
+                               tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+                               hw_ep = tx_ch->hw_ep;
+
+                               /* Peripheral role never repurposes the
+                                * endpoint, so immediate completion is
+                                * safe.  Host role waits for the fifo
+                                * to empty (TXPKTRDY irq) before going
+                                * to the next queued bulk transfer.
+                                */
+                               if (is_host_active(cppi->musb)) {
+#if 0
+                                       /* WORKAROUND because we may
+                                        * not always get TXPKTRDY ...
+                                        */
+                                       int     csr;
+
+                                       csr = musb_readw(hw_ep->regs,
+                                               MUSB_TXCSR);
+                                       if (csr & MUSB_TXCSR_TXPKTRDY)
+#endif
+                                               completed = false;
+                               }
+                               if (completed)
+                                       musb_dma_completion(musb, index + 1, 1);
+
+                       } else {
+                               /* Bigger transfer than we could fit in
+                                * that first batch of descriptors...
+                                */
+                               cppi_next_tx_segment(musb, tx_ch);
+                       }
+               } else
+                       tx_ch->head = bd;
+       }
+
+       /* Start processing the RX block */
+       for (index = 0; rx; rx = rx >> 1, index++) {
+
+               if (rx & 1) {
+                       struct cppi_channel             *rx_ch;
+
+                       rx_ch = cppi->rx + index;
+
+                       /* let incomplete dma segments finish */
+                       if (!cppi_rx_scan(cppi, index))
+                               continue;
+
+                       /* start another dma segment if needed */
+                       if (rx_ch->channel.actual_len != rx_ch->buf_len
+                                       && rx_ch->channel.actual_len
+                                               == rx_ch->offset) {
+                               cppi_next_rx_segment(musb, rx_ch, 1);
+                               continue;
+                       }
+
+                       /* all segments completed! */
+                       rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+                       hw_ep = rx_ch->hw_ep;
+
+                       core_rxirq_disable(tibase, index + 1);
+                       musb_dma_completion(musb, index + 1, 0);
+               }
+       }
+
+       /* write to CPPI EOI register to re-enable interrupts */
+       musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+       struct cppi             *controller;
+
+       controller = kzalloc(sizeof *controller, GFP_KERNEL);
+       if (!controller)
+               return NULL;
+
+       controller->mregs = mregs;
+       controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+       controller->musb = musb;
+       controller->controller.start = cppi_controller_start;
+       controller->controller.stop = cppi_controller_stop;
+       controller->controller.channel_alloc = cppi_channel_allocate;
+       controller->controller.channel_release = cppi_channel_release;
+       controller->controller.channel_program = cppi_channel_program;
+       controller->controller.channel_abort = cppi_channel_abort;
+
+       /* NOTE: allocating from on-chip SRAM would give the least
+        * contention for memory access, if that ever matters here.
+        */
+
+       /* setup BufferPool */
+       controller->pool = dma_pool_create("cppi",
+                       controller->musb->controller,
+                       sizeof(struct cppi_descriptor),
+                       CPPI_DESCRIPTOR_ALIGN, 0);
+       if (!controller->pool) {
+               kfree(controller);
+               return NULL;
+       }
+
+       return &controller->controller;
+}
+
+/*
+ *  Destroy a previously-instantiated DMA controller.
+ */
+void dma_controller_destroy(struct dma_controller *c)
+{
+       struct cppi     *cppi;
+
+       cppi = container_of(c, struct cppi, controller);
+
+       /* assert:  caller stopped the controller first */
+       dma_pool_destroy(cppi->pool);
+
+       kfree(cppi);
+}
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+       struct cppi_channel     *cppi_ch;
+       struct cppi             *controller;
+       void __iomem            *mbase;
+       void __iomem            *tibase;
+       void __iomem            *regs;
+       u32                     value;
+       struct cppi_descriptor  *queue;
+
+       cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+       controller = cppi_ch->controller;
+
+       switch (channel->status) {
+       case MUSB_DMA_STATUS_BUS_ABORT:
+       case MUSB_DMA_STATUS_CORE_ABORT:
+               /* from RX or TX fault irq handler */
+       case MUSB_DMA_STATUS_BUSY:
+               /* the hardware needs shutting down */
+               regs = cppi_ch->hw_ep->regs;
+               break;
+       case MUSB_DMA_STATUS_UNKNOWN:
+       case MUSB_DMA_STATUS_FREE:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       if (!cppi_ch->transmit && cppi_ch->head)
+               cppi_dump_rxq(3, "/abort", cppi_ch);
+
+       mbase = controller->mregs;
+       tibase = controller->tibase;
+
+       queue = cppi_ch->head;
+       cppi_ch->head = NULL;
+       cppi_ch->tail = NULL;
+
+       /* REVISIT should rely on caller having done this,
+        * and caller should rely on us not changing it.
+        * peripheral code is safe ... check host too.
+        */
+       musb_ep_select(mbase, cppi_ch->index + 1);
+
+       if (cppi_ch->transmit) {
+               struct cppi_tx_stateram __iomem *tx_ram;
+               int                     enabled;
+
+               /* mask interrupts raised to signal teardown complete.  */
+               enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
+                               & (1 << cppi_ch->index);
+               if (enabled)
+                       musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+                                       (1 << cppi_ch->index));
+
+               /* REVISIT put timeouts on these controller handshakes */
+
+               cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+               /* teardown DMA engine then usb core */
+               do {
+                       value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+               } while (!(value & CPPI_TEAR_READY));
+               musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+               tx_ram = cppi_ch->state_ram;
+               do {
+                       value = musb_readl(&tx_ram->tx_complete, 0);
+               } while (0xFFFFFFFC != value);
+               musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
+
+               /* FIXME clean up the transfer state ... here?
+                * the completion routine should get called with
+                * an appropriate status code.
+                */
+
+               value = musb_readw(regs, MUSB_TXCSR);
+               value &= ~MUSB_TXCSR_DMAENAB;
+               value |= MUSB_TXCSR_FLUSHFIFO;
+               musb_writew(regs, MUSB_TXCSR, value);
+               musb_writew(regs, MUSB_TXCSR, value);
+
+               /* re-enable interrupt */
+               if (enabled)
+                       musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+                                       (1 << cppi_ch->index));
+
+               /* While we scrub the TX state RAM, ensure that we clean
+                * up any interrupt that's currently asserted:
+                * 1. Write to completion Ptr value 0x1 (bit 0 set)
+                *    (write back mode)
+                * 2. Write to completion Ptr value 0x0 (bit 0 cleared)
+                *    (compare mode)
+                * Value written is compared (for bits 31:2) and when
+                * equal, the interrupt is deasserted.
+                */
+               cppi_reset_tx(tx_ram, 1);
+               musb_writel(&tx_ram->tx_complete, 0, 0);
+
+               cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+               /* REVISIT tx side _should_ clean up the same way
+                * as the RX side ... this does no cleanup at all!
+                */
+
+       } else /* RX */ {
+               u16                     csr;
+
+               /* NOTE: docs don't guarantee any of this works ...  we
+                * expect that if the usb core stops telling the cppi core
+                * to pull more data from it, then it'll be safe to flush
+                * current RX DMA state iff any pending fifo transfer is done.
+                */
+
+               core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+               /* for host, ensure ReqPkt is never set again */
+               if (is_host_active(cppi_ch->controller->musb)) {
+                       value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+                       value &= ~((0x3) << (cppi_ch->index * 2));
+                       musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+               }
+
+               csr = musb_readw(regs, MUSB_RXCSR);
+
+               /* for host, clear (just) ReqPkt at end of current packet(s) */
+               if (is_host_active(cppi_ch->controller->musb)) {
+                       csr |= MUSB_RXCSR_H_WZC_BITS;
+                       csr &= ~MUSB_RXCSR_H_REQPKT;
+               } else
+                       csr |= MUSB_RXCSR_P_WZC_BITS;
+
+               /* clear dma enable */
+               csr &= ~(MUSB_RXCSR_DMAENAB);
+               musb_writew(regs, MUSB_RXCSR, csr);
+               csr = musb_readw(regs, MUSB_RXCSR);
+
+               /* Quiesce: wait for current dma to finish (if not cleanup).
+                * We can't use bit zero of stateram->rx_sop, since that
+                * refers to an entire "DMA packet" not just emptying the
+                * current fifo.  Most segments need multiple usb packets.
+                */
+               if (channel->status == MUSB_DMA_STATUS_BUSY)
+                       udelay(50);
+
+               /* scan the current list, reporting any data that was
+                * transferred and acking any IRQ
+                */
+               cppi_rx_scan(controller, cppi_ch->index);
+
+               /* clobber the existing state once it's idle
+                *
+                * NOTE:  arguably, we should also wait for all the other
+                * RX channels to quiesce (how??) and then temporarily
+                * disable RXCPPI_CTRL_REG ... but it seems that we can
+                * rely on the controller restarting from state ram, with
+                * only RXCPPI_BUFCNT state being bogus.  BUFCNT will
+                * correct itself after the next DMA transfer though.
+                *
+                * REVISIT does using rndis mode change that?
+                */
+               cppi_reset_rx(cppi_ch->state_ram);
+
+               /* next DMA request _should_ load cppi head ptr */
+
+               /* ... we don't "free" that list, only mutate it in place.  */
+               cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+               /* clean up previously pending bds */
+               cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+               cppi_ch->last_processed = NULL;
+
+               while (queue) {
+                       struct cppi_descriptor  *tmp = queue->next;
+
+                       cppi_bd_free(cppi_ch, queue);
+                       queue = tmp;
+               }
+       }
+
+       channel->status = MUSB_DMA_STATUS_FREE;
+       cppi_ch->buf_dma = 0;
+       cppi_ch->offset = 0;
+       cppi_ch->buf_len = 0;
+       cppi_ch->maxpacket = 0;
+       return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram?  Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644 (file)
index 0000000..fc5216b
--- /dev/null
@@ -0,0 +1,133 @@
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "musb_dma.h"
+#include "musb_core.h"
+
+
+/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
+ * would seem to be shared with the TUSB6020 (over VLYNQ).
+ */
+
+#include "davinci.h"
+
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+       u32 tx_head;                    /* "DMA packet" head descriptor */
+       u32 tx_buf;
+       u32 tx_current;                 /* current descriptor */
+       u32 tx_buf_current;
+       u32 tx_info;                    /* flags, remaining buflen */
+       u32 tx_rem_len;
+       u32 tx_dummy;                   /* unused */
+       u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+       u32 rx_skipbytes;
+       u32 rx_head;
+       u32 rx_sop;                     /* "DMA packet" head descriptor */
+       u32 rx_current;                 /* current descriptor */
+       u32 rx_buf_current;
+       u32 rx_len_len;
+       u32 rx_cnt_cnt;
+       u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET   ((u32)(1 << 31))
+#define CPPI_EOP_SET   ((u32)(1 << 30))
+#define CPPI_OWN_SET   ((u32)(1 << 29))        /* owned by cppi */
+#define CPPI_EOQ_MASK  ((u32)(1 << 28))
+#define CPPI_ZERO_SET  ((u32)(1 << 23))        /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK        ((u32)(1 << 19))        /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define        CPPI_DESCRIPTOR_ALIGN   16      /* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+       /* hardware overlay */
+       u32             hw_next;        /* next buffer descriptor Pointer */
+       u32             hw_bufp;        /* i/o buffer pointer */
+       u32             hw_off_len;     /* buffer_offset16, buffer_length16 */
+       u32             hw_options;     /* flags:  SOP, EOP etc*/
+
+       struct cppi_descriptor *next;
+       dma_addr_t      dma;            /* address of this descriptor */
+       u32             buflen;         /* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
+
+
+struct cppi;
+
+/* CPPI  Channel Control structure */
+struct cppi_channel {
+       struct dma_channel      channel;
+
+       /* back pointer to the DMA controller structure */
+       struct cppi             *controller;
+
+       /* which direction of which endpoint? */
+       struct musb_hw_ep       *hw_ep;
+       bool                    transmit;
+       u8                      index;
+
+       /* DMA modes:  RNDIS or "transparent" */
+       u8                      is_rndis;
+
+       /* book keeping for current transfer request */
+       dma_addr_t              buf_dma;
+       u32                     buf_len;
+       u32                     maxpacket;
+       u32                     offset;         /* dma requested */
+
+       void __iomem            *state_ram;     /* CPPI state */
+
+       struct cppi_descriptor  *freelist;
+
+       /* BD management fields */
+       struct cppi_descriptor  *head;
+       struct cppi_descriptor  *tail;
+       struct cppi_descriptor  *last_processed;
+
+       /* use tx_complete in host role to track endpoints waiting for
+        * FIFONOTEMPTY to clear.
+        */
+       struct list_head        tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+       struct dma_controller           controller;
+       struct musb                     *musb;
+       void __iomem                    *mregs;         /* Mentor regs */
+       void __iomem                    *tibase;        /* TI/CPPI regs */
+
+       struct cppi_channel             tx[MUSB_C_NUM_EPT - 1];
+       struct cppi_channel             rx[MUSB_C_NUM_EPR - 1];
+
+       struct dma_pool                 *pool;
+
+       struct list_head                tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif                         /* end of ifndef _CPPI_DMA_H_ */
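Tying the two structures above together: software follows the descriptors
through their 'next' pointers, while the CPPI engine follows the same chain
through 'hw_next', which must hold the bus address of the following
descriptor.  The fragment below is a hedged sketch of that wiring, not code
from this patch; sketch_build_queue() and its shape are invented purely for
illustration.

static struct cppi_descriptor *sketch_build_queue(struct dma_pool *pool, int n)
{
        struct cppi_descriptor  *head = NULL, *prev = NULL;
        int                     i;

        for (i = 0; i < n; i++) {
                struct cppi_descriptor  *bd;
                dma_addr_t              dma;

                bd = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
                if (!bd)
                        break;
                memset(bd, 0, sizeof(*bd));
                bd->dma = dma;                  /* bus address of this BD */
                if (prev) {
                        prev->next = bd;        /* link the CPU walks */
                        prev->hw_next = dma;    /* link the DMA engine walks */
                } else {
                        head = bd;
                }
                prev = bd;
        }
        /* the last descriptor keeps hw_next == 0, which the RX/TX scan
         * code earlier in this patch treats as end-of-segment
         */
        return head;
}

Allocating from the dma_pool created in dma_controller_create() keeps the
descriptors in DMA-coherent memory with the CPPI_DESCRIPTOR_ALIGN alignment
the engine expects.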
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644 (file)
index 0000000..75baf18
--- /dev/null
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHz oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+       /* start the on-chip PHY and its PLL */
+       __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+                       (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
+       while ((__raw_readl((void __force __iomem *)
+                               IO_ADDRESS(USBPHY_CTL_PADDR))
+                       & USBPHY_PHYCLKGD) == 0)
+               cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+       /* powerdown the on-chip PHY and its oscillator */
+       __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
+                       IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+       u32     tmp, old, val;
+
+       /* workaround:  setup irqs through both register sets */
+       tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+                       << DAVINCI_USB_TXINT_SHIFT;
+       musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+       old = tmp;
+       tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+                       << DAVINCI_USB_RXINT_SHIFT;
+       musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+       tmp |= old;
+
+       val = ~MUSB_INTR_SOF;
+       tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+       musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+       if (is_dma_capable() && !dma_off)
+               printk(KERN_WARNING "%s %s: dma not reactivated\n",
+                               __FILE__, __func__);
+       else
+               dma_off = 0;
+
+       /* force a DRVVBUS irq so we can start polling for ID change */
+       if (is_otg_enabled(musb))
+               musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+                       DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+       /* because we don't set CTRLR.UINT, "important" to:
+        *  - not read/write INTRUSB/INTRUSBE
+        *  - (except during initial setup, as workaround)
+        *  - use INTSETR/INTCLRR instead
+        */
+       musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+                         DAVINCI_USB_USBINT_MASK
+                       | DAVINCI_USB_TXINT_MASK
+                       | DAVINCI_USB_RXINT_MASK);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+       if (is_dma_capable() && !dma_off)
+               WARNING("dma still active\n");
+}
+
+
+/* REVISIT it's not clear whether DaVinci can support full OTG.  */
+
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define        portstate(stmt)         stmt
+#else
+#define        portstate(stmt)
+#endif
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
+
+/* I2C operations are always synchronous, and require a task context.
+ * With unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *ignored)
+{
+       davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+       vbus_state = !vbus_state;
+}
+static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
+#endif /* modified board */
+#endif /* EVM */
+
+static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+{
+       if (is_on)
+               is_on = 1;
+
+       if (vbus_state == is_on)
+               return;
+       vbus_state = !is_on;            /* 0/1 vs "-1 == unknown/init" */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+       if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+               /* modified EVM board switching VBUS with GPIO(6) not I2C
+                * NOTE:  PINMUX0.RGB888 (bit23) must be clear
+                */
+               if (is_on)
+                       gpio_set(GPIO(6));
+               else
+                       gpio_clear(GPIO(6));
+               immediate = 1;
+#else
+               if (immediate)
+                       davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+               else
+                       schedule_work(&evm_vbus_work);
+#endif
+       }
+#endif
+       if (immediate)
+               vbus_state = is_on;
+}
+
+static void davinci_set_vbus(struct musb *musb, int is_on)
+{
+       WARN_ON(is_on && is_peripheral_active(musb));
+       davinci_source_power(musb, is_on, 0);
+}
+
+
+#define        POLL_SECONDS    2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+       struct musb             *musb = (void *)_musb;
+       void __iomem            *mregs = musb->mregs;
+       u8                      devctl;
+       unsigned long           flags;
+
+       /* We poll because DaVinci won't expose several OTG-critical
+        * status change events (from the transceiver) otherwise.
+        */
+       devctl = musb_readb(mregs, MUSB_DEVCTL);
+       DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+
+       spin_lock_irqsave(&musb->lock, flags);
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_WAIT_VFALL:
+               /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+                * seems to mis-handle session "start" otherwise (or in our
+                * case "recover"), in routine "VBUS was valid by the time
+                * VBUSERR got reported during enumeration" cases.
+                */
+               if (devctl & MUSB_DEVCTL_VBUS) {
+                       mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+                       break;
+               }
+               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+                       MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+               break;
+       case OTG_STATE_B_IDLE:
+               if (!is_peripheral_enabled(musb))
+                       break;
+
+               /* There's no ID-changed IRQ, so we have no good way to tell
+                * when to switch to the A-Default state machine (by setting
+                * the DEVCTL.SESSION flag).
+                *
+                * Workaround:  whenever we're in B_IDLE, try setting the
+                * session flag every few seconds.  If it works, ID was
+                * grounded and we're now in the A-Default state machine.
+                *
+                * NOTE setting the session flag is _supposed_ to trigger
+                * SRP, but clearly it doesn't.
+                */
+               musb_writeb(mregs, MUSB_DEVCTL,
+                               devctl | MUSB_DEVCTL_SESSION);
+               devctl = musb_readb(mregs, MUSB_DEVCTL);
+               if (devctl & MUSB_DEVCTL_BDEVICE)
+                       mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+               else
+                       musb->xceiv.state = OTG_STATE_A_IDLE;
+               break;
+       default:
+               break;
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci)
+{
+       unsigned long   flags;
+       irqreturn_t     retval = IRQ_NONE;
+       struct musb     *musb = __hci;
+       void __iomem    *tibase = musb->ctrl_base;
+       u32             tmp;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       /* NOTE: DaVinci shadows the Mentor IRQs.  Don't manage them through
+        * the Mentor registers (except for setup), use the TI ones and EOI.
+        *
+        * Docs describe irq "vector" registers associated with the CPPI and
+        * USB EOI registers.  These hold a bitmask corresponding to the
+        * current IRQ, not an irq handler address.  Would using those bits
+        * resolve some of the races observed in this dispatch code??
+        */
+
+       /* CPPI interrupts share the same IRQ line, but have their own
+        * mask, state, "vector", and EOI registers.
+        */
+       if (is_cppi_enabled()) {
+               u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+               u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+               if (cppi_tx || cppi_rx) {
+                       DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+                       cppi_completion(musb, cppi_rx, cppi_tx);
+                       retval = IRQ_HANDLED;
+               }
+       }
+
+       /* ack and handle non-CPPI interrupts */
+       tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+       musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+       DBG(4, "IRQ %08x\n", tmp);
+
+       musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+                       >> DAVINCI_USB_RXINT_SHIFT;
+       musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+                       >> DAVINCI_USB_TXINT_SHIFT;
+       musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+                       >> DAVINCI_USB_USBINT_SHIFT;
+
+       /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+        * DaVinci's missing ID change IRQ.  We need an ID change IRQ to
+        * switch appropriately between halves of the OTG state machine.
+        * Managing DEVCTL.SESSION per Mentor docs requires we know its
+        * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+        * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+        */
+       if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+               int     drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+               void __iomem *mregs = musb->mregs;
+               u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
+               int     err = musb->int_usb & MUSB_INTR_VBUSERROR;
+
+               err = is_host_enabled(musb)
+                               && (musb->int_usb & MUSB_INTR_VBUSERROR);
+               if (err) {
+                       /* The Mentor core doesn't debounce VBUS as needed
+                        * to cope with device connect current spikes. This
+                        * means it's not uncommon for bus-powered devices
+                        * to get VBUS errors during enumeration.
+                        *
+                        * This is a workaround, but newer RTL from Mentor
+                        * seems to allow a better one: "re"starting sessions
+                        * without waiting (on EVM, a **long** time) for VBUS
+                        * to stop registering in devctl.
+                        */
+                       musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+                       musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+                       mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+                       WARNING("VBUS error workaround (delay coming)\n");
+               } else if (is_host_enabled(musb) && drvvbus) {
+                       musb->is_active = 1;
+                       MUSB_HST_MODE(musb);
+                       musb->xceiv.default_a = 1;
+                       musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+                       portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+                       del_timer(&otg_workaround);
+               } else {
+                       musb->is_active = 0;
+                       MUSB_DEV_MODE(musb);
+                       musb->xceiv.default_a = 0;
+                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+               }
+
+               /* NOTE:  this must complete poweron within 100 msec */
+               davinci_source_power(musb, drvvbus, 0);
+               DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+                               drvvbus ? "on" : "off",
+                               otg_state_string(musb),
+                               err ? " ERROR" : "",
+                               devctl);
+               retval = IRQ_HANDLED;
+       }
+
+       if (musb->int_tx || musb->int_rx || musb->int_usb)
+               retval |= musb_interrupt(musb);
+
+       /* irq stays asserted until EOI is written */
+       musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+       /* poll for ID change */
+       if (is_otg_enabled(musb)
+                       && musb->xceiv.state == OTG_STATE_B_IDLE)
+               mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       /* REVISIT we sometimes get unhandled IRQs
+        * (e.g. ep0).  not clear why...
+        */
+       if (retval != IRQ_HANDLED)
+               DBG(5, "unhandled? %08x\n", tmp);
+       return IRQ_HANDLED;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+       void __iomem    *tibase = musb->ctrl_base;
+       u32             revision;
+
+       musb->mregs += DAVINCI_BASE_OFFSET;
+#if 0
+       /* REVISIT there's something odd about clocking; this
+        * didn't appear to do the job ...
+        */
+       musb->clock = clk_get(pDevice, "usb");
+       if (IS_ERR(musb->clock))
+               return PTR_ERR(musb->clock);
+
+       status = clk_enable(musb->clock);
+       if (status < 0)
+               return -ENODEV;
+#endif
+
+       /* returns zero if e.g. not clocked */
+       revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+       if (revision == 0)
+               return -ENODEV;
+
+       if (is_host_enabled(musb))
+               setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+       musb->board_set_vbus = davinci_set_vbus;
+       davinci_source_power(musb, 0, 1);
+
+       /* reset the controller */
+       musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+       /* start the on-chip PHY and its PLL */
+       phy_on();
+
+       msleep(5);
+
+       /* NOTE:  irqs are in mixed mode, not bypass to pure-musb */
+       pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+               revision, __raw_readl((void __force __iomem *)
+                               IO_ADDRESS(USBPHY_CTL_PADDR)),
+               musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+       musb->isr = davinci_interrupt;
+       return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+       if (is_host_enabled(musb))
+               del_timer_sync(&otg_workaround);
+
+       davinci_source_power(musb, 0 /*off*/, 1);
+
+       /* delay, to avoid problems with module reload */
+       if (is_host_enabled(musb) && musb->xceiv.default_a) {
+               int     maxdelay = 30;
+               u8      devctl, warn = 0;
+
+               /* if there's no peripheral connected, this can take a
+                * long time to fall, especially on EVM with huge C133.
+                */
+               do {
+                       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+                       if (!(devctl & MUSB_DEVCTL_VBUS))
+                               break;
+                       if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+                               warn = devctl & MUSB_DEVCTL_VBUS;
+                               DBG(1, "VBUS %d\n",
+                                       warn >> MUSB_DEVCTL_VBUS_SHIFT);
+                       }
+                       msleep(1000);
+                       maxdelay--;
+               } while (maxdelay > 0);
+
+               /* in OTG mode, another host might be connected */
+               if (devctl & MUSB_DEVCTL_VBUS)
+                       DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+       }
+
+       phy_off();
+       return 0;
+}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644 (file)
index 0000000..7fb6238
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define        USBPHY_CTL_PADDR        (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define        USBPHY_PHYCLKGD         (1 << 8)
+#define        USBPHY_SESNDEN          (1 << 7)        /* v(sess_end) comparator */
+#define        USBPHY_VBDTCTEN         (1 << 6)        /* v(bus) comparator */
+#define        USBPHY_PHYPLLON         (1 << 4)        /* override pll suspend */
+#define        USBPHY_CLKO1SEL         (1 << 3)
+#define        USBPHY_OSCPDWN          (1 << 2)
+#define        USBPHY_PHYPDWN          (1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG                0x00
+#define DAVINCI_USB_CTRL_REG           0x04
+#define DAVINCI_USB_STAT_REG           0x08
+#define DAVINCI_RNDIS_REG              0x10
+#define DAVINCI_AUTOREQ_REG            0x14
+#define DAVINCI_USB_INT_SOURCE_REG     0x20
+#define DAVINCI_USB_INT_SET_REG                0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG    0x28
+#define DAVINCI_USB_INT_MASK_REG       0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG   0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG   0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG            0x3c
+#define DAVINCI_USB_EOI_INTVEC         0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG                0x80
+#define DAVINCI_TXCPPI_TEAR_REG                0x84
+#define DAVINCI_CPPI_EOI_REG           0x88
+#define DAVINCI_CPPI_INTVEC_REG                0x8c
+#define DAVINCI_TXCPPI_MASKED_REG      0x90
+#define DAVINCI_TXCPPI_RAW_REG         0x94
+#define DAVINCI_TXCPPI_INTENAB_REG     0x98
+#define DAVINCI_TXCPPI_INTCLR_REG      0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG                0xC0
+#define DAVINCI_RXCPPI_MASKED_REG      0xD0
+#define DAVINCI_RXCPPI_RAW_REG         0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG     0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG      0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG     0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG     0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG     0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG     0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET   0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+       (DAVINCI_CPPI_STATERAM_BASE_OFFSET +       ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+       (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE                1
+#define DAVINCI_DMA_CTRL_DISABLE       0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE        0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK     0x1f            /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK     0x1e            /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT       16
+#define DAVINCI_USB_TXINT_SHIFT                0
+#define DAVINCI_USB_RXINT_SHIFT                8
+
+#define DAVINCI_INTR_DRVVBUS           0x0100
+
+#define DAVINCI_USB_USBINT_MASK                0x01ff0000      /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+       (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+       (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
+
+#define DAVINCI_BASE_OFFSET            0x400
+
+#endif /* __MUSB_HDRDF_H__ */
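As a worked example of the state RAM layout encoded by the macros above,
DAVINCI_TXCPPI_STATERAM_OFFSET(2) expands to 0x100 + (2 * 0x40) = 0x180 and
DAVINCI_RXCPPI_STATERAM_OFFSET(2) to 0x100 + 0x20 + (2 * 0x40) = 0x1a0: each
channel owns a 0x40-byte stride, with its TX state block in the first half
and its RX state block starting 0x20 bytes in.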
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644 (file)
index 0000000..462586d
--- /dev/null
@@ -0,0 +1,2266 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works.  These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments.  Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE:  the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ *  - Lack of host-side transaction scheduling, for all transfer types.
+ *    The hardware doesn't do it; instead, software must.
+ *
+ *    This is not an issue for OTG devices that don't support external
+ *    hubs, but for more "normal" USB hosts it's a user issue that the
+ *    "multipoint" support doesn't scale in the expected ways.  That
+ *    includes DaVinci EVM in a common non-OTG mode.
+ *
+ *      * Control and bulk use dedicated endpoints, and there's as
+ *        yet no mechanism to either (a) reclaim the hardware when
+ *        peripherals are NAKing, which gets complicated with bulk
+ *        endpoints, or (b) use more than a single bulk endpoint in
+ *        each direction.
+ *
+ *        RESULT:  one device may be perceived as blocking another one.
+ *
+ *      * Interrupt and isochronous will dynamically allocate endpoint
+ *        hardware, but (a) there's no record keeping for bandwidth;
+ *        (b) in the common case that few endpoints are available, there
+ *        is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ *        RESULT:  At one extreme, bandwidth can be overcommitted in
+ *        some hardware configurations, and no faults will be reported.
+ *        At the other extreme, the bandwidth capabilities which do
+ *        exist tend to be severely undercommitted.  You can't yet hook
+ *        up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ *     - Kconfig for everything user-configurable
+ *     - <asm/arch/hdrc_cnf.h> for SOC or family details
+ *     - platform_device for addressing, irq, and platform_data
+ *     - platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musb_core.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+#if MUSB_DEBUG > 0
+unsigned debug = MUSB_DEBUG;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "initial debug message level");
+
+#define MUSB_VERSION_SUFFIX    "/dbg"
+#endif
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION_BASE "6.0"
+
+#ifndef MUSB_VERSION_SUFFIX
+#define MUSB_VERSION_SUFFIX    ""
+#endif
+#define MUSB_VERSION   MUSB_VERSION_BASE MUSB_VERSION_SUFFIX
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb_hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       /* usbcore insists dev->driver_data is a "struct hcd *" */
+       return hcd_to_musb(dev_get_drvdata(dev));
+#else
+       return dev_get_drvdata(dev);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+       void __iomem *fifo = hw_ep->fifo;
+
+       prefetch((u8 *)src);
+
+       DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+                       'T', hw_ep->epnum, fifo, len, src);
+
+       /* we can't assume unaligned reads work */
+       if (likely((0x01 & (unsigned long) src) == 0)) {
+               u16     index = 0;
+
+               /* best case is 32bit-aligned source address */
+               if ((0x02 & (unsigned long) src) == 0) {
+                       if (len >= 4) {
+                               writesl(fifo, src + index, len >> 2);
+                               index += len & ~0x03;
+                       }
+                       if (len & 0x02) {
+                               musb_writew(fifo, 0, *(u16 *)&src[index]);
+                               index += 2;
+                       }
+               } else {
+                       if (len >= 2) {
+                               writesw(fifo, src + index, len >> 1);
+                               index += len & ~0x01;
+                       }
+               }
+               if (len & 0x01)
+                       musb_writeb(fifo, 0, src[index]);
+       } else  {
+               /* byte aligned */
+               writesb(fifo, src, len);
+       }
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+       void __iomem *fifo = hw_ep->fifo;
+
+       DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+                       'R', hw_ep->epnum, fifo, len, dst);
+
+       /* we can't assume unaligned writes work */
+       if (likely((0x01 & (unsigned long) dst) == 0)) {
+               u16     index = 0;
+
+               /* best case is 32bit-aligned destination address */
+               if ((0x02 & (unsigned long) dst) == 0) {
+                       if (len >= 4) {
+                               readsl(fifo, dst, len >> 2);
+                               index = len & ~0x03;
+                       }
+                       if (len & 0x02) {
+                               *(u16 *)&dst[index] = musb_readw(fifo, 0);
+                               index += 2;
+                       }
+               } else {
+                       if (len >= 2) {
+                               readsw(fifo, dst, len >> 1);
+                               index = len & ~0x01;
+                       }
+               }
+               if (len & 0x01)
+                       dst[index] = musb_readb(fifo, 0);
+       } else  {
+               /* byte aligned */
+               readsb(fifo, dst, len);
+       }
+}
+
+#endif /* normal PIO */
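Reduced to its essentials, the PIO FIFO code above picks the widest access the
buffer pointer's alignment (and remaining length) allows, then mops up any
trailing bytes with narrower accesses.  The helper below is only a simplified
restatement for illustration; pio_access_width() is a made-up name and does
not exist in the driver.

static unsigned pio_access_width(const void *buf, u16 len)
{
        unsigned long addr = (unsigned long) buf;

        if ((addr & 0x03) == 0 && len >= 4)
                return 4;       /* bulk of transfer via readsl()/writesl() */
        if ((addr & 0x01) == 0 && len >= 2)
                return 2;       /* bulk of transfer via readsw()/writesw() */
        return 1;               /* unaligned buffer: readsb()/writesb() */
}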
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+       /* implicit SYNC then DATA0 to start */
+
+       /* JKJKJKJK x9 */
+       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       /* JJKKJJKK x8 */
+       0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+       /* JJJJKKKK x8 */
+       0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+       /* JJJJJJJKKKKKKK x8 */
+       0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+       /* JJJJJJJK x8 */
+       0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+       /* JKKKKKKK x10, JK */
+       0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+       /* implicit CRC16 then EOP to end */
+};
+
+void musb_load_testpacket(struct musb *musb)
+{
+       void __iomem    *regs = musb->endpoints[0].regs;
+
+       musb_ep_select(musb->mregs, 0);
+       musb_write_fifo(musb->control_ep,
+                       sizeof(musb_test_packet), musb_test_packet);
+       musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+const char *otg_state_string(struct musb *musb)
+{
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_IDLE:          return "a_idle";
+       case OTG_STATE_A_WAIT_VRISE:    return "a_wait_vrise";
+       case OTG_STATE_A_WAIT_BCON:     return "a_wait_bcon";
+       case OTG_STATE_A_HOST:          return "a_host";
+       case OTG_STATE_A_SUSPEND:       return "a_suspend";
+       case OTG_STATE_A_PERIPHERAL:    return "a_peripheral";
+       case OTG_STATE_A_WAIT_VFALL:    return "a_wait_vfall";
+       case OTG_STATE_A_VBUS_ERR:      return "a_vbus_err";
+       case OTG_STATE_B_IDLE:          return "b_idle";
+       case OTG_STATE_B_SRP_INIT:      return "b_srp_init";
+       case OTG_STATE_B_PERIPHERAL:    return "b_peripheral";
+       case OTG_STATE_B_WAIT_ACON:     return "b_wait_acon";
+       case OTG_STATE_B_HOST:          return "b_host";
+       default:                        return "UNDEFINED";
+       }
+}
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+/*
+ * See also USB_OTG_1-3.pdf 6.6.5 Timers
+ * REVISIT: Are the other timers done in the hardware?
+ */
+#define TB_ASE0_BRST           100     /* Min 3.125 ms */
+
+/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+void musb_otg_timer_func(unsigned long data)
+{
+       struct musb     *musb = (struct musb *)data;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       switch (musb->xceiv.state) {
+       case OTG_STATE_B_WAIT_ACON:
+               DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+               musb_g_disconnect(musb);
+               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               musb->is_active = 0;
+               break;
+       case OTG_STATE_A_WAIT_BCON:
+               DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
+               musb_hnp_stop(musb);
+               break;
+       default:
+               DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+       }
+       musb->ignore_disconnect = 0;
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
+
+/*
+ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+       struct usb_hcd  *hcd = musb_to_hcd(musb);
+       void __iomem    *mbase = musb->mregs;
+       u8      reg;
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_PERIPHERAL:
+       case OTG_STATE_A_WAIT_VFALL:
+       case OTG_STATE_A_WAIT_BCON:
+               DBG(1, "HNP: Switching back to A-host\n");
+               musb_g_disconnect(musb);
+               musb->xceiv.state = OTG_STATE_A_IDLE;
+               MUSB_HST_MODE(musb);
+               musb->is_active = 0;
+               break;
+       case OTG_STATE_B_HOST:
+               DBG(1, "HNP: Disabling HR\n");
+               hcd->self.is_b_host = 0;
+               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               MUSB_DEV_MODE(musb);
+               reg = musb_readb(mbase, MUSB_POWER);
+               reg |= MUSB_POWER_SUSPENDM;
+               musb_writeb(mbase, MUSB_POWER, reg);
+               /* REVISIT: Start SESSION_REQUEST here? */
+               break;
+       default:
+               DBG(1, "HNP: Stopping in unknown state %s\n",
+                       otg_state_string(musb));
+       }
+
+       /*
+        * When returning to A state after HNP, avoid hub_port_rebounce(),
+        * which causes occasional OPT A "Did not receive reset after connect"
+        * errors.
+        */
+       musb->port1_status &=
+               ~(1 << USB_PORT_FEAT_C_CONNECTION);
+}
+
+#endif
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+
+#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
+               | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
+               | MUSB_INTR_RESET)
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+                               u8 devctl, u8 power)
+{
+       irqreturn_t handled = IRQ_NONE;
+       void __iomem *mbase = musb->mregs;
+
+       DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+               int_usb);
+
+       /* in host mode, the peripheral may issue remote wakeup.
+        * in peripheral mode, the host may resume the link.
+        * spurious RESUME irqs happen too, paired with SUSPEND.
+        */
+       if (int_usb & MUSB_INTR_RESUME) {
+               handled = IRQ_HANDLED;
+               DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+
+               if (devctl & MUSB_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+                       switch (musb->xceiv.state) {
+                       case OTG_STATE_A_SUSPEND:
+                               /* remote wakeup?  later, GetPortStatus
+                                * will stop RESUME signaling
+                                */
+
+                               if (power & MUSB_POWER_SUSPENDM) {
+                                       /* spurious */
+                                       musb->int_usb &= ~MUSB_INTR_SUSPEND;
+                                       DBG(2, "Spurious SUSPENDM\n");
+                                       break;
+                               }
+
+                               power &= ~MUSB_POWER_SUSPENDM;
+                               musb_writeb(mbase, MUSB_POWER,
+                                               power | MUSB_POWER_RESUME);
+
+                               musb->port1_status |=
+                                               (USB_PORT_STAT_C_SUSPEND << 16)
+                                               | MUSB_PORT_STAT_RESUME;
+                               musb->rh_timer = jiffies
+                                               + msecs_to_jiffies(20);
+
+                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               musb->is_active = 1;
+                               usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                               break;
+                       case OTG_STATE_B_WAIT_ACON:
+                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               musb->is_active = 1;
+                               MUSB_DEV_MODE(musb);
+                               break;
+                       default:
+                               WARNING("bogus %s RESUME (%s)\n",
+                                       "host",
+                                       otg_state_string(musb));
+                       }
+#endif
+               } else {
+                       switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+                       case OTG_STATE_A_SUSPEND:
+                               /* possibly DISCONNECT is upcoming */
+                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               usb_hcd_resume_root_hub(musb_to_hcd(musb));
+                               break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+                       case OTG_STATE_B_WAIT_ACON:
+                       case OTG_STATE_B_PERIPHERAL:
+                               /* disconnect while suspended?  we may
+                                * not get a disconnect irq...
+                                */
+                               if ((devctl & MUSB_DEVCTL_VBUS)
+                                               != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+                                               ) {
+                                       musb->int_usb |= MUSB_INTR_DISCONNECT;
+                                       musb->int_usb &= ~MUSB_INTR_SUSPEND;
+                                       break;
+                               }
+                               musb_g_resume(musb);
+                               break;
+                       case OTG_STATE_B_IDLE:
+                               musb->int_usb &= ~MUSB_INTR_SUSPEND;
+                               break;
+#endif
+                       default:
+                               WARNING("bogus %s RESUME (%s)\n",
+                                       "peripheral",
+                                       otg_state_string(musb));
+                       }
+               }
+       }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       /* see manual for the order of the tests */
+       if (int_usb & MUSB_INTR_SESSREQ) {
+               DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+
+               /* IRQ arrives from ID pin sense or (later, if VBUS power
+                * is removed) SRP.  responses are time critical:
+                *  - turn on VBUS (with silicon-specific mechanism)
+                *  - go through A_WAIT_VRISE
+                *  - ... to A_WAIT_BCON.
+                * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+                */
+               musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+               musb->ep0_stage = MUSB_EP0_START;
+               musb->xceiv.state = OTG_STATE_A_IDLE;
+               MUSB_HST_MODE(musb);
+               musb_set_vbus(musb, 1);
+
+               handled = IRQ_HANDLED;
+       }
+
+       if (int_usb & MUSB_INTR_VBUSERROR) {
+               int     ignore = 0;
+
+               /* During connection as an A-Device, we may see short
+                * current spikes causing voltage drops, because of cable
+                * and peripheral capacitance combined with vbus draw.
+                * (So: less common with truly self-powered devices, where
+                * vbus doesn't act like a power supply.)
+                *
+                * Such spikes are short; usually less than ~500 usec, max
+                * of ~2 msec.  That is, they're not sustained overcurrent
+                * errors, though they're reported using VBUSERROR irqs.
+                *
+                * Workarounds:  (a) hardware: use self powered devices.
+                * (b) software:  ignore non-repeated VBUS errors.
+                *
+                * REVISIT:  do delays from lots of DEBUG_KERNEL checks
+                * make trouble here, keeping VBUS < 4.4V ?
+                */
+               switch (musb->xceiv.state) {
+               case OTG_STATE_A_HOST:
+                       /* recovery is dicey once we've gotten past the
+                        * initial stages of enumeration, but if VBUS
+                        * stayed ok at the other end of the link, and
+                        * another reset is due (at least for high speed,
+                        * to redo the chirp etc), it might work OK...
+                        */
+               case OTG_STATE_A_WAIT_BCON:
+               case OTG_STATE_A_WAIT_VRISE:
+                       if (musb->vbuserr_retry) {
+                               musb->vbuserr_retry--;
+                               ignore = 1;
+                               devctl |= MUSB_DEVCTL_SESSION;
+                               musb_writeb(mbase, MUSB_DEVCTL, devctl);
+                       } else {
+                               musb->port1_status |=
+                                         (1 << USB_PORT_FEAT_OVER_CURRENT)
+                                       | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+                       }
+                       break;
+               default:
+                       break;
+               }
+
+               DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+                               otg_state_string(musb),
+                               devctl,
+                               ({ char *s;
+                               switch (devctl & MUSB_DEVCTL_VBUS) {
+                               case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+                                       s = "<SessEnd"; break;
+                               case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+                                       s = "<AValid"; break;
+                               case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+                                       s = "<VBusValid"; break;
+                               /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+                               default:
+                                       s = "VALID"; break;
+                               }; s; }),
+                               VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+                               musb->port1_status);
+
+               /* go through A_WAIT_VFALL then start a new session */
+               if (!ignore)
+                       musb_set_vbus(musb, 0);
+               handled = IRQ_HANDLED;
+       }
+
+       if (int_usb & MUSB_INTR_CONNECT) {
+               struct usb_hcd *hcd = musb_to_hcd(musb);
+
+               handled = IRQ_HANDLED;
+               musb->is_active = 1;
+               set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+               musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+               /* flush endpoints when transitioning from Device Mode */
+               if (is_peripheral_active(musb)) {
+                       /* REVISIT HNP; just force disconnect */
+               }
+               musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+               musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+               musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+               musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+                                       |USB_PORT_STAT_HIGH_SPEED
+                                       |USB_PORT_STAT_ENABLE
+                                       );
+               musb->port1_status |= USB_PORT_STAT_CONNECTION
+                                       |(USB_PORT_STAT_C_CONNECTION << 16);
+
+               /* high vs full speed is just a guess until after reset */
+               if (devctl & MUSB_DEVCTL_LSDEV)
+                       musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+               if (hcd->status_urb)
+                       usb_hcd_poll_rh_status(hcd);
+               else
+                       usb_hcd_resume_root_hub(hcd);
+
+               MUSB_HST_MODE(musb);
+
+               /* indicate new connection to OTG machine */
+               switch (musb->xceiv.state) {
+               case OTG_STATE_B_PERIPHERAL:
+                       if (int_usb & MUSB_INTR_SUSPEND) {
+                               DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+                               musb->xceiv.state = OTG_STATE_B_HOST;
+                               hcd->self.is_b_host = 1;
+                               int_usb &= ~MUSB_INTR_SUSPEND;
+                       } else
+                               DBG(1, "CONNECT as b_peripheral???\n");
+                       break;
+               case OTG_STATE_B_WAIT_ACON:
+                       DBG(1, "HNP: Waiting to switch to b_host state\n");
+                       musb->xceiv.state = OTG_STATE_B_HOST;
+                       hcd->self.is_b_host = 1;
+                       break;
+               default:
+                       if ((devctl & MUSB_DEVCTL_VBUS)
+                                       == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+                               musb->xceiv.state = OTG_STATE_A_HOST;
+                               hcd->self.is_b_host = 0;
+                       }
+                       break;
+               }
+               DBG(1, "CONNECT (%s) devctl %02x\n",
+                               otg_state_string(musb), devctl);
+       }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+       /* mentor saves a bit: bus reset and babble share the same irq.
+        * only host sees babble; only peripheral sees bus reset.
+        */
+       if (int_usb & MUSB_INTR_RESET) {
+               if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+                       /*
+                        * Looks like non-HS BABBLE can be ignored, but
+                        * HS BABBLE is an error condition. For HS the solution
+                        * is to avoid babble in the first place and fix what
+                        * caused BABBLE. When HS BABBLE happens we can only
+                        * stop the session.
+                        */
+                       if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+                               DBG(1, "BABBLE devctl: %02x\n", devctl);
+                       else {
+                               ERR("Stopping host session -- babble\n");
+                               musb_writeb(mbase, MUSB_DEVCTL, 0);
+                       }
+               } else if (is_peripheral_capable()) {
+                       DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+                       switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+                       case OTG_STATE_A_SUSPEND:
+                               /* We need to ignore disconnect on suspend
+                                * otherwise tusb 2.0 won't reconnect after a
+                                * power cycle, which breaks otg compliance.
+                                */
+                               musb->ignore_disconnect = 1;
+                               musb_g_reset(musb);
+                               /* FALLTHROUGH */
+                       case OTG_STATE_A_WAIT_BCON:     /* OPT TD.4.7-900ms */
+                               DBG(1, "HNP: Setting timer as %s\n",
+                                               otg_state_string(musb));
+                               musb_otg_timer.data = (unsigned long)musb;
+                               mod_timer(&musb_otg_timer, jiffies
+                                       + msecs_to_jiffies(100));
+                               break;
+                       case OTG_STATE_A_PERIPHERAL:
+                               musb_hnp_stop(musb);
+                               break;
+                       case OTG_STATE_B_WAIT_ACON:
+                               DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+                                       otg_state_string(musb));
+                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               musb_g_reset(musb);
+                               break;
+#endif
+                       case OTG_STATE_B_IDLE:
+                               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+                               /* FALLTHROUGH */
+                       case OTG_STATE_B_PERIPHERAL:
+                               musb_g_reset(musb);
+                               break;
+                       default:
+                               DBG(1, "Unhandled BUS RESET as %s\n",
+                                       otg_state_string(musb));
+                       }
+               }
+
+               handled = IRQ_HANDLED;
+       }
+       schedule_work(&musb->irq_work);
+
+       return handled;
+}
+
+/*
+ * Interrupt Service Routine for the "global" interrupts handled after
+ * the endpoint FIFOs have been serviced (stage 2): disconnect, suspend,
+ * and (when enabled) start-of-frame.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl MUSB_DEVCTL register contents
+ * @param power MUSB_POWER register contents
+ */
+static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+                               u8 devctl, u8 power)
+{
+       irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+       if (int_usb & MUSB_INTR_SOF) {
+               void __iomem *mbase = musb->mregs;
+               struct musb_hw_ep       *ep;
+               u8 epnum;
+               u16 frame;
+
+               DBG(6, "START_OF_FRAME\n");
+               handled = IRQ_HANDLED;
+
+               /* start any periodic Tx transfers waiting for current frame */
+               frame = musb_readw(mbase, MUSB_FRAME);
+               ep = musb->endpoints;
+               for (epnum = 1; (epnum < musb->nr_endpoints)
+                                       && (musb->epmask >= (1 << epnum));
+                               epnum++, ep++) {
+                       /*
+                        * FIXME handle framecounter wraps (12 bits)
+                        * eliminate duplicated StartUrb logic
+                        */
+                       if (ep->dwWaitFrame >= frame) {
+                               ep->dwWaitFrame = 0;
+                               pr_debug("SOF --> periodic TX%s on %d\n",
+                                       ep->tx_channel ? " DMA" : "",
+                                       epnum);
+                               if (!ep->tx_channel)
+                                       musb_h_tx_start(musb, epnum);
+                               else
+                                       cppi_hostdma_start(musb, epnum);
+                       }
+               }               /* end of for loop */
+       }
+#endif
+
+       if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+               DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+                               otg_state_string(musb),
+                               MUSB_MODE(musb), devctl);
+               handled = IRQ_HANDLED;
+
+               switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+               case OTG_STATE_A_HOST:
+               case OTG_STATE_A_SUSPEND:
+                       musb_root_disconnect(musb);
+                       if (musb->a_wait_bcon != 0)
+                               musb_platform_try_idle(musb, jiffies
+                                       + msecs_to_jiffies(musb->a_wait_bcon));
+                       break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+               case OTG_STATE_B_HOST:
+                       musb_hnp_stop(musb);
+                       break;
+               case OTG_STATE_A_PERIPHERAL:
+                       musb_hnp_stop(musb);
+                       musb_root_disconnect(musb);
+                       /* FALLTHROUGH */
+               case OTG_STATE_B_WAIT_ACON:
+                       /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+               case OTG_STATE_B_PERIPHERAL:
+               case OTG_STATE_B_IDLE:
+                       musb_g_disconnect(musb);
+                       break;
+#endif /* GADGET */
+               default:
+                       WARNING("unhandled DISCONNECT transition (%s)\n",
+                               otg_state_string(musb));
+                       break;
+               }
+
+               schedule_work(&musb->irq_work);
+       }
+
+       if (int_usb & MUSB_INTR_SUSPEND) {
+               DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+                               otg_state_string(musb), devctl, power);
+               handled = IRQ_HANDLED;
+
+               switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_OTG
+               case OTG_STATE_A_PERIPHERAL:
+                       /*
+                        * We cannot stop HNP here, devctl BDEVICE might be
+                        * still set.
+                        */
+                       break;
+#endif
+               case OTG_STATE_B_PERIPHERAL:
+                       musb_g_suspend(musb);
+                       musb->is_active = is_otg_enabled(musb)
+                                       && musb->xceiv.gadget->b_hnp_enable;
+                       if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+                               musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+                               DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+                               musb_otg_timer.data = (unsigned long)musb;
+                               mod_timer(&musb_otg_timer, jiffies
+                                       + msecs_to_jiffies(TB_ASE0_BRST));
+#endif
+                       }
+                       break;
+               case OTG_STATE_A_WAIT_BCON:
+                       if (musb->a_wait_bcon != 0)
+                               musb_platform_try_idle(musb, jiffies
+                                       + msecs_to_jiffies(musb->a_wait_bcon));
+                       break;
+               case OTG_STATE_A_HOST:
+                       musb->xceiv.state = OTG_STATE_A_SUSPEND;
+                       musb->is_active = is_otg_enabled(musb)
+                                       && musb->xceiv.host->b_hnp_enable;
+                       break;
+               case OTG_STATE_B_HOST:
+                       /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+                       DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+                       break;
+               default:
+                       /* "should not happen" */
+                       musb->is_active = 0;
+                       break;
+               }
+               schedule_work(&musb->irq_work);
+       }
+
+
+       return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+       void __iomem    *regs = musb->mregs;
+       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
+
+       DBG(2, "<== devctl %02x\n", devctl);
+
+       /*  Set INT enable registers, enable interrupts */
+       musb_writew(regs, MUSB_INTRTXE, musb->epmask);
+       musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+       musb_writeb(regs, MUSB_TESTMODE, 0);
+
+       /* put into basic highspeed mode and start session */
+       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+                                               | MUSB_POWER_SOFTCONN
+                                               | MUSB_POWER_HSENAB
+                                               /* ENSUSPEND wedges tusb */
+                                               /* | MUSB_POWER_ENSUSPEND */
+                                               );
+
+       musb->is_active = 0;
+       devctl = musb_readb(regs, MUSB_DEVCTL);
+       devctl &= ~MUSB_DEVCTL_SESSION;
+
+       if (is_otg_enabled(musb)) {
+               /* session started after:
+                * (a) ID-grounded irq, host mode;
+                * (b) vbus present/connect IRQ, peripheral mode;
+                * (c) peripheral initiates, using SRP
+                */
+               if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+                       musb->is_active = 1;
+               else
+                       devctl |= MUSB_DEVCTL_SESSION;
+
+       } else if (is_host_enabled(musb)) {
+               /* assume ID pin is hard-wired to ground */
+               devctl |= MUSB_DEVCTL_SESSION;
+
+       } else /* peripheral is enabled */ {
+               if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+                       musb->is_active = 1;
+       }
+       musb_platform_enable(musb);
+       musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+
+static void musb_generic_disable(struct musb *musb)
+{
+       void __iomem    *mbase = musb->mregs;
+       u16     temp;
+
+       /* disable interrupts */
+       musb_writeb(mbase, MUSB_INTRUSBE, 0);
+       musb_writew(mbase, MUSB_INTRTXE, 0);
+       musb_writew(mbase, MUSB_INTRRXE, 0);
+
+       /* off */
+       musb_writeb(mbase, MUSB_DEVCTL, 0);
+
+       /*  flush pending interrupts */
+       temp = musb_readb(mbase, MUSB_INTRUSB);
+       temp = musb_readw(mbase, MUSB_INTRTX);
+       temp = musb_readw(mbase, MUSB_INTRRX);
+
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+       /* stop IRQs, timers, ... */
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+       DBG(3, "HDRC disabled\n");
+
+       /* FIXME
+        *  - mark host and/or peripheral drivers unusable/inactive
+        *  - disable DMA (and enable it in HdrcStart)
+        *  - make sure we can musb_start() after musb_stop(); with
+        *    OTG mode, gadget driver module rmmod/modprobe cycles that
+        *  - ...
+        */
+       musb_platform_try_idle(musb, 0);
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+       struct musb     *musb = dev_to_musb(&pdev->dev);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+       if (musb->clock) {
+               clk_put(musb->clock);
+               musb->clock = NULL;
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       /* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing.  The driver has support for both, though at this
+ * writing only the dynamic sizing is well tested.  We use normal idioms
+ * so that both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
+#ifdef MUSB_C_DYNFIFO_DEF
+#define        can_dynfifo()   1
+#else
+#define        can_dynfifo()   0
+#endif
+
+#if defined(CONFIG_USB_TUSB6010) || \
+       defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static ushort __initdata fifo_mode = 4;
+#else
+static ushort __initdata fifo_mode = 2;
+#endif
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+#define DYN_FIFO_SIZE (1<<(MUSB_C_RAM_BITS+2))
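+/*
+ * For example, with MUSB_C_RAM_BITS = 12 this evaluates to 16384 bytes
+ * of FIFO RAM, which is enough for the 16 KB fifo_mode 4 table below.
+ */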
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+       u8              hw_ep_num;
+       enum fifo_style style;
+       enum buf_mode   mode;
+       u16             maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values.  define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct fifo_cfg __initdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct fifo_cfg __initdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct fifo_cfg __initdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct fifo_cfg __initdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct fifo_cfg __initdata mode_4_cfg[] = {
+{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_TX,   .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_RX,   .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
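+
+/*
+ * Rough bookkeeping for mode 4: 64 bytes for ep0 plus twenty-six single
+ * buffered 512 byte FIFOs plus two 1024 byte shared FIFOs comes to
+ * 64 + 26 * 512 + 2 * 1024 = 15424 bytes, within a 16 KB DYN_FIFO_SIZE.
+ */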
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
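+/*
+ * Worked example: a single buffered 512 byte fifo has
+ * size = ffs(512) - 1 = 9, so c_size = 9 - 3 = 6 is written to
+ * TXFIFOSZ/RXFIFOSZ, and the start address goes into
+ * TXFIFOADD/RXFIFOADD in units of 8 bytes (offset >> 3).  BUF_DOUBLE
+ * sets the DPB bit and doubles the space the fifo consumes.
+ */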
+static int __init
+fifo_setup(struct musb *musb, struct musb_hw_ep  *hw_ep,
+               const struct fifo_cfg *cfg, u16 offset)
+{
+       void __iomem    *mbase = musb->mregs;
+       int     size = 0;
+       u16     maxpacket = cfg->maxpacket;
+       u16     c_off = offset >> 3;
+       u8      c_size;
+
+       /* expect hw_ep has already been zero-initialized */
+
+       size = ffs(max(maxpacket, (u16) 8)) - 1;
+       maxpacket = 1 << size;
+
+       c_size = size - 3;
+       if (cfg->mode == BUF_DOUBLE) {
+               if ((offset + (maxpacket << 1)) > DYN_FIFO_SIZE)
+                       return -EMSGSIZE;
+               c_size |= MUSB_FIFOSZ_DPB;
+       } else {
+               if ((offset + maxpacket) > DYN_FIFO_SIZE)
+                       return -EMSGSIZE;
+       }
+
+       /* configure the FIFO */
+       musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       /* EP0 reserved endpoint for control, bidirectional;
+        * EP1 reserved for bulk, two unidirectional halves.
+        */
+       if (hw_ep->epnum == 1)
+               musb->bulk_ep = hw_ep;
+       /* REVISIT error check:  be sure ep0 can both rx and tx ... */
+#endif
+       switch (cfg->style) {
+       case FIFO_TX:
+               musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+               musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+               hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+               hw_ep->max_packet_sz_tx = maxpacket;
+               break;
+       case FIFO_RX:
+               musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+               musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+               hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+               hw_ep->max_packet_sz_rx = maxpacket;
+               break;
+       case FIFO_RXTX:
+               musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+               musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+               hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+               hw_ep->max_packet_sz_rx = maxpacket;
+
+               musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+               musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+               hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+               hw_ep->max_packet_sz_tx = maxpacket;
+
+               hw_ep->is_shared_fifo = true;
+               break;
+       }
+
+       /* NOTE rx and tx endpoint irqs aren't managed separately,
+        * which happens to be ok
+        */
+       musb->epmask |= (1 << hw_ep->epnum);
+
+       return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static struct fifo_cfg __initdata ep0_cfg = {
+       .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __init ep_config_from_table(struct musb *musb)
+{
+       const struct fifo_cfg   *cfg;
+       unsigned                i, n;
+       int                     offset;
+       struct musb_hw_ep       *hw_ep = musb->endpoints;
+
+       switch (fifo_mode) {
+       default:
+               fifo_mode = 0;
+               /* FALLTHROUGH */
+       case 0:
+               cfg = mode_0_cfg;
+               n = ARRAY_SIZE(mode_0_cfg);
+               break;
+       case 1:
+               cfg = mode_1_cfg;
+               n = ARRAY_SIZE(mode_1_cfg);
+               break;
+       case 2:
+               cfg = mode_2_cfg;
+               n = ARRAY_SIZE(mode_2_cfg);
+               break;
+       case 3:
+               cfg = mode_3_cfg;
+               n = ARRAY_SIZE(mode_3_cfg);
+               break;
+       case 4:
+               cfg = mode_4_cfg;
+               n = ARRAY_SIZE(mode_4_cfg);
+               break;
+       }
+
+       printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+                       musb_driver_name, fifo_mode);
+
+
+       offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+       /* assert(offset > 0) */
+
+       /* NOTE:  for RTL versions >= 1.400 EPINFO and RAMINFO would
+        * be better than static MUSB_C_NUM_EPS and DYN_FIFO_SIZE...
+        */
+
+       for (i = 0; i < n; i++) {
+               u8      epn = cfg->hw_ep_num;
+
+               if (epn >= MUSB_C_NUM_EPS) {
+                       pr_debug("%s: invalid ep %d\n",
+                                       musb_driver_name, epn);
+                       continue;
+               }
+               offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+               if (offset < 0) {
+                       pr_debug("%s: mem overrun, ep %d\n",
+                                       musb_driver_name, epn);
+                       return -EINVAL;
+               }
+               epn++;
+               musb->nr_endpoints = max(epn, musb->nr_endpoints);
+       }
+
+       printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
+                       musb_driver_name,
+                       n + 1, MUSB_C_NUM_EPS * 2 - 1,
+                       offset, DYN_FIFO_SIZE);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       if (!musb->bulk_ep) {
+               pr_debug("%s: missing bulk\n", musb_driver_name);
+               return -EINVAL;
+       }
+#endif
+
+       return 0;
+}
+
+
+/*
+ * ep_config_from_hw - used when MUSB_C_DYNFIFO_DEF is not defined
+ * @param musb the controller
+ */
+static int __init ep_config_from_hw(struct musb *musb)
+{
+       u8 epnum = 0, reg;
+       struct musb_hw_ep *hw_ep;
+       void *mbase = musb->mregs;
+
+       DBG(2, "<== static silicon ep config\n");
+
+       /* FIXME pick up ep0 maxpacket size */
+
+       for (epnum = 1; epnum < MUSB_C_NUM_EPS; epnum++) {
+               musb_ep_select(mbase, epnum);
+               hw_ep = musb->endpoints + epnum;
+
+               /* read from core using indexed model */
+               reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+               if (!reg) {
+                       /* 0's returned when no more endpoints */
+                       break;
+               }
+               musb->nr_endpoints++;
+               musb->epmask |= (1 << epnum);
+
+               hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+               /* shared TX/RX FIFO? */
+               if ((reg & 0xf0) == 0xf0) {
+                       hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+                       hw_ep->is_shared_fifo = true;
+                       continue;
+               } else {
+                       hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+                       hw_ep->is_shared_fifo = false;
+               }
+
+               /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+               /* pick an RX/TX endpoint for bulk */
+               if (hw_ep->max_packet_sz_tx < 512
+                               || hw_ep->max_packet_sz_rx < 512)
+                       continue;
+
+               /* REVISIT:  this algorithm is lazy, we should at least
+                * try to pick a double buffered endpoint.
+                */
+               if (musb->bulk_ep)
+                       continue;
+               musb->bulk_ep = hw_ep;
+#endif
+       }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       if (!musb->bulk_ep) {
+               pr_debug("%s: missing bulk\n", musb_driver_name);
+               return -EINVAL;
+       }
+#endif
+
+       return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musb_core_init(u16 musb_type, struct musb *musb)
+{
+#ifdef MUSB_AHB_ID
+       u32 data;
+#endif
+       u8 reg;
+       char *type;
+       u16 hwvers, rev_major, rev_minor;
+       char aInfo[78], aRevision[32], aDate[12];
+       void __iomem    *mbase = musb->mregs;
+       int             status = 0;
+       int             i;
+
+       /* log core options (read using indexed model) */
+       musb_ep_select(mbase, 0);
+       reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+
+       strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+       if (reg & MUSB_CONFIGDATA_DYNFIFO)
+               strcat(aInfo, ", dyn FIFOs");
+       if (reg & MUSB_CONFIGDATA_MPRXE) {
+               strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+               musb->bulk_combine = true;
+#else
+               strcat(aInfo, " (X)");          /* no driver support */
+#endif
+       }
+       if (reg & MUSB_CONFIGDATA_MPTXE) {
+               strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+               musb->bulk_split = true;
+#else
+               strcat(aInfo, " (X)");          /* no driver support */
+#endif
+       }
+       if (reg & MUSB_CONFIGDATA_HBRXE) {
+               strcat(aInfo, ", HB-ISO Rx");
+               strcat(aInfo, " (X)");          /* no driver support */
+       }
+       if (reg & MUSB_CONFIGDATA_HBTXE) {
+               strcat(aInfo, ", HB-ISO Tx");
+               strcat(aInfo, " (X)");          /* no driver support */
+       }
+       if (reg & MUSB_CONFIGDATA_SOFTCONE)
+               strcat(aInfo, ", SoftConn");
+
+       printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+                       musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+       data = musb_readl(mbase, 0x404);
+       sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
+               (data >> 16) & 0xff, (data >> 24) & 0xff);
+       /* FIXME ID2 and ID3 are unused */
+       data = musb_readl(mbase, 0x408);
+       printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
+       data = musb_readl(mbase, 0x40c);
+       printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
+       reg = musb_readb(mbase, 0x400);
+       musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+       aDate[0] = 0;
+#endif
+       if (MUSB_CONTROLLER_MHDRC == musb_type) {
+               musb->is_multipoint = 1;
+               type = "M";
+       } else {
+               musb->is_multipoint = 0;
+               type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef        CONFIG_USB_OTG_BLACKLIST_HUB
+               printk(KERN_ERR
+                       "%s: kernel must blacklist external hubs\n",
+                       musb_driver_name);
+#endif
+#endif
+       }
+
+       /* log release info */
+       hwvers = musb_readw(mbase, MUSB_HWVERS);
+       rev_major = (hwvers >> 10) & 0x1f;
+       rev_minor = hwvers & 0x3ff;
+       snprintf(aRevision, 32, "%d.%d%s", rev_major,
+               rev_minor, (hwvers & 0x8000) ? "RC" : "");
+       printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+                       musb_driver_name, type, aRevision, aDate);
+
+       /* configure ep0 */
+       musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+       musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+
+       /* discover endpoint configuration */
+       musb->nr_endpoints = 1;
+       musb->epmask = 1;
+
+       if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+               if (can_dynfifo())
+                       status = ep_config_from_table(musb);
+               else {
+                       ERR("reconfigure software for Dynamic FIFOs\n");
+                       status = -ENODEV;
+               }
+       } else {
+               if (!can_dynfifo())
+                       status = ep_config_from_hw(musb);
+               else {
+                       ERR("reconfigure software for static FIFOs\n");
+                       return -ENODEV;
+               }
+       }
+
+       if (status < 0)
+               return status;
+
+       /* finish init, and print endpoint config */
+       for (i = 0; i < musb->nr_endpoints; i++) {
+               struct musb_hw_ep       *hw_ep = musb->endpoints + i;
+
+               hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+#ifdef CONFIG_USB_TUSB6010
+               hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+               hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+               hw_ep->fifo_sync_va =
+                       musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+
+               if (i == 0)
+                       hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+               else
+                       hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+#endif
+
+               hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+               hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+               hw_ep->rx_reinit = 1;
+               hw_ep->tx_reinit = 1;
+#endif
+
+               if (hw_ep->max_packet_sz_tx) {
+                       printk(KERN_DEBUG
+                               "%s: hw_ep %d%s, %smax %d\n",
+                               musb_driver_name, i,
+                               hw_ep->is_shared_fifo ? "shared" : "tx",
+                               hw_ep->tx_double_buffered
+                                       ? "doublebuffer, " : "",
+                               hw_ep->max_packet_sz_tx);
+               }
+               if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+                       printk(KERN_DEBUG
+                               "%s: hw_ep %d%s, %smax %d\n",
+                               musb_driver_name, i,
+                               "rx",
+                               hw_ep->rx_double_buffered
+                                       ? "doublebuffer, " : "",
+                               hw_ep->max_packet_sz_rx);
+               }
+               if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+                       DBG(1, "hw_ep %d not configured\n", i);
+       }
+
+       return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+       unsigned long   flags;
+       irqreturn_t     retval = IRQ_NONE;
+       struct musb     *musb = __hci;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+       musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+       musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+       if (musb->int_usb || musb->int_tx || musb->int_rx)
+               retval = musb_interrupt(musb);
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       /* REVISIT we sometimes get spurious IRQs on g_ep0
+        * not clear why...
+        */
+       if (retval != IRQ_HANDLED)
+               DBG(5, "spurious?\n");
+
+       return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt      NULL
+#endif
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect that
+ * other irq sources (phy, dma, etc) will be handled first, musb->int_*
+ * values will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
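+/*
+ * Dispatch order below: stage 0 "global" irqs, then endpoint 0, then
+ * RX endpoints 1-15, then TX endpoints 1-15, and finally the remaining
+ * "global" irqs in stage 2.
+ */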
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+       irqreturn_t     retval = IRQ_NONE;
+       u8              devctl, power;
+       int             ep_num;
+       u32             reg;
+
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+       power = musb_readb(musb->mregs, MUSB_POWER);
+
+       DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+               (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
+               musb->int_usb, musb->int_tx, musb->int_rx);
+
+       /* the core can interrupt us for multiple reasons; docs have
+        * a generic interrupt flowchart to follow
+        */
+       if (musb->int_usb & STAGE0_MASK)
+               retval |= musb_stage0_irq(musb, musb->int_usb,
+                               devctl, power);
+
+       /* "stage 1" is handling endpoint irqs */
+
+       /* handle endpoint 0 first */
+       if (musb->int_tx & 1) {
+               if (devctl & MUSB_DEVCTL_HM)
+                       retval |= musb_h_ep0_irq(musb);
+               else
+                       retval |= musb_g_ep0_irq(musb);
+       }
+
+       /* RX on endpoints 1-15 */
+       reg = musb->int_rx >> 1;
+       ep_num = 1;
+       while (reg) {
+               if (reg & 1) {
+                       /* musb_ep_select(musb->mregs, ep_num); */
+                       /* REVISIT just retval = ep->rx_irq(...) */
+                       retval = IRQ_HANDLED;
+                       if (devctl & MUSB_DEVCTL_HM) {
+                               if (is_host_capable())
+                                       musb_host_rx(musb, ep_num);
+                       } else {
+                               if (is_peripheral_capable())
+                                       musb_g_rx(musb, ep_num);
+                       }
+               }
+
+               reg >>= 1;
+               ep_num++;
+       }
+
+       /* TX on endpoints 1-15 */
+       reg = musb->int_tx >> 1;
+       ep_num = 1;
+       while (reg) {
+               if (reg & 1) {
+                       /* musb_ep_select(musb->mregs, ep_num); */
+                       /* REVISIT just retval |= ep->tx_irq(...) */
+                       retval = IRQ_HANDLED;
+                       if (devctl & MUSB_DEVCTL_HM) {
+                               if (is_host_capable())
+                                       musb_host_tx(musb, ep_num);
+                       } else {
+                               if (is_peripheral_capable())
+                                       musb_g_tx(musb, ep_num);
+                       }
+               }
+               reg >>= 1;
+               ep_num++;
+       }
+
+       /* finish handling "global" interrupts after handling fifos */
+       if (musb->int_usb)
+               retval |= musb_stage2_irq(musb,
+                               musb->int_usb, devctl, power);
+
+       return retval;
+}
+
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static int __initdata use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+       u8      devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       /* called with controller lock already held */
+
+       if (!epnum) {
+#ifndef CONFIG_USB_TUSB_OMAP_DMA
+               if (!is_cppi_enabled()) {
+                       /* endpoint 0 */
+                       if (devctl & MUSB_DEVCTL_HM)
+                               musb_h_ep0_irq(musb);
+                       else
+                               musb_g_ep0_irq(musb);
+               }
+#endif
+       } else {
+               /* endpoints 1..15 */
+               if (transmit) {
+                       if (devctl & MUSB_DEVCTL_HM) {
+                               if (is_host_capable())
+                                       musb_host_tx(musb, epnum);
+                       } else {
+                               if (is_peripheral_capable())
+                                       musb_g_tx(musb, epnum);
+                       }
+               } else {
+                       /* receive */
+                       if (devctl & MUSB_DEVCTL_HM) {
+                               if (is_host_capable())
+                                       musb_host_rx(musb, epnum);
+                       } else {
+                               if (is_peripheral_capable())
+                                       musb_g_rx(musb, epnum);
+                       }
+               }
+       }
+}
+
+#else
+#define use_dma                        0
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t
+musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct musb *musb = dev_to_musb(dev);
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       ret = sprintf(buf, "%s\n", otg_state_string(musb));
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return ret;
+}
+
+static ssize_t
+musb_mode_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t n)
+{
+       struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       if (!strncmp(buf, "host", 4))
+               musb_platform_set_mode(musb, MUSB_HOST);
+       if (!strncmp(buf, "peripheral", 10))
+               musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+       if (!strncmp(buf, "otg", 3))
+               musb_platform_set_mode(musb, MUSB_OTG);
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return n;
+}
+static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
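+
+/*
+ * Usage sketch (the sysfs path is board specific): writing "host",
+ * "peripheral" or "otg" to the "mode" attribute asks the platform glue
+ * to switch roles; reading it returns the current OTG state string.
+ */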
+
+static ssize_t
+musb_vbus_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t n)
+{
+       struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
+       unsigned long   val;
+
+       if (sscanf(buf, "%lu", &val) < 1) {
+               printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&musb->lock, flags);
+       musb->a_wait_bcon = val;
+       if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+               musb->is_active = 0;
+       musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return n;
+}
+
+static ssize_t
+musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
+       unsigned long   val;
+       int             vbus;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       val = musb->a_wait_bcon;
+       vbus = musb_platform_get_vbus_status(musb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return sprintf(buf, "Vbus %s, timeout %lu\n",
+                       vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
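+
+/*
+ * Writing "vbus" sets the A_WAIT_BCON timeout in milliseconds
+ * (musb->a_wait_bcon); reading it reports the VBUS state and that
+ * timeout, e.g. "Vbus on, timeout 1000".
+ */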
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* Gadget drivers can't know whether a host is connected, but users can;
+ * this attribute allows userspace to trigger SRP on their behalf.
+ */
+static ssize_t
+musb_srp_store(struct device *dev, struct device_attribute *attr,
+               const char *buf, size_t n)
+{
+       struct musb     *musb = dev_to_musb(dev);
+       unsigned short  srp;
+
+       if (sscanf(buf, "%hu", &srp) != 1
+                       || (srp != 1)) {
+               printk(KERN_ERR "SRP: Value must be 1\n");
+               return -EINVAL;
+       }
+
+       if (srp == 1)
+               musb_g_wakeup(musb);
+
+       return n;
+}
+static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
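+
+/*
+ * Writing "1" to "srp" lets userspace kick off SRP via musb_g_wakeup();
+ * any other value is rejected with -EINVAL.
+ */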
+
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+
+#endif /* sysfs */
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+       struct musb *musb = container_of(data, struct musb, irq_work);
+       static int old_state;
+
+       if (musb->xceiv.state != old_state) {
+               old_state = musb->xceiv.state;
+               sysfs_notify(&musb->controller->kobj, NULL, "mode");
+       }
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__init
+allocate_instance(struct device *dev, void __iomem *mbase)
+{
+       struct musb             *musb;
+       struct musb_hw_ep       *ep;
+       int                     epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       struct usb_hcd  *hcd;
+
+       hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+       if (!hcd)
+               return NULL;
+       /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+       musb = hcd_to_musb(hcd);
+       INIT_LIST_HEAD(&musb->control);
+       INIT_LIST_HEAD(&musb->in_bulk);
+       INIT_LIST_HEAD(&musb->out_bulk);
+
+       hcd->uses_new_polling = 1;
+
+       musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+#else
+       musb = kzalloc(sizeof *musb, GFP_KERNEL);
+       if (!musb)
+               return NULL;
+       dev_set_drvdata(dev, musb);
+
+#endif
+
+       musb->mregs = mbase;
+       musb->ctrl_base = mbase;
+       musb->nIrq = -ENODEV;
+       for (epnum = 0, ep = musb->endpoints;
+                       epnum < MUSB_C_NUM_EPS;
+                       epnum++, ep++) {
+
+               ep->musb = musb;
+               ep->epnum = epnum;
+       }
+
+       musb->controller = dev;
+       return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+       /* this has multiple entry modes. it handles fault cleanup after
+        * probe(), where things may be partially set up, as well as rmmod
+        * cleanup after everything's been de-activated.
+        */
+
+#ifdef CONFIG_SYSFS
+       device_remove_file(musb->controller, &dev_attr_mode);
+       device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_MUSB_OTG
+       device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       musb_gadget_cleanup(musb);
+#endif
+
+       if (musb->nIrq >= 0) {
+               disable_irq_wake(musb->nIrq);
+               free_irq(musb->nIrq, musb);
+       }
+       if (is_dma_capable() && musb->dma_controller) {
+               struct dma_controller   *c = musb->dma_controller;
+
+               (void) c->stop(c);
+               dma_controller_destroy(c);
+       }
+
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       musb_platform_exit(musb);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+       if (musb->clock) {
+               clk_disable(musb->clock);
+               clk_put(musb->clock);
+       }
+
+#ifdef CONFIG_USB_MUSB_OTG
+       put_device(musb->xceiv.dev);
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       usb_put_hcd(musb_to_hcd(musb));
+#else
+       kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @dev: the controller (already clocked, etc)
+ * @nIrq: irq number
+ * @ctrl: virtual address of controller registers,
+ *     not yet corrected for platform-specific offsets
+ */
+static int __init
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+       int                     status;
+       struct musb             *musb;
+       struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+       /* The driver might handle more features than the board; OK.
+        * Fail when the board needs a feature that's not enabled.
+        */
+       if (!plat) {
+               dev_dbg(dev, "no platform_data?\n");
+               return -ENODEV;
+       }
+       switch (plat->mode) {
+       case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+               break;
+#else
+               goto bad_config;
+#endif
+       case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+               break;
+#else
+               goto bad_config;
+#endif
+       case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+               break;
+#else
+bad_config:
+#endif
+       default:
+               dev_err(dev, "incompatible Kconfig role setting\n");
+               return -EINVAL;
+       }
+
+       /* allocate */
+       musb = allocate_instance(dev, ctrl);
+       if (!musb)
+               return -ENOMEM;
+
+       spin_lock_init(&musb->lock);
+       musb->board_mode = plat->mode;
+       musb->board_set_power = plat->set_power;
+       musb->set_clock = plat->set_clock;
+       musb->min_power = plat->min_power;
+
+       /* Clock usage is chip-specific ... functional clock (DaVinci,
+        * OMAP2430), or PHY ref (some TUSB6010 boards).  All this core
+        * code does is make sure a clock handle is available; platform
+        * code manages it during start/stop and suspend/resume.
+        */
+       if (plat->clock) {
+               musb->clock = clk_get(dev, plat->clock);
+               if (IS_ERR(musb->clock)) {
+                       status = PTR_ERR(musb->clock);
+                       musb->clock = NULL;
+                       goto fail;
+               }
+       }
+
+       /* assume vbus is off */
+
+       /* platform adjusts musb->mregs and musb->isr if needed,
+        * and activates clocks
+        */
+       musb->isr = generic_interrupt;
+       status = musb_platform_init(musb);
+
+       if (status < 0)
+               goto fail;
+       if (!musb->isr) {
+               status = -ENODEV;
+               goto fail2;
+       }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+       if (use_dma && dev->dma_mask) {
+               struct dma_controller   *c;
+
+               c = dma_controller_create(musb, musb->mregs);
+               musb->dma_controller = c;
+               if (c)
+                       (void) c->start(c);
+       }
+#endif
+       /* ideally this would be abstracted in platform setup */
+       if (!is_dma_capable() || !musb->dma_controller)
+               dev->dma_mask = NULL;
+
+       /* be sure interrupts are disabled before connecting ISR */
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+
+       /* setup musb parts of the core (especially endpoints) */
+       status = musb_core_init(plat->multipoint
+                       ? MUSB_CONTROLLER_MHDRC
+                       : MUSB_CONTROLLER_HDRC, musb);
+       if (status < 0)
+               goto fail2;
+
+       /* Init IRQ workqueue before request_irq */
+       INIT_WORK(&musb->irq_work, musb_irq_work);
+
+       /* attach to the IRQ */
+       if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+               dev_err(dev, "request_irq %d failed!\n", nIrq);
+               status = -ENODEV;
+               goto fail2;
+       }
+       musb->nIrq = nIrq;
+/* FIXME this handles wakeup irqs wrong */
+       if (enable_irq_wake(nIrq) == 0)
+               device_init_wakeup(dev, 1);
+
+       pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+                       musb_driver_name,
+                       ({char *s;
+                       switch (musb->board_mode) {
+                       case MUSB_HOST:         s = "Host"; break;
+                       case MUSB_PERIPHERAL:   s = "Peripheral"; break;
+                       default:                s = "OTG"; break;
+                       }; s; }),
+                       ctrl,
+                       (is_dma_capable() && musb->dma_controller)
+                               ? "DMA" : "PIO",
+                       musb->nIrq);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       /* host side needs more setup, except for no-host modes */
+       if (musb->board_mode != MUSB_PERIPHERAL) {
+               struct usb_hcd  *hcd = musb_to_hcd(musb);
+
+               if (musb->board_mode == MUSB_OTG)
+                       hcd->self.otg_port = 1;
+               musb->xceiv.host = &hcd->self;
+               hcd->power_budget = 2 * (plat->power ? : 250);
+       }
+#endif                         /* CONFIG_USB_MUSB_HDRC_HCD */
+
+       /* For the host-only role, we can activate right away.
+        * (We expect the ID pin to be forcibly grounded!!)
+        * Otherwise, wait till the gadget driver hooks up.
+        */
+       if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+               MUSB_HST_MODE(musb);
+               musb->xceiv.default_a = 1;
+               musb->xceiv.state = OTG_STATE_A_IDLE;
+
+               status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+
+               DBG(1, "%s mode, status %d, devctl %02x %c\n",
+                       "HOST", status,
+                       musb_readb(musb->mregs, MUSB_DEVCTL),
+                       (musb_readb(musb->mregs, MUSB_DEVCTL)
+                                       & MUSB_DEVCTL_BDEVICE
+                               ? 'B' : 'A'));
+
+       } else /* peripheral is enabled */ {
+               MUSB_DEV_MODE(musb);
+               musb->xceiv.default_a = 0;
+               musb->xceiv.state = OTG_STATE_B_IDLE;
+
+               status = musb_gadget_setup(musb);
+
+               DBG(1, "%s mode, status %d, dev%02x\n",
+                       is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
+                       status,
+                       musb_readb(musb->mregs, MUSB_DEVCTL));
+
+       }
+
+       if (status == 0)
+               musb_debug_create("driver/musb_hdrc", musb);
+       else {
+fail:
+               if (musb->clock)
+                       clk_put(musb->clock);
+               device_init_wakeup(dev, 0);
+               musb_free(musb);
+               return status;
+       }
+
+#ifdef CONFIG_SYSFS
+       status = device_create_file(dev, &dev_attr_mode);
+       status = device_create_file(dev, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       status = device_create_file(dev, &dev_attr_srp);
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+       status = 0;
+#endif
+
+       return status;
+
+fail2:
+       musb_platform_exit(musb);
+       goto fail;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static u64     *orig_dma_mask;
+#endif
+
+static int __init musb_probe(struct platform_device *pdev)
+{
+       struct device   *dev = &pdev->dev;
+       int             irq = platform_get_irq(pdev, 0);
+       struct resource *iomem;
+       void __iomem    *base;
+
+       iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!iomem || irq == 0)
+               return -ENODEV;
+
+       base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+       if (!base) {
+               dev_err(dev, "ioremap failed\n");
+               return -ENOMEM;
+       }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+       /* clobbered by use_dma=n */
+       orig_dma_mask = dev->dma_mask;
+#endif
+       return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+       struct musb     *musb = dev_to_musb(&pdev->dev);
+       void __iomem    *ctrl_base = musb->ctrl_base;
+
+       /* this gets called on rmmod.
+        *  - Host mode: host may still be active
+        *  - Peripheral mode: peripheral is deactivated (or never-activated)
+        *  - OTG mode: both roles are deactivated (or never-activated)
+        */
+       musb_shutdown(pdev);
+       musb_debug_delete("driver/musb_hdrc", musb);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       if (musb->board_mode == MUSB_HOST)
+               usb_remove_hcd(musb_to_hcd(musb));
+#endif
+       musb_free(musb);
+       iounmap(ctrl_base);
+       device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSB_PIO_ONLY
+       pdev->dev.dma_mask = orig_dma_mask;
+#endif
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+       unsigned long   flags;
+       struct musb     *musb = dev_to_musb(&pdev->dev);
+
+       if (!musb->clock)
+               return 0;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (is_peripheral_active(musb)) {
+               /* FIXME force disconnect unless we know USB will wake
+                * the system up quickly enough to respond ...
+                */
+       } else if (is_host_active(musb)) {
+               /* we know all the children are suspended; sometimes
+                * they will even be wakeup-enabled.
+                */
+       }
+
+       if (musb->set_clock)
+               musb->set_clock(musb->clock, 0);
+       else
+               clk_disable(musb->clock);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+       unsigned long   flags;
+       struct musb     *musb = dev_to_musb(&pdev->dev);
+
+       if (!musb->clock)
+               return 0;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (musb->set_clock)
+               musb->set_clock(musb->clock, 1);
+       else
+               clk_enable(musb->clock);
+
+       /* for static CMOS like DaVinci, register values were preserved
+        * unless the whole SoC powered down without that being handled
+        * as a whole-system restart (e.g. swsusp)
+        */
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return 0;
+}
+
+#else
+#define        musb_suspend    NULL
+#define        musb_resume     NULL
+#endif
+
+static struct platform_driver musb_driver = {
+       .driver = {
+               .name           = (char *)musb_driver_name,
+               .bus            = &platform_bus_type,
+               .owner          = THIS_MODULE,
+       },
+       .remove         = __devexit_p(musb_remove),
+       .shutdown       = musb_shutdown,
+       .suspend        = musb_suspend,
+       .resume         = musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       if (usb_disabled())
+               return 0;
+#endif
+
+       pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_MUSB_PIO_ONLY
+               "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+               "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+               "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+               "tusb-omap-dma"
+#else
+               "?dma?"
+#endif
+               ", "
+#ifdef CONFIG_USB_MUSB_OTG
+               "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+               "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+               "host"
+#endif
+               ", debug=%d\n",
+               musb_driver_name, debug);
+       return platform_driver_probe(&musb_driver, musb_probe);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+       platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644 (file)
index 0000000..90035c1
--- /dev/null
@@ -0,0 +1,517 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#ifdef CONFIG_USB_MUSB_SOC
+/*
+ * Get core configuration from a header converted (by cfg_conv)
+ * from the Verilog config file generated by the core config utility
+ *
+ * For now we assume that header is provided along with other
+ * arch-specific files.  Discrete chips will need a build tweak.
+ * So will using AHB IDs from silicon that provides them.
+ */
+#include <asm/arch/hdrc_cnf.h>
+#endif
+
+#include "musb_io.h"
+#include "musb_regs.h"
+
+#include "musb_gadget.h"
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+#define        is_peripheral_enabled(musb)     ((musb)->board_mode != MUSB_HOST)
+#define        is_host_enabled(musb)           ((musb)->board_mode != MUSB_PERIPHERAL)
+#define        is_otg_enabled(musb)            ((musb)->board_mode == MUSB_OTG)
+
+/* NOTE:  otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m)                (!(m)->is_host)
+#define is_host_active(m)              ((m)->is_host)
+
+#else
+#define        is_peripheral_enabled(musb)     is_peripheral_capable()
+#define        is_host_enabled(musb)           is_host_capable()
+#define        is_otg_enabled(musb)            0
+
+#define        is_peripheral_active(musb)      is_peripheral_capable()
+#define        is_host_active(musb)            is_host_capable()
+#endif
+
+#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
+/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
+ * override the default gadget controller choice (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+#endif /* need MUSB gadget selection */
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#define        is_peripheral_capable() (1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define        is_peripheral_capable() (0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_wakeup(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#define        is_host_capable()       (1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define        is_host_capable()       (0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+       MUSB_EP0_IDLE,
+       MUSB_EP0_START,                 /* expect ack of setup */
+       MUSB_EP0_IN,                    /* expect IN DATA */
+       MUSB_EP0_OUT,                   /* expect ack of OUT DATA */
+       MUSB_EP0_STATUS,                /* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+       MUSB_EP0_STAGE_SETUP,           /* idle, waiting for setup */
+       MUSB_EP0_STAGE_TX,              /* IN data */
+       MUSB_EP0_STAGE_RX,              /* OUT data */
+       MUSB_EP0_STAGE_STATUSIN,        /* (after OUT data) */
+       MUSB_EP0_STAGE_STATUSOUT,       /* (after IN data) */
+       MUSB_EP0_STAGE_ACKWAIT,         /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* OTG protocol constants */
+#define OTG_TIME_A_WAIT_VRISE  100             /* msec (max) */
+#define OTG_TIME_A_WAIT_BCON   0               /* 0=infinite; min 1000 msec */
+#define OTG_TIME_A_IDLE_BDIS   200             /* msec (min) */
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+               || defined(CONFIG_ARCH_OMAP3430)
+/* REVISIT indexed access seemed to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define        MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if    defined(CONFIG_USB_TUSB6010)
+#define musb_ep_select(_mbase, _epnum) \
+       musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define        MUSB_EP_OFFSET                  MUSB_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif  defined(MUSB_FLAT_REG)
+#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+#define        MUSB_EP_OFFSET                  MUSB_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define musb_ep_select(_mbase, _epnum) \
+       musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define        MUSB_EP_OFFSET                  MUSB_INDEXED_OFFSET
+#endif
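
Whichever of the three mappings ends up selected, the access pattern used in the
rest of this patch is the same: select the endpoint, then read or write through
its per-endpoint register block. A minimal sketch of that pattern follows; the
helper name example_read_txcsr is invented here for illustration and does not
appear in the patch itself.

static inline u16 example_read_txcsr(struct musb *musb, u8 epnum)
{
	void __iomem	*epio = musb->endpoints[epnum].regs;

	/* a no-op under MUSB_FLAT_REG; writes MUSB_INDEX otherwise */
	musb_ep_select(musb->mregs, epnum);
	return musb_readw(epio, MUSB_TXCSR);
}
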
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+       { (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+       { (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+       (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+       struct musb             *musb;
+       void __iomem            *fifo;
+       void __iomem            *regs;
+
+#ifdef CONFIG_USB_TUSB6010
+       void __iomem            *conf;
+#endif
+
+       /* index in musb->endpoints[]  */
+       u8                      epnum;
+
+       /* hardware configuration, possibly dynamic */
+       bool                    is_shared_fifo;
+       bool                    tx_double_buffered;
+       bool                    rx_double_buffered;
+       u16                     max_packet_sz_tx;
+       u16                     max_packet_sz_rx;
+
+       struct dma_channel      *tx_channel;
+       struct dma_channel      *rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+       /* TUSB has "asynchronous" and "synchronous" dma modes */
+       dma_addr_t              fifo_async;
+       dma_addr_t              fifo_sync;
+       void __iomem            *fifo_sync_va;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       void __iomem            *target_regs;
+
+       /* currently scheduled peripheral endpoint */
+       struct musb_qh          *in_qh;
+       struct musb_qh          *out_qh;
+
+       u8                      rx_reinit;
+       u8                      tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       /* peripheral side */
+       struct musb_ep          ep_in;                  /* TX */
+       struct musb_ep          ep_out;                 /* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       return next_request(&hw_ep->ep_in);
+#else
+       return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       return next_request(&hw_ep->ep_out);
+#else
+       return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+       /* device lock */
+       spinlock_t              lock;
+       struct clk              *clock;
+       irqreturn_t             (*isr)(int, void *);
+       struct work_struct      irq_work;
+
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME  (1 << 31)
+
+       u32                     port1_status;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       unsigned long           rh_timer;
+
+       enum musb_h_ep0_state   ep0_stage;
+
+       /* bulk traffic normally dedicates endpoint hardware, and each
+        * direction has its own ring of host side endpoints.
+        * we try to progress the transfer at the head of each endpoint's
+        * queue until it completes or NAKs too much; then we try the next
+        * endpoint.
+        */
+       struct musb_hw_ep       *bulk_ep;
+
+       struct list_head        control;        /* of musb_qh */
+       struct list_head        in_bulk;        /* of musb_qh */
+       struct list_head        out_bulk;       /* of musb_qh */
+       struct musb_qh          *periodic[32];  /* tree of interrupt+iso */
+#endif
+
+       /* called with IRQs blocked; ON/nonzero implies starting a session,
+        * and waiting at least a_wait_vrise_tmout.
+        */
+       void                    (*board_set_vbus)(struct musb *, int is_on);
+
+       struct dma_controller   *dma_controller;
+
+       struct device           *controller;
+       void __iomem            *ctrl_base;
+       void __iomem            *mregs;
+
+#ifdef CONFIG_USB_TUSB6010
+       dma_addr_t              async;
+       dma_addr_t              sync;
+       void __iomem            *sync_va;
+#endif
+
+       /* passed down from chip/board specific irq handlers */
+       u8                      int_usb;
+       u16                     int_rx;
+       u16                     int_tx;
+
+       struct otg_transceiver  xceiv;
+
+       int nIrq;
+
+       struct musb_hw_ep        endpoints[MUSB_C_NUM_EPS];
+#define control_ep             endpoints
+
+#define VBUSERR_RETRY_COUNT    3
+       u16                     vbuserr_retry;
+       u16 epmask;
+       u8 nr_endpoints;
+
+       u8 board_mode;          /* enum musb_mode */
+       int                     (*board_set_power)(int state);
+
+       int                     (*set_clock)(struct clk *clk, int is_active);
+
+       u8                      min_power;      /* vbus for periph, in mA/2 */
+
+       bool                    is_host;
+
+       int                     a_wait_bcon;    /* VBUS timeout in msecs */
+       unsigned long           idle_timeout;   /* Next timeout in jiffies */
+
+       /* active means connected and not suspended */
+       unsigned                is_active:1;
+
+       unsigned is_multipoint:1;
+       unsigned ignore_disconnect:1;   /* during bus resets */
+
+#ifdef C_MP_TX
+       unsigned bulk_split:1;
+#define        can_bulk_split(musb,type) \
+               (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+#else
+#define        can_bulk_split(musb, type)      0
+#endif
+
+#ifdef C_MP_RX
+       unsigned bulk_combine:1;
+#define        can_bulk_combine(musb,type) \
+               (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+#else
+#define        can_bulk_combine(musb, type)    0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       /* is_suspended means USB B_PERIPHERAL suspend */
+       unsigned                is_suspended:1;
+
+       /* may_wakeup means remote wakeup is enabled */
+       unsigned                may_wakeup:1;
+
+       /* is_self_powered is reported in device status and the
+        * config descriptor.  is_bus_powered means B_PERIPHERAL
+        * draws some VBUS current; both can be true.
+        */
+       unsigned                is_self_powered:1;
+       unsigned                is_bus_powered:1;
+
+       unsigned                set_address:1;
+       unsigned                test_mode:1;
+       unsigned                softconnect:1;
+
+       u8                      address;
+       u8                      test_mode_nr;
+       u16                     ackpend;                /* ep0 */
+       enum musb_g_ep0_state   ep0_state;
+       struct usb_gadget       g;                      /* the gadget */
+       struct usb_gadget_driver *gadget_driver;        /* its driver */
+#endif
+
+#ifdef MUSB_CONFIG_PROC_FS
+       struct proc_dir_entry *proc_entry;
+#endif
+};
+
+static inline void musb_set_vbus(struct musb *musb, int is_on)
+{
+       musb->board_set_vbus(musb, is_on);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+       return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *musb);
+extern void musb_stop(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+
+#if defined(CONFIG_USB_TUSB6010) || \
+       defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
+#else
+#define musb_platform_try_idle(x, y)           do {} while (0)
+#endif
+
+#ifdef CONFIG_USB_TUSB6010
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_get_vbus_status(x)       0
+#endif
+
+extern int __init musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+/*-------------------------- ProcFS definitions ---------------------*/
+
+struct proc_dir_entry;
+
+#if (MUSB_DEBUG > 0) && defined(MUSB_CONFIG_PROC_FS)
+extern struct proc_dir_entry *musb_debug_create(char *name, struct musb *data);
+extern void musb_debug_delete(char *name, struct musb *data);
+
+#else
+static inline struct proc_dir_entry *
+musb_debug_create(char *name, struct musb *data)
+{
+       return NULL;
+}
+static inline void musb_debug_delete(char *name, struct musb *data)
+{
+}
+#endif
+
+#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644 (file)
index 0000000..3bdb311
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+       do { printk(facility "%s %d: " format , \
+       __func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+       if (_dbg_level(level)) { \
+               printk(facility "%s %d: " format , \
+                               __func__, __LINE__ , ## args); \
+       } } while (0)
+
+#if MUSB_DEBUG > 0
+extern unsigned debug;
+#else
+#define debug  0
+#endif
+
+static inline int _dbg_level(unsigned l)
+{
+       return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
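
A short usage sketch of the level scheme (these call sites are invented for
illustration): a DBG() message is emitted only when the module's debug
parameter is at least the level given as the first argument, so low numbers
are for rare events and high numbers for per-packet noise.

static inline void example_debug_levels(void)
{
	DBG(1, "rare, high-level event\n");	/* printed when debug >= 1 */
	DBG(6, "noisy per-packet detail\n");	/* printed when debug >= 6 */
}
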
+
+extern const char *otg_state_string(struct musb *);
+
+#endif                         /*  __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644 (file)
index 0000000..0a2c4e3
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores.  On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does.  Responsibilities
+ * of a DMA controller driver include:
+ *
+ *  - Handling the details of moving multiple USB packets
+ *    in cooperation with the Inventra USB core, including especially
+ *    the correct RX side treatment of short packets and buffer-full
+ *    states (both of which terminate transfers).
+ *
+ *  - Knowing the correlation between dma channels and the
+ *    Inventra core's local endpoint resources and data direction.
+ *
+ *  - Maintaining a list of allocated/available channels.
+ *
+ *  - Updating channel status on interrupts,
+ *    whether shared with the Inventra core or separate.
+ */
+
+#define        DMA_ADDR_INVALID        (~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+#define        is_dma_capable()        (1)
+#else
+#define        is_dma_capable()        (0)
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define        is_cppi_enabled()       1
+#else
+#define        is_cppi_enabled()       0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap()                        1
+#else
+#define tusb_dma_omap()                        0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+       /* unallocated */
+       MUSB_DMA_STATUS_UNKNOWN,
+       /* allocated ... but not busy, no errors */
+       MUSB_DMA_STATUS_FREE,
+       /* busy ... transactions are active */
+       MUSB_DMA_STATUS_BUSY,
+       /* transaction(s) aborted due to ... dma or memory bus error */
+       MUSB_DMA_STATUS_BUS_ABORT,
+       /* transaction(s) aborted due to ... core error or USB fault */
+       MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ *     transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+       void                    *private_data;
+       /* FIXME not void* private_data, but a dma_controller * */
+       size_t                  max_len;
+       size_t                  actual_len;
+       enum dma_channel_status status;
+       bool                    desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status.  If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+       return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
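
The transfer code in musb_gadget.c and musb_host.c uses this accessor as a
guard before (re)programming a channel; a minimal sketch of that guard, with
an invented helper name:

static inline bool example_channel_idle(struct dma_channel *ch)
{
	/* a BUSY channel must not be reprogrammed; compare txstate() */
	return dma_channel_status(ch) != MUSB_DMA_STATUS_BUSY;
}
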
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ *     return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller
+ *     return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ *     returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+       int                     (*start)(struct dma_controller *);
+       int                     (*stop)(struct dma_controller *);
+       struct dma_channel      *(*channel_alloc)(struct dma_controller *,
+                                       struct musb_hw_ep *, u8 is_tx);
+       void                    (*channel_release)(struct dma_channel *);
+       int                     (*channel_program)(struct dma_channel *channel,
+                                                       u16 maxpacket, u8 mode,
+                                                       dma_addr_t dma_addr,
+                                                       u32 length);
+       int                     (*channel_abort)(struct dma_channel *);
+};
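
Tying the two structures together: the platform glue added by this patch
(cppi_dma.c, musbhsdma.c, tusb6010_omap.c) supplies these methods, and the
core calls them roughly as sketched below. The helper name, the error codes,
and the unconditional mode 0 are illustrative assumptions, not code taken
from the patch.

static int example_start_tx_dma(struct dma_controller *c,
				struct musb_hw_ep *hw_ep, u16 maxpacket,
				dma_addr_t buf, u32 len)
{
	/* bind a TX channel to this hardware endpoint */
	struct dma_channel *ch = c->channel_alloc(c, hw_ep, 1);

	if (!ch)
		return -EBUSY;

	/* a zero return from channel_program() means "fall back to PIO" */
	if (!c->channel_program(ch, maxpacket, 0, buf, len)) {
		c->channel_release(ch);
		return -EIO;
	}

	/* completion (or a fault) is reported later via musb_dma_completion() */
	return 0;
}
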
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+dma_controller_create(struct musb *, void __iomem *);
+
+extern void dma_controller_destroy(struct dma_controller *);
+
+#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644 (file)
index 0000000..b3773f1
--- /dev/null
@@ -0,0 +1,2033 @@
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+
+
+/* MUSB PERIPHERAL status 3-mar-2006:
+ *
+ * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
+ *   Minor glitches:
+ *
+ *     + remote wakeup to Linux hosts works, but USBCV failures were
+ *       seen in one test run (operator error?)
+ *     + endpoint halt tests -- in both usbtest and usbcv -- seem
+ *       to break when dma is enabled ... is something wrongly
+ *       clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested.  Network traffic patterns
+ *   (with lots of short transfers etc) need retesting; they turn up the
+ *   worst cases of the DMA, since short packets are typical but are not
+ *   required.
+ *
+ * - TX/IN
+ *     + both pio and dma behave OK with network and g_zero tests
+ *     + no cppi throughput issues other than no-hw-queueing
+ *     + failed with FLAT_REG (DaVinci)
+ *     + seems to behave with double buffering, PIO -and- CPPI
+ *     + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ *     + both pio and dma behave OK with network and g_zero tests
+ *     + dma is slow in typical case (short_not_ok is clear)
+ *     + double buffering ok with PIO
+ *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ *     + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored; so
+ *   drivers can't hold off host requests until userspace is ready.
+ *   (Workaround:  they can turn it off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ *     + DaVinci, basically works with cppi dma
+ *     + OMAP 2430, ditto with mentor dma
+ *     + TUSB 6010, platform-specific dma in the works
+ */
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Immediately complete a request.
+ *
+ * @param ep the endpoint the request was queued on
+ * @param request the request to complete
+ * @param status the status to complete the request with
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+       struct musb_ep          *ep,
+       struct usb_request      *request,
+       int                     status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+       struct musb_request     *req;
+       struct musb             *musb;
+       int                     busy = ep->busy;
+
+       req = to_musb_request(request);
+
+       list_del(&request->list);
+       if (req->request.status == -EINPROGRESS)
+               req->request.status = status;
+       musb = req->musb;
+
+       ep->busy = 1;
+       spin_unlock(&musb->lock);
+       if (is_dma_capable()) {
+               if (req->mapped) {
+                       dma_unmap_single(musb->controller,
+                                       req->request.dma,
+                                       req->request.length,
+                                       req->tx
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+                       req->request.dma = DMA_ADDR_INVALID;
+                       req->mapped = 0;
+               } else if (req->request.dma != DMA_ADDR_INVALID)
+                       dma_sync_single_for_cpu(musb->controller,
+                                       req->request.dma,
+                                       req->request.length,
+                                       req->tx
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+       }
+       if (request->status == 0)
+               DBG(5, "%s done request %p,  %d/%d\n",
+                               ep->end_point.name, request,
+                               req->request.actual, req->request.length);
+       else
+               DBG(2, "%s request %p, %d/%d fault %d\n",
+                               ep->end_point.name, request,
+                               req->request.actual, req->request.length,
+                               request->status);
+       req->request.complete(&req->ep->end_point, &req->request);
+       spin_lock(&musb->lock);
+       ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint using the status. Synchronous.
+ * caller locked controller and blocked irqs, and selected this ep.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+       struct musb_request     *req = NULL;
+       void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+       ep->busy = 1;
+
+       if (is_dma_capable() && ep->dma) {
+               struct dma_controller   *c = ep->musb->dma_controller;
+               int value;
+               if (ep->is_in) {
+                       musb_writew(epio, MUSB_TXCSR,
+                                       0 | MUSB_TXCSR_FLUSHFIFO);
+                       musb_writew(epio, MUSB_TXCSR,
+                                       0 | MUSB_TXCSR_FLUSHFIFO);
+               } else {
+                       musb_writew(epio, MUSB_RXCSR,
+                                       0 | MUSB_RXCSR_FLUSHFIFO);
+                       musb_writew(epio, MUSB_RXCSR,
+                                       0 | MUSB_RXCSR_FLUSHFIFO);
+               }
+
+               value = c->channel_abort(ep->dma);
+               DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+               c->channel_release(ep->dma);
+               ep->dma = NULL;
+       }
+
+       while (!list_empty(&(ep->req_list))) {
+               req = container_of(ep->req_list.next, struct musb_request,
+                               request.list);
+               musb_g_giveback(ep, &req->request, status);
+       }
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+       if (can_bulk_split(musb, ep->type))
+               return ep->hw_ep->max_packet_sz_tx;
+       else
+               return ep->packet_sz;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+       Only mode 0 is used for transfers <= wPktSize,
+       mode 1 is used for larger transfers,
+
+       One of the following happens:
+       - Host sends IN token which causes an endpoint interrupt
+               -> TxAvail
+                       -> if DMA is currently busy, exit.
+                       -> if queue is non-empty, txstate().
+
+       - Request is queued by the gadget driver.
+               -> if queue was previously empty, txstate()
+
+       txstate()
+               -> start
+                 /\    -> setup DMA
+                 |     (data is transferred to the FIFO, then sent out when
+                 |     IN token(s) are received from the host).
+                 |             -> DMA interrupt on completion
+                 |                calls TxAvail.
+                 |                   -> stop DMA, clear DMAENAB,
+                 |                   -> set TxPktRdy for last short pkt or zlp
+                 |                   -> Complete Request
+                 |                   -> Continue next request (call txstate)
+                 |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+       u8                      epnum = req->epnum;
+       struct musb_ep          *musb_ep;
+       void __iomem            *epio = musb->endpoints[epnum].regs;
+       struct usb_request      *request;
+       u16                     fifo_count = 0, csr;
+       int                     use_dma = 0;
+
+       musb_ep = req->ep;
+
+       /* we shouldn't get here while DMA is active ... but we do ... */
+       if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+               DBG(4, "dma pending...\n");
+               return;
+       }
+
+       /* read TXCSR before */
+       csr = musb_readw(epio, MUSB_TXCSR);
+
+       request = &req->request;
+       fifo_count = min(max_ep_writesize(musb, musb_ep),
+                       (int)(request->length - request->actual));
+
+       if (csr & MUSB_TXCSR_TXPKTRDY) {
+               DBG(5, "%s old packet still ready, txcsr %03x\n",
+                               musb_ep->end_point.name, csr);
+               return;
+       }
+
+       if (csr & MUSB_TXCSR_P_SENDSTALL) {
+               DBG(5, "%s stalling, txcsr %03x\n",
+                               musb_ep->end_point.name, csr);
+               return;
+       }
+
+       DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+                       epnum, musb_ep->packet_sz, fifo_count,
+                       csr);
+
+#ifndef        CONFIG_MUSB_PIO_ONLY
+       if (is_dma_capable() && musb_ep->dma) {
+               struct dma_controller   *c = musb->dma_controller;
+
+               use_dma = (request->dma != DMA_ADDR_INVALID);
+
+               /* MUSB_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+               {
+                       size_t request_size;
+
+                       /* setup DMA, then program endpoint CSR */
+                       request_size = min(request->length,
+                                               musb_ep->dma->max_len);
+                       if (request_size <= musb_ep->packet_sz)
+                               musb_ep->dma->desired_mode = 0;
+                       else
+                               musb_ep->dma->desired_mode = 1;
+
+                       use_dma = use_dma && c->channel_program(
+                                       musb_ep->dma, musb_ep->packet_sz,
+                                       musb_ep->dma->desired_mode,
+                                       request->dma, request_size);
+                       if (use_dma) {
+                               if (musb_ep->dma->desired_mode == 0) {
+                                       /* ASSERT: DMAENAB is clear */
+                                       csr &= ~(MUSB_TXCSR_AUTOSET |
+                                                       MUSB_TXCSR_DMAMODE);
+                                       csr |= (MUSB_TXCSR_DMAENAB |
+                                                       MUSB_TXCSR_MODE);
+                                       /* against programming guide */
+                               } else
+                                       csr |= (MUSB_TXCSR_AUTOSET
+                                                       | MUSB_TXCSR_DMAENAB
+                                                       | MUSB_TXCSR_DMAMODE
+                                                       | MUSB_TXCSR_MODE);
+
+                               csr &= ~MUSB_TXCSR_P_UNDERRUN;
+                               musb_writew(epio, MUSB_TXCSR, csr);
+                       }
+               }
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+               /* program endpoint CSR first, then setup DMA */
+               csr &= ~(MUSB_TXCSR_AUTOSET
+                               | MUSB_TXCSR_DMAMODE
+                               | MUSB_TXCSR_P_UNDERRUN
+                               | MUSB_TXCSR_TXPKTRDY);
+               csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+               musb_writew(epio, MUSB_TXCSR,
+                       (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+                               | csr);
+
+               /* ensure writebuffer is empty */
+               csr = musb_readw(epio, MUSB_TXCSR);
+
+               /* NOTE host side sets DMAENAB later than this; both are
+                * OK since the transfer dma glue (between CPPI and Mentor
+                * fifos) just tells CPPI it could start.  Data only moves
+                * to the USB TX fifo when both fifos are ready.
+                */
+
+               /* "mode" is irrelevant here; handle terminating ZLPs like
+                * PIO does, since the hardware RNDIS mode seems unreliable
+                * except for the last-packet-is-already-short case.
+                */
+               use_dma = use_dma && c->channel_program(
+                               musb_ep->dma, musb_ep->packet_sz,
+                               0,
+                               request->dma,
+                               request->length);
+               if (!use_dma) {
+                       c->channel_release(musb_ep->dma);
+                       musb_ep->dma = NULL;
+                       /* ASSERT: DMAENAB clear */
+                       csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+                       /* invariant: request->buf is non-null */
+               }
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+               use_dma = use_dma && c->channel_program(
+                               musb_ep->dma, musb_ep->packet_sz,
+                               request->zero,
+                               request->dma,
+                               request->length);
+#endif
+       }
+#endif
+
+       if (!use_dma) {
+               musb_write_fifo(musb_ep->hw_ep, fifo_count,
+                               (u8 *) (request->buf + request->actual));
+               request->actual += fifo_count;
+               csr |= MUSB_TXCSR_TXPKTRDY;
+               csr &= ~MUSB_TXCSR_P_UNDERRUN;
+               musb_writew(epio, MUSB_TXCSR, csr);
+       }
+
+       /* host may already have the data when this message shows... */
+       DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+                       musb_ep->end_point.name, use_dma ? "dma" : "pio",
+                       request->actual, request->length,
+                       musb_readw(epio, MUSB_TXCSR),
+                       fifo_count,
+                       musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ,  with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+       u16                     csr;
+       struct usb_request      *request;
+       u8 __iomem              *mbase = musb->mregs;
+       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_in;
+       void __iomem            *epio = musb->endpoints[epnum].regs;
+       struct dma_channel      *dma;
+
+       musb_ep_select(mbase, epnum);
+       request = next_request(musb_ep);
+
+       csr = musb_readw(epio, MUSB_TXCSR);
+       DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+
+       dma = is_dma_capable() ? musb_ep->dma : NULL;
+       do {
+               /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+                * probably rates reporting as a host error
+                */
+               if (csr & MUSB_TXCSR_P_SENTSTALL) {
+                       csr |= MUSB_TXCSR_P_WZC_BITS;
+                       csr &= ~MUSB_TXCSR_P_SENTSTALL;
+                       musb_writew(epio, MUSB_TXCSR, csr);
+                       if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                               dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+                               musb->dma_controller->channel_abort(dma);
+                       }
+
+                       if (request)
+                               musb_g_giveback(musb_ep, request, -EPIPE);
+
+                       break;
+               }
+
+               if (csr & MUSB_TXCSR_P_UNDERRUN) {
+                       /* we NAKed, no big deal ... little reason to care */
+                       csr |= MUSB_TXCSR_P_WZC_BITS;
+                       csr &= ~(MUSB_TXCSR_P_UNDERRUN
+                                       | MUSB_TXCSR_TXPKTRDY);
+                       musb_writew(epio, MUSB_TXCSR, csr);
+                       DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+               }
+
+               if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                       /* SHOULD NOT HAPPEN ... but it has with cppi, after
+                        * changing SENDSTALL (and other cases); harmless?
+                        */
+                       DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+                       break;
+               }
+
+               if (request) {
+                       u8      is_dma = 0;
+
+                       if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+                               is_dma = 1;
+                               csr |= MUSB_TXCSR_P_WZC_BITS;
+                               csr &= ~(MUSB_TXCSR_DMAENAB
+                                               | MUSB_TXCSR_P_UNDERRUN
+                                               | MUSB_TXCSR_TXPKTRDY);
+                               musb_writew(epio, MUSB_TXCSR, csr);
+                               /* ensure writebuffer is empty */
+                               csr = musb_readw(epio, MUSB_TXCSR);
+                               request->actual += musb_ep->dma->actual_len;
+                               DBG(4, "TXCSR%d %04x, dma off, "
+                                               "len %zu, req %p\n",
+                                       epnum, csr,
+                                       musb_ep->dma->actual_len,
+                                       request);
+                       }
+
+                       if (is_dma || request->actual == request->length) {
+
+                               /* First, maybe a terminating short packet.
+                                * Some DMA engines might handle this by
+                                * themselves.
+                                */
+                               if ((request->zero
+                                               && request->length
+                                               && (request->length
+                                                       % musb_ep->packet_sz)
+                                                       == 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+                                       || (is_dma &&
+                                               ((!dma->desired_mode) ||
+                                                   (request->actual &
+                                                   (musb_ep->packet_sz - 1))))
+#endif
+                               ) {
+                                       /* on dma completion, fifo may not
+                                        * be available yet ...
+                                        */
+                                       if (csr & MUSB_TXCSR_TXPKTRDY)
+                                               break;
+
+                                       DBG(4, "sending zero pkt\n");
+                                       musb_writew(epio, MUSB_TXCSR,
+                                                       MUSB_TXCSR_MODE
+                                                       | MUSB_TXCSR_TXPKTRDY);
+                                       request->zero = 0;
+                               }
+
+                               /* ... or if not, then complete it */
+                               musb_g_giveback(musb_ep, request, 0);
+
+                               /* kickstart next transfer if appropriate;
+                                * the packet that just completed might not
+                                * be transmitted for hours or days.
+                                * REVISIT for double buffering...
+                                * FIXME revisit for stalls too...
+                                */
+                               musb_ep_select(mbase, epnum);
+                               csr = musb_readw(epio, MUSB_TXCSR);
+                               if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+                                       break;
+                               request = musb_ep->desc
+                                               ? next_request(musb_ep)
+                                               : NULL;
+                               if (!request) {
+                                       DBG(4, "%s idle now\n",
+                                               musb_ep->end_point.name);
+                                       break;
+                               }
+                       }
+
+                       txstate(musb, to_musb_request(request));
+               }
+
+       } while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+       - Only mode 0 is used.
+
+       - Request is queued by the gadget class driver.
+               -> if queue was previously empty, rxstate()
+
+       - Host sends OUT token which causes an endpoint interrupt
+         /\      -> RxReady
+         |           -> if request queued, call rxstate
+         |             /\      -> setup DMA
+         |             |            -> DMA interrupt on completion
+         |             |               -> RxReady
+         |             |                     -> stop DMA
+         |             |                     -> ack the read
+         |             |                     -> if data recd = max expected
+         |             |                               by the request, or host
+         |             |                               sent a short packet,
+         |             |                               complete the request,
+         |             |                               and start the next one.
+         |             |_____________________________________|
+         |                                      else just wait for the host
+         |                                         to send the next OUT token.
+         |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+#endif
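+
+/* For reference, a gadget driver feeds this RX path with something like
+ * the following (hypothetical endpoint and buffer names):
+ *
+ *     req = usb_ep_alloc_request(out_ep, GFP_ATOMIC);
+ *     req->buf = buf;
+ *     req->length = len;
+ *     req->complete = my_complete;
+ *     usb_ep_queue(out_ep, req, GFP_ATOMIC);
+ *
+ * usb_ep_queue() lands in musb_gadget_queue(), and for the head of the
+ * queue ends up in rxstate() below.
+ */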
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+       u16                     csr = 0;
+       const u8                epnum = req->epnum;
+       struct usb_request      *request = &req->request;
+       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       void __iomem            *epio = musb->endpoints[epnum].regs;
+       u16                     fifo_count = 0;
+       u16                     len = musb_ep->packet_sz;
+
+       csr = musb_readw(epio, MUSB_RXCSR);
+
+       if (is_cppi_enabled() && musb_ep->dma) {
+               struct dma_controller   *c = musb->dma_controller;
+               struct dma_channel      *channel = musb_ep->dma;
+
+               /* NOTE:  CPPI won't actually stop advancing the DMA
+                * queue after short packet transfers, so this is almost
+                * always going to run as IRQ-per-packet DMA so that
+                * faults will be handled correctly.
+                */
+               if (c->channel_program(channel,
+                               musb_ep->packet_sz,
+                               !request->short_not_ok,
+                               request->dma + request->actual,
+                               request->length - request->actual)) {
+
+                       /* make sure that if an rxpkt arrived after the irq,
+                        * the cppi engine will be ready to take it as soon
+                        * as DMA is enabled
+                        */
+                       csr &= ~(MUSB_RXCSR_AUTOCLEAR
+                                       | MUSB_RXCSR_DMAMODE);
+                       csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+                       musb_writew(epio, MUSB_RXCSR, csr);
+                       return;
+               }
+       }
+
+       if (csr & MUSB_RXCSR_RXPKTRDY) {
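+               /* RXCOUNT holds the byte count of the packet now in the FIFO */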
+               len = musb_readw(epio, MUSB_RXCOUNT);
+               if (request->actual < request->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+                       if (is_dma_capable() && musb_ep->dma) {
+                               struct dma_controller   *c;
+                               struct dma_channel      *channel;
+                               int                     use_dma = 0;
+
+                               c = musb->dma_controller;
+                               channel = musb_ep->dma;
+
+       /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+        * mode 0 only. So we do not get endpoint interrupts due to DMA
+        * completion. We only get interrupts from DMA controller.
+        *
+        * We could operate in DMA mode 1 if we knew the size of the transfer
+        * in advance. For mass storage class, request->length = what the host
+        * sends, so that'd work.  But for pretty much everything else,
+        * request->length is routinely more than what the host sends. For
+        * most of these gadgets, the end of the transfer is signified either
+        * by a short packet, or by filling the last byte of the buffer.
+        * (Sending extra data in that last packet should trigger an overflow
+        * fault.)  But in mode 1, we don't get a DMA completion interrupt for
+        * short packets.
+        *
+        * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+        * to get endpoint interrupt on every DMA req, but that didn't seem
+        * to work reliably.
+        *
+        * REVISIT an updated g_file_storage can set req->short_not_ok, which
+        * then becomes usable as a runtime "use mode 1" hint...
+        */
+
+                               csr |= MUSB_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+                               csr |= MUSB_RXCSR_AUTOCLEAR;
+                               /* csr |= MUSB_RXCSR_DMAMODE; */
+
+                               /* this special sequence (enabling and then
+                                * disabling MUSB_RXCSR_DMAMODE) is required
+                                * to get DMAReq to activate
+                                */
+                               musb_writew(epio, MUSB_RXCSR,
+                                       csr | MUSB_RXCSR_DMAMODE);
+#endif
+                               musb_writew(epio, MUSB_RXCSR, csr);
+
+                               if (request->actual < request->length) {
+                                       int transfer_size = 0;
+#ifdef USE_MODE1
+                                       transfer_size = min(request->length,
+                                                       channel->max_len);
+#else
+                                       transfer_size = len;
+#endif
+                                       if (transfer_size <= musb_ep->packet_sz)
+                                               musb_ep->dma->desired_mode = 0;
+                                       else
+                                               musb_ep->dma->desired_mode = 1;
+
+                                       use_dma = c->channel_program(
+                                                       channel,
+                                                       musb_ep->packet_sz,
+                                                       channel->desired_mode,
+                                                       request->dma
+                                                       + request->actual,
+                                                       transfer_size);
+                               }
+
+                               if (use_dma)
+                                       return;
+                       }
+#endif /* Mentor's DMA */
+
+                       fifo_count = request->length - request->actual;
+                       DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+                                       musb_ep->end_point.name,
+                                       len, fifo_count,
+                                       musb_ep->packet_sz);
+
+                       fifo_count = min(len, fifo_count);
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+                       if (tusb_dma_omap() && musb_ep->dma) {
+                               struct dma_controller *c = musb->dma_controller;
+                               struct dma_channel *channel = musb_ep->dma;
+                               u32 dma_addr = request->dma + request->actual;
+                               int ret;
+
+                               ret = c->channel_program(channel,
+                                               musb_ep->packet_sz,
+                                               channel->desired_mode,
+                                               dma_addr,
+                                               fifo_count);
+                               if (ret)
+                                       return;
+                       }
+#endif
+
+                       musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+                                       (request->buf + request->actual));
+                       request->actual += fifo_count;
+
+                       /* REVISIT if we left anything in the fifo, flush
+                        * it and report -EOVERFLOW
+                        */
+
+                       /* ack the read! */
+                       csr |= MUSB_RXCSR_P_WZC_BITS;
+                       csr &= ~MUSB_RXCSR_RXPKTRDY;
+                       musb_writew(epio, MUSB_RXCSR, csr);
+               }
+       }
+
+       /* reached the end, or a short packet was detected */
+       if (request->actual == request->length || len < musb_ep->packet_sz)
+               musb_g_giveback(musb_ep, request, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+       u16                     csr;
+       struct usb_request      *request;
+       void __iomem            *mbase = musb->mregs;
+       struct musb_ep          *musb_ep = &musb->endpoints[epnum].ep_out;
+       void __iomem            *epio = musb->endpoints[epnum].regs;
+       struct dma_channel      *dma;
+
+       musb_ep_select(mbase, epnum);
+
+       request = next_request(musb_ep);
+
+       csr = musb_readw(epio, MUSB_RXCSR);
+       dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+       DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+                       csr, dma ? " (dma)" : "", request);
+
+       if (csr & MUSB_RXCSR_P_SENTSTALL) {
+               if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                       dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+                       (void) musb->dma_controller->channel_abort(dma);
+                       request->actual += musb_ep->dma->actual_len;
+               }
+
+               csr |= MUSB_RXCSR_P_WZC_BITS;
+               csr &= ~MUSB_RXCSR_P_SENTSTALL;
+               musb_writew(epio, MUSB_RXCSR, csr);
+
+               if (request)
+                       musb_g_giveback(musb_ep, request, -EPIPE);
+               goto done;
+       }
+
+       if (csr & MUSB_RXCSR_P_OVERRUN) {
+               /* csr |= MUSB_RXCSR_P_WZC_BITS; */
+               csr &= ~MUSB_RXCSR_P_OVERRUN;
+               musb_writew(epio, MUSB_RXCSR, csr);
+
+               DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
+               if (request && request->status == -EINPROGRESS)
+                       request->status = -EOVERFLOW;
+       }
+       if (csr & MUSB_RXCSR_INCOMPRX) {
+               /* REVISIT not necessarily an error */
+               DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+       }
+
+       if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+               /* "should not happen"; likely RXPKTRDY pending for DMA */
+               DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
+                       "%s busy, csr %04x\n",
+                       musb_ep->end_point.name, csr);
+               goto done;
+       }
+
+       if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+               csr &= ~(MUSB_RXCSR_AUTOCLEAR
+                               | MUSB_RXCSR_DMAENAB
+                               | MUSB_RXCSR_DMAMODE);
+               musb_writew(epio, MUSB_RXCSR,
+                       MUSB_RXCSR_P_WZC_BITS | csr);
+
+               request->actual += musb_ep->dma->actual_len;
+
+               DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+                       epnum, csr,
+                       musb_readw(epio, MUSB_RXCSR),
+                       musb_ep->dma->actual_len, request);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+               /* Autoclear doesn't clear RxPktRdy for short packets */
+               if ((dma->desired_mode == 0)
+                               || (dma->actual_len
+                                       & (musb_ep->packet_sz - 1))) {
+                       /* ack the read! */
+                       csr &= ~MUSB_RXCSR_RXPKTRDY;
+                       musb_writew(epio, MUSB_RXCSR, csr);
+               }
+
+               /* incomplete, and not short? wait for the next OUT packet */
+               if ((request->actual < request->length)
+                               && (musb_ep->dma->actual_len
+                                       == musb_ep->packet_sz))
+                       goto done;
+#endif
+               musb_g_giveback(musb_ep, request, 0);
+
+               request = next_request(musb_ep);
+               if (!request)
+                       goto done;
+
+               /* don't start more i/o till the stall clears */
+               musb_ep_select(mbase, epnum);
+               csr = musb_readw(epio, MUSB_RXCSR);
+               if (csr & MUSB_RXCSR_P_SENDSTALL)
+                       goto done;
+       }
+
+
+       /* start processing the next queued request, if any */
+       if (request)
+               rxstate(musb, to_musb_request(request));
+       else
+               DBG(3, "packet waiting for %s%s request\n",
+                               musb_ep->desc ? "" : "inactive ",
+                               musb_ep->end_point.name);
+
+done:
+       return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+                       const struct usb_endpoint_descriptor *desc)
+{
+       unsigned long           flags;
+       struct musb_ep          *musb_ep;
+       struct musb_hw_ep       *hw_ep;
+       void __iomem            *regs;
+       struct musb             *musb;
+       void __iomem    *mbase;
+       u8              epnum;
+       u16             csr;
+       unsigned        tmp;
+       int             status = -EINVAL;
+
+       if (!ep || !desc)
+               return -EINVAL;
+
+       musb_ep = to_musb_ep(ep);
+       hw_ep = musb_ep->hw_ep;
+       regs = hw_ep->regs;
+       musb = musb_ep->musb;
+       mbase = musb->mregs;
+       epnum = musb_ep->current_epnum;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (musb_ep->desc) {
+               status = -EBUSY;
+               goto fail;
+       }
+       musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+       /* check direction and (later) maxpacket size against endpoint */
+       if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
+               goto fail;
+
+       /* REVISIT this rules out high bandwidth periodic transfers */
+       tmp = le16_to_cpu(desc->wMaxPacketSize);
+       if (tmp & ~0x07ff)
+               goto fail;
+       musb_ep->packet_sz = tmp;
+
+       /* enable the interrupts for the endpoint, set the endpoint
+        * packet size (or fail), set the mode, clear the fifo
+        */
+       musb_ep_select(mbase, epnum);
+       if (desc->bEndpointAddress & USB_DIR_IN) {
+               u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+               if (hw_ep->is_shared_fifo)
+                       musb_ep->is_in = 1;
+               if (!musb_ep->is_in)
+                       goto fail;
+               if (tmp > hw_ep->max_packet_sz_tx)
+                       goto fail;
+
+               int_txe |= (1 << epnum);
+               musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+               /* REVISIT if can_bulk_split(), use it by updating "tmp";
+                * likewise for high bandwidth periodic tx
+                */
+               musb_writew(regs, MUSB_TXMAXP, tmp);
+
+               csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+               if (musb_readw(regs, MUSB_TXCSR)
+                               & MUSB_TXCSR_FIFONOTEMPTY)
+                       csr |= MUSB_TXCSR_FLUSHFIFO;
+               if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+                       csr |= MUSB_TXCSR_P_ISO;
+
+               /* set twice in case of double buffering */
+               musb_writew(regs, MUSB_TXCSR, csr);
+               /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+               musb_writew(regs, MUSB_TXCSR, csr);
+
+       } else {
+               u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
+
+               if (hw_ep->is_shared_fifo)
+                       musb_ep->is_in = 0;
+               if (musb_ep->is_in)
+                       goto fail;
+               if (tmp > hw_ep->max_packet_sz_rx)
+                       goto fail;
+
+               int_rxe |= (1 << epnum);
+               musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+
+               /* REVISIT if can_bulk_combine(), use it by updating "tmp";
+                * likewise for high bandwidth periodic rx
+                */
+               musb_writew(regs, MUSB_RXMAXP, tmp);
+
+               /* force shared fifo to OUT-only mode */
+               if (hw_ep->is_shared_fifo) {
+                       csr = musb_readw(regs, MUSB_TXCSR);
+                       csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+                       musb_writew(regs, MUSB_TXCSR, csr);
+               }
+
+               csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+               if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+                       csr |= MUSB_RXCSR_P_ISO;
+               else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+                       csr |= MUSB_RXCSR_DISNYET;
+
+               /* set twice in case of double buffering */
+               musb_writew(regs, MUSB_RXCSR, csr);
+               musb_writew(regs, MUSB_RXCSR, csr);
+       }
+
+       /* NOTE:  all the I/O code _should_ work fine without DMA, in case
+        * for some reason you run out of channels here.
+        */
+       if (is_dma_capable() && musb->dma_controller) {
+               struct dma_controller   *c = musb->dma_controller;
+
+               musb_ep->dma = c->channel_alloc(c, hw_ep,
+                               (desc->bEndpointAddress & USB_DIR_IN));
+       } else
+               musb_ep->dma = NULL;
+
+       musb_ep->desc = desc;
+       musb_ep->busy = 0;
+       status = 0;
+
+       pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+                       musb_driver_name, musb_ep->end_point.name,
+                       ({ char *s; switch (musb_ep->type) {
+                       case USB_ENDPOINT_XFER_BULK:    s = "bulk"; break;
+                       case USB_ENDPOINT_XFER_INT:     s = "int"; break;
+                       default:                        s = "iso"; break;
+                       }; s; }),
+                       musb_ep->is_in ? "IN" : "OUT",
+                       musb_ep->dma ? "dma, " : "",
+                       musb_ep->packet_sz);
+
+       schedule_work(&musb->irq_work);
+
+fail:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return status;
+}
+
+/*
+ * Disable an endpoint, flushing all queued requests.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+       unsigned long   flags;
+       struct musb     *musb;
+       u8              epnum;
+       struct musb_ep  *musb_ep;
+       void __iomem    *epio;
+       int             status = 0;
+
+       musb_ep = to_musb_ep(ep);
+       musb = musb_ep->musb;
+       epnum = musb_ep->current_epnum;
+       epio = musb->endpoints[epnum].regs;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_ep_select(musb->mregs, epnum);
+
+       /* zero the endpoint sizes */
+       if (musb_ep->is_in) {
+               u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
+               int_txe &= ~(1 << epnum);
+               musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+               musb_writew(epio, MUSB_TXMAXP, 0);
+       } else {
+               u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
+               int_rxe &= ~(1 << epnum);
+               musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+               musb_writew(epio, MUSB_RXMAXP, 0);
+       }
+
+       musb_ep->desc = NULL;
+
+       /* abort all pending DMA and requests */
+       nuke(musb_ep, -ESHUTDOWN);
+
+       schedule_work(&musb->irq_work);
+
+       spin_unlock_irqrestore(&(musb->lock), flags);
+
+       DBG(2, "%s\n", musb_ep->end_point.name);
+
+       return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+       struct musb_ep          *musb_ep = to_musb_ep(ep);
+       struct musb_request     *request = NULL;
+
+       request = kzalloc(sizeof *request, gfp_flags);
+       if (request) {
+               INIT_LIST_HEAD(&request->request.list);
+               request->request.dma = DMA_ADDR_INVALID;
+               request->epnum = musb_ep->current_epnum;
+               request->ep = musb_ep;
+       }
+
+       return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+       kfree(to_musb_request(req));
+}
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+       struct list_head        list;
+       struct device           *dev;
+       unsigned                bytes;
+       dma_addr_t              dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+       DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+               req->tx ? "TX/IN" : "RX/OUT",
+               &req->request, req->request.length, req->epnum);
+
+       musb_ep_select(musb->mregs, req->epnum);
+       if (req->tx)
+               txstate(musb, req);
+       else
+               rxstate(musb, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+                       gfp_t gfp_flags)
+{
+       struct musb_ep          *musb_ep;
+       struct musb_request     *request;
+       struct musb             *musb;
+       int                     status = 0;
+       unsigned long           lockflags;
+
+       if (!ep || !req)
+               return -EINVAL;
+       if (!req->buf)
+               return -ENODATA;
+
+       musb_ep = to_musb_ep(ep);
+       musb = musb_ep->musb;
+
+       request = to_musb_request(req);
+       request->musb = musb;
+
+       if (request->ep != musb_ep)
+               return -EINVAL;
+
+       DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+       /* request is mine now... */
+       request->request.actual = 0;
+       request->request.status = -EINPROGRESS;
+       request->epnum = musb_ep->current_epnum;
+       request->tx = musb_ep->is_in;
+
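+       /* map the buffer for DMA if the gadget driver didn't, else sync it */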
+       if (is_dma_capable() && musb_ep->dma) {
+               if (request->request.dma == DMA_ADDR_INVALID) {
+                       request->request.dma = dma_map_single(
+                                       musb->controller,
+                                       request->request.buf,
+                                       request->request.length,
+                                       request->tx
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+                       request->mapped = 1;
+               } else {
+                       dma_sync_single_for_device(musb->controller,
+                                       request->request.dma,
+                                       request->request.length,
+                                       request->tx
+                                               ? DMA_TO_DEVICE
+                                               : DMA_FROM_DEVICE);
+                       request->mapped = 0;
+               }
+       } else
+               request->mapped = 0;
+
+       spin_lock_irqsave(&musb->lock, lockflags);
+
+       /* don't queue if the ep is down */
+       if (!musb_ep->desc) {
+               DBG(4, "req %p queued to %s while ep %s\n",
+                               req, ep->name, "disabled");
+               status = -ESHUTDOWN;
+               goto cleanup;
+       }
+
+       /* add request to the list */
+       list_add_tail(&(request->request.list), &(musb_ep->req_list));
+
+       /* if this is the head of the queue, start i/o ... */
+       if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+               musb_ep_restart(musb, request);
+
+cleanup:
+       spin_unlock_irqrestore(&musb->lock, lockflags);
+       return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+       struct musb_ep          *musb_ep = to_musb_ep(ep);
+       struct usb_request      *r;
+       unsigned long           flags;
+       int                     status = 0;
+       struct musb             *musb = musb_ep->musb;
+
+       if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+               return -EINVAL;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       list_for_each_entry(r, &musb_ep->req_list, list) {
+               if (r == request)
+                       break;
+       }
+       if (r != request) {
+               DBG(3, "request %p not queued to %s\n", request, ep->name);
+               status = -EINVAL;
+               goto done;
+       }
+
+       /* if the hardware doesn't have the request, easy ... */
+       if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+               musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+       /* ... else abort the dma transfer ... */
+       else if (is_dma_capable() && musb_ep->dma) {
+               struct dma_controller   *c = musb->dma_controller;
+
+               musb_ep_select(musb->mregs, musb_ep->current_epnum);
+               if (c->channel_abort)
+                       status = c->channel_abort(musb_ep->dma);
+               else
+                       status = -EBUSY;
+               if (status == 0)
+                       musb_g_giveback(musb_ep, request, -ECONNRESET);
+       } else {
+               /* NOTE: by sticking to easily tested hardware/driver states,
+                * we leave counting of in-flight packets imprecise.
+                */
+               musb_g_giveback(musb_ep, request, -ECONNRESET);
+       }
+
+done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data but will queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+       struct musb_ep          *musb_ep = to_musb_ep(ep);
+       u8                      epnum = musb_ep->current_epnum;
+       struct musb             *musb = musb_ep->musb;
+       void __iomem            *epio = musb->endpoints[epnum].regs;
+       void __iomem            *mbase;
+       unsigned long           flags;
+       u16                     csr;
+       struct musb_request     *request = NULL;
+       int                     status = 0;
+
+       if (!ep)
+               return -EINVAL;
+       mbase = musb->mregs;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
+               status = -EINVAL;
+               goto done;
+       }
+
+       musb_ep_select(mbase, epnum);
+
+       /* cannot portably stall with non-empty FIFO */
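+       /* remember the queue head; clearing a halt may restart it below */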
+       request = to_musb_request(next_request(musb_ep));
+       if (value && musb_ep->is_in) {
+               csr = musb_readw(epio, MUSB_TXCSR);
+               if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+                       DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+                       spin_unlock_irqrestore(&musb->lock, flags);
+                       return -EAGAIN;
+               }
+
+       }
+
+       /* set/clear the stall and toggle bits */
+       DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+       if (musb_ep->is_in) {
+               csr = musb_readw(epio, MUSB_TXCSR);
+               if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+                       csr |= MUSB_TXCSR_FLUSHFIFO;
+               csr |= MUSB_TXCSR_P_WZC_BITS
+                       | MUSB_TXCSR_CLRDATATOG;
+               if (value)
+                       csr |= MUSB_TXCSR_P_SENDSTALL;
+               else
+                       csr &= ~(MUSB_TXCSR_P_SENDSTALL
+                               | MUSB_TXCSR_P_SENTSTALL);
+               csr &= ~MUSB_TXCSR_TXPKTRDY;
+               musb_writew(epio, MUSB_TXCSR, csr);
+       } else {
+               csr = musb_readw(epio, MUSB_RXCSR);
+               csr |= MUSB_RXCSR_P_WZC_BITS
+                       | MUSB_RXCSR_FLUSHFIFO
+                       | MUSB_RXCSR_CLRDATATOG;
+               if (value)
+                       csr |= MUSB_RXCSR_P_SENDSTALL;
+               else
+                       csr &= ~(MUSB_RXCSR_P_SENDSTALL
+                               | MUSB_RXCSR_P_SENTSTALL);
+               musb_writew(epio, MUSB_RXCSR, csr);
+       }
+
+done:
+
+       /* maybe start the first request in the queue */
+       if (!musb_ep->busy && !value && request) {
+               DBG(3, "restarting the request\n");
+               musb_ep_restart(musb, request);
+       }
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+       struct musb_ep          *musb_ep = to_musb_ep(ep);
+       void __iomem            *epio = musb_ep->hw_ep->regs;
+       int                     retval = -EINVAL;
+
+       if (musb_ep->desc && !musb_ep->is_in) {
+               struct musb             *musb = musb_ep->musb;
+               int                     epnum = musb_ep->current_epnum;
+               void __iomem            *mbase = musb->mregs;
+               unsigned long           flags;
+
+               spin_lock_irqsave(&musb->lock, flags);
+
+               musb_ep_select(mbase, epnum);
+               /* FIXME return zero unless RXPKTRDY is set */
+               retval = musb_readw(epio, MUSB_RXCOUNT);
+
+               spin_unlock_irqrestore(&musb->lock, flags);
+       }
+       return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+       struct musb_ep  *musb_ep = to_musb_ep(ep);
+       struct musb     *musb = musb_ep->musb;
+       u8              epnum = musb_ep->current_epnum;
+       void __iomem    *epio = musb->endpoints[epnum].regs;
+       void __iomem    *mbase;
+       unsigned long   flags;
+       u16             csr, int_txe;
+
+       mbase = musb->mregs;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_ep_select(mbase, (u8) epnum);
+
+       /* disable interrupts */
+       int_txe = musb_readw(mbase, MUSB_INTRTXE);
+       musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+       if (musb_ep->is_in) {
+               csr = musb_readw(epio, MUSB_TXCSR);
+               if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+                       csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+                       musb_writew(epio, MUSB_TXCSR, csr);
+                       /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+                       musb_writew(epio, MUSB_TXCSR, csr);
+               }
+       } else {
+               csr = musb_readw(epio, MUSB_RXCSR);
+               csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
+               musb_writew(epio, MUSB_RXCSR, csr);
+               musb_writew(epio, MUSB_RXCSR, csr);
+       }
+
+       /* re-enable interrupt */
+       musb_writew(mbase, MUSB_INTRTXE, int_txe);
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
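+/* reached through the usb_ep_*() wrappers called by gadget drivers */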
+static const struct usb_ep_ops musb_ep_ops = {
+       .enable         = musb_gadget_enable,
+       .disable        = musb_gadget_disable,
+       .alloc_request  = musb_alloc_request,
+       .free_request   = musb_free_request,
+       .queue          = musb_gadget_queue,
+       .dequeue        = musb_gadget_dequeue,
+       .set_halt       = musb_gadget_set_halt,
+       .fifo_status    = musb_gadget_fifo_status,
+       .fifo_flush     = musb_gadget_fifo_flush
+};
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+       struct musb     *musb = gadget_to_musb(gadget);
+
+       return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+       struct musb     *musb = gadget_to_musb(gadget);
+       void __iomem    *mregs = musb->mregs;
+       unsigned long   flags;
+       int             status = -EINVAL;
+       u8              power, devctl;
+       int             retries;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_B_PERIPHERAL:
+               /* NOTE:  OTG state machine doesn't include B_SUSPENDED;
+                * that's part of the standard usb 1.1 state machine, and
+                * doesn't affect OTG transitions.
+                */
+               if (musb->may_wakeup && musb->is_suspended)
+                       break;
+               goto done;
+       case OTG_STATE_B_IDLE:
+               /* Start SRP ... OTG not required. */
+               devctl = musb_readb(mregs, MUSB_DEVCTL);
+               DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+               devctl |= MUSB_DEVCTL_SESSION;
+               musb_writeb(mregs, MUSB_DEVCTL, devctl);
+               devctl = musb_readb(mregs, MUSB_DEVCTL);
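+               /* wait for SESSION to latch, then for the SRP pulse to
+                * finish and the bit to drop again
+                */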
+               retries = 100;
+               while (!(devctl & MUSB_DEVCTL_SESSION)) {
+                       devctl = musb_readb(mregs, MUSB_DEVCTL);
+                       if (retries-- < 1)
+                               break;
+               }
+               retries = 10000;
+               while (devctl & MUSB_DEVCTL_SESSION) {
+                       devctl = musb_readb(mregs, MUSB_DEVCTL);
+                       if (retries-- < 1)
+                               break;
+               }
+
+               /* Block idling for at least 1s */
+               musb_platform_try_idle(musb,
+                       jiffies + msecs_to_jiffies(1000));
+
+               status = 0;
+               goto done;
+       default:
+               DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+               goto done;
+       }
+
+       status = 0;
+
+       power = musb_readb(mregs, MUSB_POWER);
+       power |= MUSB_POWER_RESUME;
+       musb_writeb(mregs, MUSB_POWER, power);
+       DBG(2, "issue wakeup\n");
+
+       /* FIXME do this next chunk in a timer callback, no udelay */
+       mdelay(2);
+
+       power = musb_readb(mregs, MUSB_POWER);
+       power &= ~MUSB_POWER_RESUME;
+       musb_writeb(mregs, MUSB_POWER, power);
+done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+       struct musb     *musb = gadget_to_musb(gadget);
+
+       musb->is_self_powered = !!is_selfpowered;
+       return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+       u8 power;
+
+       power = musb_readb(musb->mregs, MUSB_POWER);
+       if (is_on)
+               power |= MUSB_POWER_SOFTCONN;
+       else
+               power &= ~MUSB_POWER_SOFTCONN;
+
+       /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+       DBG(3, "gadget %s D+ pullup %s\n",
+               musb->gadget_driver->function, is_on ? "on" : "off");
+       musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+       DBG(2, "<= %s =>\n", __func__);
+
+       /*
+        * FIXME iff driver's softconnect flag is set (as it is during probe,
+        * though that can clear it), just musb_pullup().
+        */
+
+       return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+       struct musb     *musb = gadget_to_musb(gadget);
+
+       if (!musb->xceiv.set_power)
+               return -EOPNOTSUPP;
+       return otg_set_power(&musb->xceiv, mA);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+       struct musb     *musb = gadget_to_musb(gadget);
+       unsigned long   flags;
+
+       is_on = !!is_on;
+
+       /* NOTE: this assumes we are sensing vbus; we'd rather
+        * not pullup unless the B-session is active.
+        */
+       spin_lock_irqsave(&musb->lock, flags);
+       if (is_on != musb->softconnect) {
+               musb->softconnect = is_on;
+               musb_pullup(musb, is_on);
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+       .get_frame              = musb_gadget_get_frame,
+       .wakeup                 = musb_gadget_wakeup,
+       .set_selfpowered        = musb_gadget_set_self_powered,
+       /* .vbus_session                = musb_gadget_vbus_session, */
+       .vbus_draw              = musb_gadget_vbus_draw,
+       .pullup                 = musb_gadget_pullup,
+};
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port.  It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+       /* kref_put(WHAT) */
+       dev_dbg(dev, "%s\n", __func__);
+}
+
+
+static void __init
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+       struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
+
+       memset(ep, 0, sizeof *ep);
+
+       ep->current_epnum = epnum;
+       ep->musb = musb;
+       ep->hw_ep = hw_ep;
+       ep->is_in = is_in;
+
+       INIT_LIST_HEAD(&ep->req_list);
+
+       sprintf(ep->name, "ep%d%s", epnum,
+                       (!epnum || hw_ep->is_shared_fifo) ? "" : (
+                               is_in ? "in" : "out"));
+       ep->end_point.name = ep->name;
+       INIT_LIST_HEAD(&ep->end_point.ep_list);
+       if (!epnum) {
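+               /* ep0: fixed 64-byte control endpoint, handled by ep0 ops */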
+               ep->end_point.maxpacket = 64;
+               ep->end_point.ops = &musb_g_ep0_ops;
+               musb->g.ep0 = &ep->end_point;
+       } else {
+               if (is_in)
+                       ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+               else
+                       ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+               ep->end_point.ops = &musb_ep_ops;
+               list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+       }
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __init musb_g_init_endpoints(struct musb *musb)
+{
+       u8                      epnum;
+       struct musb_hw_ep       *hw_ep;
+       unsigned                count = 0;
+
+       /* initialize endpoint list just once */
+       INIT_LIST_HEAD(&(musb->g.ep_list));
+
+       for (epnum = 0, hw_ep = musb->endpoints;
+                       epnum < musb->nr_endpoints;
+                       epnum++, hw_ep++) {
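+               /* shared-FIFO endpoints expose one musb_ep for both directions */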
+               if (hw_ep->is_shared_fifo /* || !epnum */) {
+                       init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+                       count++;
+               } else {
+                       if (hw_ep->max_packet_sz_tx) {
+                               init_peripheral_ep(musb, &hw_ep->ep_in,
+                                                       epnum, 1);
+                               count++;
+                       }
+                       if (hw_ep->max_packet_sz_rx) {
+                               init_peripheral_ep(musb, &hw_ep->ep_out,
+                                                       epnum, 0);
+                               count++;
+                       }
+               }
+       }
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __init musb_gadget_setup(struct musb *musb)
+{
+       int status;
+
+       /* REVISIT minor race:  if (erroneously) setting up two
+        * musb peripherals at the same time, only the bus lock
+        * is probably held.
+        */
+       if (the_gadget)
+               return -EBUSY;
+       the_gadget = musb;
+
+       musb->g.ops = &musb_gadget_operations;
+       musb->g.is_dualspeed = 1;
+       musb->g.speed = USB_SPEED_UNKNOWN;
+
+       /* this "gadget" abstracts/virtualizes the controller */
+       strcpy(musb->g.dev.bus_id, "gadget");
+       musb->g.dev.parent = musb->controller;
+       musb->g.dev.dma_mask = musb->controller->dma_mask;
+       musb->g.dev.release = musb_gadget_release;
+       musb->g.name = musb_driver_name;
+
+       if (is_otg_enabled(musb))
+               musb->g.is_otg = 1;
+
+       musb_g_init_endpoints(musb);
+
+       musb->is_active = 0;
+       musb_platform_try_idle(musb, 0);
+
+       status = device_register(&musb->g.dev);
+       if (status != 0)
+               the_gadget = NULL;
+       return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+       if (musb != the_gadget)
+               return;
+
+       device_unregister(&musb->g.dev);
+       the_gadget = NULL;
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL something went wrong (not driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+       int retval;
+       unsigned long flags;
+       struct musb *musb = the_gadget;
+
+       if (!driver
+                       || driver->speed != USB_SPEED_HIGH
+                       || !driver->bind
+                       || !driver->setup)
+               return -EINVAL;
+
+       /* driver must be initialized to support peripheral mode */
+       if (!musb) {
+               DBG(1, "%s, no dev??\n", __func__);
+               return -ENODEV;
+       }
+
+       DBG(3, "registering driver %s\n", driver->function);
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (musb->gadget_driver) {
+               DBG(1, "%s is already bound to %s\n",
+                               musb_driver_name,
+                               musb->gadget_driver->driver.name);
+               retval = -EBUSY;
+       } else {
+               musb->gadget_driver = driver;
+               musb->g.dev.driver = &driver->driver;
+               driver->driver.bus = NULL;
+               musb->softconnect = 1;
+               retval = 0;
+       }
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (retval == 0)
+               retval = driver->bind(&musb->g);
+       if (retval != 0) {
+               DBG(3, "bind to driver %s failed --> %d\n",
+                       driver->driver.name, retval);
+               musb->gadget_driver = NULL;
+               musb->g.dev.driver = NULL;
+       }
+
+       /* start peripheral and/or OTG engines */
+       if (retval == 0) {
+               spin_lock_irqsave(&musb->lock, flags);
+
+               /* REVISIT always use otg_set_peripheral(), handling
+                * issues including the root hub one below ...
+                */
+               musb->xceiv.gadget = &musb->g;
+               musb->xceiv.state = OTG_STATE_B_IDLE;
+               musb->is_active = 1;
+
+               /* FIXME this ignores the softconnect flag.  Drivers are
+                * allowed to hold the peripheral inactive until, for example,
+                * userspace hooks up printer hardware or DSP codecs, so
+                * hosts only see fully functional devices.
+                */
+
+               if (!is_otg_enabled(musb))
+                       musb_start(musb);
+
+               spin_unlock_irqrestore(&musb->lock, flags);
+
+               if (is_otg_enabled(musb)) {
+                       DBG(3, "OTG startup...\n");
+
+                       /* REVISIT:  funcall to other code, which also
+                        * handles power budgeting ... this way also
+                        * ensures HdrcStart is indirectly called.
+                        */
+                       retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+                       if (retval < 0) {
+                               DBG(1, "add_hcd failed, %d\n", retval);
+                               spin_lock_irqsave(&musb->lock, flags);
+                               musb->xceiv.gadget = NULL;
+                               musb->xceiv.state = OTG_STATE_UNDEFINED;
+                               musb->gadget_driver = NULL;
+                               musb->g.dev.driver = NULL;
+                               spin_unlock_irqrestore(&musb->lock, flags);
+                       }
+               }
+       }
+
+       return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+       int                     i;
+       struct musb_hw_ep       *hw_ep;
+
+       /* don't disconnect if it's not connected */
+       if (musb->g.speed == USB_SPEED_UNKNOWN)
+               driver = NULL;
+       else
+               musb->g.speed = USB_SPEED_UNKNOWN;
+
+       /* deactivate the hardware */
+       if (musb->softconnect) {
+               musb->softconnect = 0;
+               musb_pullup(musb, 0);
+       }
+       musb_stop(musb);
+
+       /* killing any outstanding requests will quiesce the driver;
+        * then report disconnect
+        */
+       if (driver) {
+               for (i = 0, hw_ep = musb->endpoints;
+                               i < musb->nr_endpoints;
+                               i++, hw_ep++) {
+                       musb_ep_select(musb->mregs, i);
+                       if (hw_ep->is_shared_fifo /* || !epnum */) {
+                               nuke(&hw_ep->ep_in, -ESHUTDOWN);
+                       } else {
+                               if (hw_ep->max_packet_sz_tx)
+                                       nuke(&hw_ep->ep_in, -ESHUTDOWN);
+                               if (hw_ep->max_packet_sz_rx)
+                                       nuke(&hw_ep->ep_out, -ESHUTDOWN);
+                       }
+               }
+
+               spin_unlock(&musb->lock);
+               driver->disconnect(&musb->g);
+               spin_lock(&musb->lock);
+       }
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+       unsigned long   flags;
+       int             retval = 0;
+       struct musb     *musb = the_gadget;
+
+       if (!driver || !driver->unbind || !musb)
+               return -EINVAL;
+
+       /* REVISIT always use otg_set_peripheral() here too;
+        * this needs to shut down the OTG engine.
+        */
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+       musb_hnp_stop(musb);
+#endif
+
+       if (musb->gadget_driver == driver) {
+
+               (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+               musb->xceiv.state = OTG_STATE_UNDEFINED;
+               stop_activity(musb, driver);
+
+               DBG(3, "unregistering driver %s\n", driver->function);
+               spin_unlock_irqrestore(&musb->lock, flags);
+               driver->unbind(&musb->g);
+               spin_lock_irqsave(&musb->lock, flags);
+
+               musb->gadget_driver = NULL;
+               musb->g.dev.driver = NULL;
+
+               musb->is_active = 0;
+               musb_platform_try_idle(musb, 0);
+       } else
+               retval = -EINVAL;
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (is_otg_enabled(musb) && retval == 0) {
+               usb_remove_hcd(musb_to_hcd(musb));
+               /* FIXME we need to be able to register another
+                * gadget driver here and have everything work;
+                * that currently misbehaves.
+                */
+       }
+
+       return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+       musb->is_suspended = 0;
+       switch (musb->xceiv.state) {
+       case OTG_STATE_B_IDLE:
+               break;
+       case OTG_STATE_B_WAIT_ACON:
+       case OTG_STATE_B_PERIPHERAL:
+               musb->is_active = 1;
+               if (musb->gadget_driver && musb->gadget_driver->resume) {
+                       spin_unlock(&musb->lock);
+                       musb->gadget_driver->resume(&musb->g);
+                       spin_lock(&musb->lock);
+               }
+               break;
+       default:
+               WARNING("unhandled RESUME transition (%s)\n",
+                               otg_state_string(musb));
+       }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+       u8      devctl;
+
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+       DBG(3, "devctl %02x\n", devctl);
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_B_IDLE:
+               if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+                       musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               break;
+       case OTG_STATE_B_PERIPHERAL:
+               musb->is_suspended = 1;
+               if (musb->gadget_driver && musb->gadget_driver->suspend) {
+                       spin_unlock(&musb->lock);
+                       musb->gadget_driver->suspend(&musb->g);
+                       spin_lock(&musb->lock);
+               }
+               break;
+       default:
+               /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+                * A_PERIPHERAL may need care too
+                */
+               WARNING("unhandled SUSPEND transition (%s)\n",
+                               otg_state_string(musb));
+       }
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+       musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+       void __iomem    *mregs = musb->mregs;
+       u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+       DBG(3, "devctl %02x\n", devctl);
+
+       /* clear HR */
+       musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+       /* don't draw vbus until new b-default session */
+       (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+       musb->g.speed = USB_SPEED_UNKNOWN;
+       if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+               spin_unlock(&musb->lock);
+               musb->gadget_driver->disconnect(&musb->g);
+               spin_lock(&musb->lock);
+       }
+
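+       /* in non-OTG builds the A_* and B_HOST cases are compiled out,
+        * so "default" falls through to the B_IDLE handling
+        */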
+       switch (musb->xceiv.state) {
+       default:
+#ifdef CONFIG_USB_MUSB_OTG
+               DBG(2, "Unhandled disconnect %s, setting a_idle\n",
+                       otg_state_string(musb));
+               musb->xceiv.state = OTG_STATE_A_IDLE;
+               break;
+       case OTG_STATE_A_PERIPHERAL:
+               musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+               break;
+       case OTG_STATE_B_WAIT_ACON:
+       case OTG_STATE_B_HOST:
+#endif
+       case OTG_STATE_B_PERIPHERAL:
+       case OTG_STATE_B_IDLE:
+               musb->xceiv.state = OTG_STATE_B_IDLE;
+               break;
+       case OTG_STATE_B_SRP_INIT:
+               break;
+       }
+
+       musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+       void __iomem    *mbase = musb->mregs;
+       u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
+       u8              power;
+
+       DBG(3, "<== %s addr=%x driver '%s'\n",
+                       (devctl & MUSB_DEVCTL_BDEVICE)
+                               ? "B-Device" : "A-Device",
+                       musb_readb(mbase, MUSB_FADDR),
+                       musb->gadget_driver
+                               ? musb->gadget_driver->driver.name
+                               : NULL
+                       );
+
+       /* report disconnect, if we didn't already (flushing EP state) */
+       if (musb->g.speed != USB_SPEED_UNKNOWN)
+               musb_g_disconnect(musb);
+
+       /* clear HR */
+       else if (devctl & MUSB_DEVCTL_HR)
+               musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+
+       /* what speed did we negotiate? */
+       power = musb_readb(mbase, MUSB_POWER);
+       musb->g.speed = (power & MUSB_POWER_HSMODE)
+                       ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+       /* start in USB_STATE_DEFAULT */
+       musb->is_active = 1;
+       musb->is_suspended = 0;
+       MUSB_DEV_MODE(musb);
+       musb->address = 0;
+       musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+       musb->may_wakeup = 0;
+       musb->g.b_hnp_enable = 0;
+       musb->g.a_alt_hnp_support = 0;
+       musb->g.a_hnp_support = 0;
+
+       /* Normal reset, as B-Device;
+        * or else after HNP, as A-Device
+        */
+       if (devctl & MUSB_DEVCTL_BDEVICE) {
+               musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+               musb->g.is_a_peripheral = 0;
+       } else if (is_otg_enabled(musb)) {
+               musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+               musb->g.is_a_peripheral = 1;
+       } else
+               WARN_ON(1);
+
+       /* start with default limits on VBUS power draw */
+       (void) musb_gadget_vbus_draw(&musb->g,
+                       is_otg_enabled(musb) ? 8 : 100);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644 (file)
index 0000000..59502da
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+       struct usb_request      request;
+       struct musb_ep          *ep;
+       struct musb             *musb;
+       u8 tx;                  /* endpoint direction */
+       u8 epnum;
+       u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+       return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+       /* stuff towards the head is basically write-once. */
+       struct usb_ep                   end_point;
+       char                            name[12];
+       struct musb_hw_ep               *hw_ep;
+       struct musb                     *musb;
+       u8                              current_epnum;
+
+       /* ... when enabled/disabled ... */
+       u8                              type;
+       u8                              is_in;
+       u16                             packet_sz;
+       const struct usb_endpoint_descriptor    *desc;
+       struct dma_channel              *dma;
+
+       /* later things are modified based on usage */
+       struct list_head                req_list;
+
+       /* true if lock must be dropped but req_list may not be advanced */
+       u8                              busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+       return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+       struct list_head        *queue = &ep->req_list;
+
+       if (list_empty(queue))
+               return NULL;
+       return container_of(queue->next, struct usb_request, list);
+}
+
+extern void musb_g_tx(struct musb *musb, u8 epnum);
+extern void musb_g_rx(struct musb *musb, u8 epnum);
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif         /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644 (file)
index 0000000..48d7d3c
--- /dev/null
@@ -0,0 +1,981 @@
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define        next_ep0_request(musb)  next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note:  we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers.  And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
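+
+/*
+ * Concretely, every such upcall in this file has the same unlock/relock
+ * shape (forward_to_driver() below is the canonical case):
+ *
+ *	spin_unlock(&musb->lock);
+ *	retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ *	spin_lock(&musb->lock);
+ *
+ * so anything the gadget driver queues from its setup() callback re-enters
+ * musb_g_ep0_queue() while the lock is temporarily released.
+ */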
+
+static char *decode_ep0stage(u8 stage)
+{
+       switch (stage) {
+       case MUSB_EP0_STAGE_SETUP:      return "idle";
+       case MUSB_EP0_STAGE_TX:         return "in";
+       case MUSB_EP0_STAGE_RX:         return "out";
+       case MUSB_EP0_STAGE_ACKWAIT:    return "wait";
+       case MUSB_EP0_STAGE_STATUSIN:   return "in/status";
+       case MUSB_EP0_STAGE_STATUSOUT:  return "out/status";
+       default:                        return "?";
+       }
+}
+
+/* handle a standard GET_STATUS request
+ * Context:  caller holds controller lock
+ */
+static int service_tx_status_request(
+       struct musb *musb,
+       const struct usb_ctrlrequest *ctrlrequest)
+{
+       void __iomem    *mbase = musb->mregs;
+       int handled = 1;
+       u8 result[2], epnum = 0;
+       const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+       result[1] = 0;
+
+       switch (recip) {
+       case USB_RECIP_DEVICE:
+               result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+               result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+               if (musb->g.is_otg) {
+                       result[0] |= musb->g.b_hnp_enable
+                               << USB_DEVICE_B_HNP_ENABLE;
+                       result[0] |= musb->g.a_alt_hnp_support
+                               << USB_DEVICE_A_ALT_HNP_SUPPORT;
+                       result[0] |= musb->g.a_hnp_support
+                               << USB_DEVICE_A_HNP_SUPPORT;
+               }
+#endif
+               break;
+
+       case USB_RECIP_INTERFACE:
+               result[0] = 0;
+               break;
+
+       case USB_RECIP_ENDPOINT: {
+               int             is_in;
+               struct musb_ep  *ep;
+               u16             tmp;
+               void __iomem    *regs;
+
+               epnum = (u8) ctrlrequest->wIndex;
+               if (!epnum) {
+                       result[0] = 0;
+                       break;
+               }
+
+               is_in = epnum & USB_DIR_IN;
+               epnum &= 0x0f;
+               if (epnum >= MUSB_C_NUM_EPS) {
+                       handled = -EINVAL;
+                       break;
+               }
+
+               if (is_in)
+                       ep = &musb->endpoints[epnum].ep_in;
+               else
+                       ep = &musb->endpoints[epnum].ep_out;
+               regs = musb->endpoints[epnum].regs;
+
+               if (!ep->desc) {
+                       handled = -EINVAL;
+                       break;
+               }
+
+               musb_ep_select(mbase, epnum);
+               if (is_in)
+                       tmp = musb_readw(regs, MUSB_TXCSR)
+                                               & MUSB_TXCSR_P_SENDSTALL;
+               else
+                       tmp = musb_readw(regs, MUSB_RXCSR)
+                                               & MUSB_RXCSR_P_SENDSTALL;
+               musb_ep_select(mbase, 0);
+
+               result[0] = tmp ? 1 : 0;
+               } break;
+
+       default:
+               /* class, vendor, etc ... delegate */
+               handled = 0;
+               break;
+       }
+
+       /* fill up the fifo; caller updates csr0 */
+       if (handled > 0) {
+               u16     len = le16_to_cpu(ctrlrequest->wLength);
+
+               if (len > 2)
+                       len = 2;
+               musb_write_fifo(&musb->endpoints[0], len, result);
+       }
+
+       return handled;
+}
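+
+/*
+ * Example of the two status bytes filled in above (USB 2.0, section 9.4.5):
+ * a self-powered device with remote wakeup enabled reports
+ * result[] = { 0x03, 0x00 } (bit 0 = self-powered, bit 1 = remote wakeup),
+ * while a halted endpoint reports { 0x01, 0x00 } (bit 0 = halt).
+ */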
+
+/*
+ * Handle a control-IN request: the ep0 buffer holds the current request,
+ * which is expected to be a standard control request.  Assumes the fifo
+ * is at least 2 bytes long.
+ *
+ * Returns 0 if the request was not handled,
+ * < 0 on error,
+ * > 0 when the request has been processed.
+ *
+ * Context:  caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+       int handled = 0;        /* not handled */
+
+       if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+                       == USB_TYPE_STANDARD) {
+               switch (ctrlrequest->bRequest) {
+               case USB_REQ_GET_STATUS:
+                       handled = service_tx_status_request(musb,
+                                       ctrlrequest);
+                       break;
+
+               /* case USB_REQ_SYNC_FRAME: */
+
+               default:
+                       break;
+               }
+       }
+       return handled;
+}
+
+/*
+ * Context:  caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+       musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+       musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+       void __iomem    *mbase = musb->mregs;
+       u8              devctl;
+
+       DBG(1, "HNP: Setting HR\n");
+       devctl = musb_readb(mbase, MUSB_DEVCTL);
+       musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ *     always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ *     always handled here, except for class/vendor/... features
+ *
+ * Context:  caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+               struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+       int handled = -EINVAL;
+       void __iomem *mbase = musb->mregs;
+       const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+       /* the gadget driver handles everything except what we MUST handle */
+       if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+                       == USB_TYPE_STANDARD) {
+               switch (ctrlrequest->bRequest) {
+               case USB_REQ_SET_ADDRESS:
+                       /* change it after the status stage */
+                       musb->set_address = true;
+                       musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+                       handled = 1;
+                       break;
+
+               case USB_REQ_CLEAR_FEATURE:
+                       switch (recip) {
+                       case USB_RECIP_DEVICE:
+                               if (ctrlrequest->wValue
+                                               != USB_DEVICE_REMOTE_WAKEUP)
+                                       break;
+                               musb->may_wakeup = 0;
+                               handled = 1;
+                               break;
+                       case USB_RECIP_INTERFACE:
+                               break;
+                       case USB_RECIP_ENDPOINT:{
+                               const u8 num = ctrlrequest->wIndex & 0x0f;
+                               struct musb_ep *musb_ep;
+
+                               if (num == 0
+                                               || num >= MUSB_C_NUM_EPS
+                                               || ctrlrequest->wValue
+                                                       != USB_ENDPOINT_HALT)
+                                       break;
+
+                               if (ctrlrequest->wIndex & USB_DIR_IN)
+                                       musb_ep = &musb->endpoints[num].ep_in;
+                               else
+                                       musb_ep = &musb->endpoints[num].ep_out;
+                               if (!musb_ep->desc)
+                                       break;
+
+                               /* REVISIT do it directly, no locking games */
+                               spin_unlock(&musb->lock);
+                               musb_gadget_set_halt(&musb_ep->end_point, 0);
+                               spin_lock(&musb->lock);
+
+                               /* select ep0 again */
+                               musb_ep_select(mbase, 0);
+                               handled = 1;
+                               } break;
+                       default:
+                               /* class, vendor, etc ... delegate */
+                               handled = 0;
+                               break;
+                       }
+                       break;
+
+               case USB_REQ_SET_FEATURE:
+                       switch (recip) {
+                       case USB_RECIP_DEVICE:
+                               handled = 1;
+                               switch (ctrlrequest->wValue) {
+                               case USB_DEVICE_REMOTE_WAKEUP:
+                                       musb->may_wakeup = 1;
+                                       break;
+                               case USB_DEVICE_TEST_MODE:
+                                       if (musb->g.speed != USB_SPEED_HIGH)
+                                               goto stall;
+                                       if (ctrlrequest->wIndex & 0xff)
+                                               goto stall;
+
+                                       switch (ctrlrequest->wIndex >> 8) {
+                                       case 1:
+                                               pr_debug("TEST_J\n");
+                                               /* TEST_J */
+                                               musb->test_mode_nr =
+                                                       MUSB_TEST_J;
+                                               break;
+                                       case 2:
+                                               /* TEST_K */
+                                               pr_debug("TEST_K\n");
+                                               musb->test_mode_nr =
+                                                       MUSB_TEST_K;
+                                               break;
+                                       case 3:
+                                               /* TEST_SE0_NAK */
+                                               pr_debug("TEST_SE0_NAK\n");
+                                               musb->test_mode_nr =
+                                                       MUSB_TEST_SE0_NAK;
+                                               break;
+                                       case 4:
+                                               /* TEST_PACKET */
+                                               pr_debug("TEST_PACKET\n");
+                                               musb->test_mode_nr =
+                                                       MUSB_TEST_PACKET;
+                                               break;
+                                       default:
+                                               goto stall;
+                                       }
+
+                                       /* enter test mode after irq */
+                                       if (handled > 0)
+                                               musb->test_mode = true;
+                                       break;
+#ifdef CONFIG_USB_MUSB_OTG
+                               case USB_DEVICE_B_HNP_ENABLE:
+                                       if (!musb->g.is_otg)
+                                               goto stall;
+                                       musb->g.b_hnp_enable = 1;
+                                       musb_try_b_hnp_enable(musb);
+                                       break;
+                               case USB_DEVICE_A_HNP_SUPPORT:
+                                       if (!musb->g.is_otg)
+                                               goto stall;
+                                       musb->g.a_hnp_support = 1;
+                                       break;
+                               case USB_DEVICE_A_ALT_HNP_SUPPORT:
+                                       if (!musb->g.is_otg)
+                                               goto stall;
+                                       musb->g.a_alt_hnp_support = 1;
+                                       break;
+#endif
+stall:
+                               default:
+                                       handled = -EINVAL;
+                                       break;
+                               }
+                               break;
+
+                       case USB_RECIP_INTERFACE:
+                               break;
+
+                       case USB_RECIP_ENDPOINT:{
+                               const u8                epnum =
+                                       ctrlrequest->wIndex & 0x0f;
+                               struct musb_ep          *musb_ep;
+                               struct musb_hw_ep       *ep;
+                               void __iomem            *regs;
+                               int                     is_in;
+                               u16                     csr;
+
+                               if (epnum == 0
+                                               || epnum >= MUSB_C_NUM_EPS
+                                               || ctrlrequest->wValue
+                                                       != USB_ENDPOINT_HALT)
+                                       break;
+
+                               ep = musb->endpoints + epnum;
+                               regs = ep->regs;
+                               is_in = ctrlrequest->wIndex & USB_DIR_IN;
+                               if (is_in)
+                                       musb_ep = &ep->ep_in;
+                               else
+                                       musb_ep = &ep->ep_out;
+                               if (!musb_ep->desc)
+                                       break;
+
+                               musb_ep_select(mbase, epnum);
+                               if (is_in) {
+                                       csr = musb_readw(regs,
+                                                       MUSB_TXCSR);
+                                       if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+                                               csr |= MUSB_TXCSR_FLUSHFIFO;
+                                       csr |= MUSB_TXCSR_P_SENDSTALL
+                                               | MUSB_TXCSR_CLRDATATOG
+                                               | MUSB_TXCSR_P_WZC_BITS;
+                                       musb_writew(regs, MUSB_TXCSR,
+                                                       csr);
+                               } else {
+                                       csr = musb_readw(regs,
+                                                       MUSB_RXCSR);
+                                       csr |= MUSB_RXCSR_P_SENDSTALL
+                                               | MUSB_RXCSR_FLUSHFIFO
+                                               | MUSB_RXCSR_CLRDATATOG
+                                               | MUSB_RXCSR_P_WZC_BITS;
+                                       musb_writew(regs, MUSB_RXCSR,
+                                                       csr);
+                               }
+
+                               /* select ep0 again */
+                               musb_ep_select(mbase, 0);
+                               handled = 1;
+                               } break;
+
+                       default:
+                               /* class, vendor, etc ... delegate */
+                               handled = 0;
+                               break;
+                       }
+                       break;
+               default:
+                       /* delegate SET_CONFIGURATION, etc */
+                       handled = 0;
+               }
+       } else
+               handled = 0;
+       return handled;
+}
+
+/* we have an ep0out data packet
+ * Context:  caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+       void __iomem            *regs = musb->control_ep->regs;
+       struct usb_request      *req;
+       u16                     tmp;
+
+       req = next_ep0_request(musb);
+
+       /* read packet and ack; or stall because of gadget driver bug:
+        * should have provided the rx buffer before setup() returned.
+        */
+       if (req) {
+               void            *buf = req->buf + req->actual;
+               unsigned        len = req->length - req->actual;
+               u16             count;
+
+               /* read the buffer */
+               count = musb_readb(regs, MUSB_COUNT0);
+               if (count > len) {
+                       req->status = -EOVERFLOW;
+                       count = len;
+               }
+               musb_read_fifo(&musb->endpoints[0], count, buf);
+               req->actual += count;
+               tmp = MUSB_CSR0_P_SVDRXPKTRDY;
+               /* a short packet (or the last one) ends the data stage */
+               if (count < 64 || req->actual == req->length) {
+                       musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+                       tmp |= MUSB_CSR0_P_DATAEND;
+               } else
+                       req = NULL;
+       } else
+               tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+
+       /* Completion handler may choose to stall, e.g. because the
+        * message just received holds invalid data.
+        */
+       if (req) {
+               musb->ackpend = tmp;
+               musb_g_ep0_giveback(musb, req);
+               if (!musb->ackpend)
+                       return;
+               musb->ackpend = 0;
+       }
+       musb_writew(regs, MUSB_CSR0, tmp);
+}
+
+/*
+ * transmitting to the host (IN), this code might be called from IRQ
+ * and from kernel thread.
+ *
+ * Context:  caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+       void __iomem            *regs = musb->control_ep->regs;
+       struct usb_request      *request = next_ep0_request(musb);
+       u16                     csr = MUSB_CSR0_TXPKTRDY;
+       u8                      *fifo_src;
+       u8                      fifo_count;
+
+       if (!request) {
+               /* WARN_ON(1); */
+               DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+               return;
+       }
+
+       /* load the data */
+       fifo_src = (u8 *) request->buf + request->actual;
+       fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+               request->length - request->actual);
+       musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+       request->actual += fifo_count;
+
+       /* update the flags */
+       if (fifo_count < MUSB_MAX_END0_PACKET
+                       || request->actual == request->length) {
+               musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+               csr |= MUSB_CSR0_P_DATAEND;
+       } else
+               request = NULL;
+
+       /* report completions as soon as the fifo's loaded; there's no
+        * win in waiting till this last packet gets acked.  (other than
+        * very precise fault reporting, needed by USB TMC; possible with
+        * this hardware, but not usable from portable gadget drivers.)
+        */
+       if (request) {
+               musb->ackpend = csr;
+               musb_g_ep0_giveback(musb, request);
+               if (!musb->ackpend)
+                       return;
+               musb->ackpend = 0;
+       }
+
+       /* send it out, triggering a "txpktrdy cleared" irq */
+       musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context:  caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+       struct usb_request      *r;
+       void __iomem            *regs = musb->control_ep->regs;
+
+       musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+       /* NOTE:  earlier 2.6 versions changed setup packets to host
+        * order, but now USB packets always stay in USB byte order.
+        */
+       DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+               req->bRequestType,
+               req->bRequest,
+               le16_to_cpu(req->wValue),
+               le16_to_cpu(req->wIndex),
+               le16_to_cpu(req->wLength));
+
+       /* clean up any leftover transfers */
+       r = next_ep0_request(musb);
+       if (r)
+               musb_g_ep0_giveback(musb, r);
+
+       /* For zero-data requests we want to delay the STATUS stage to
+        * avoid SETUPEND errors.  If we read data (OUT), delay accepting
+        * packets until there's a buffer to store them in.
+        *
+        * If we write data, the controller acts happier if we enable
+        * the TX FIFO right away, and give the controller a moment
+        * to switch modes...
+        */
+       musb->set_address = false;
+       musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+       if (req->wLength == 0) {
+               if (req->bRequestType & USB_DIR_IN)
+                       musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+               musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+       } else if (req->bRequestType & USB_DIR_IN) {
+               musb->ep0_state = MUSB_EP0_STAGE_TX;
+               musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+               while ((musb_readw(regs, MUSB_CSR0)
+                               & MUSB_CSR0_RXPKTRDY) != 0)
+                       cpu_relax();
+               musb->ackpend = 0;
+       } else
+               musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
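+
+/*
+ * Since the fields stay in USB byte order, a gadget driver's setup()
+ * callback does its own conversion; a minimal sketch (handler name is
+ * illustrative only, not part of this driver):
+ *
+ *	static int my_setup(struct usb_gadget *gadget,
+ *			const struct usb_ctrlrequest *ctrl)
+ *	{
+ *		u16 w_value  = le16_to_cpu(ctrl->wValue);
+ *		u16 w_index  = le16_to_cpu(ctrl->wIndex);
+ *		u16 w_length = le16_to_cpu(ctrl->wLength);
+ *		...
+ *	}
+ */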
+
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+       int retval;
+       if (!musb->gadget_driver)
+               return -EOPNOTSUPP;
+       spin_unlock(&musb->lock);
+       retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+       spin_lock(&musb->lock);
+       return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+       u16             csr;
+       u16             len;
+       void __iomem    *mbase = musb->mregs;
+       void __iomem    *regs = musb->endpoints[0].regs;
+       irqreturn_t     retval = IRQ_NONE;
+
+       musb_ep_select(mbase, 0);       /* select ep0 */
+       csr = musb_readw(regs, MUSB_CSR0);
+       len = musb_readb(regs, MUSB_COUNT0);
+
+       DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+                       csr, len,
+                       musb_readb(mbase, MUSB_FADDR),
+                       decode_ep0stage(musb->ep0_state));
+
+       /* we sent a stall ... need to acknowledge it now */
+       if (csr & MUSB_CSR0_P_SENTSTALL) {
+               musb_writew(regs, MUSB_CSR0,
+                               csr & ~MUSB_CSR0_P_SENTSTALL);
+               retval = IRQ_HANDLED;
+               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               csr = musb_readw(regs, MUSB_CSR0);
+       }
+
+       /* request ended "early" */
+       if (csr & MUSB_CSR0_P_SETUPEND) {
+               musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+               retval = IRQ_HANDLED;
+               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               csr = musb_readw(regs, MUSB_CSR0);
+               /* NOTE:  request may need completion */
+       }
+
+       /* docs from Mentor only describe tx, rx, and idle/setup states.
+        * we need to handle nuances around status stages, and also the
+        * case where status and setup stages come back-to-back ...
+        */
+       switch (musb->ep0_state) {
+
+       case MUSB_EP0_STAGE_TX:
+               /* irq on clearing txpktrdy */
+               if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+                       ep0_txstate(musb);
+                       retval = IRQ_HANDLED;
+               }
+               break;
+
+       case MUSB_EP0_STAGE_RX:
+               /* irq on set rxpktrdy */
+               if (csr & MUSB_CSR0_RXPKTRDY) {
+                       ep0_rxstate(musb);
+                       retval = IRQ_HANDLED;
+               }
+               break;
+
+       case MUSB_EP0_STAGE_STATUSIN:
+               /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+               /* update address (if needed) only @ the end of the
+                * status phase per usb spec, which also guarantees
+                * we get 10 msec to receive this irq... until this
+                * is done we won't see the next packet.
+                */
+               if (musb->set_address) {
+                       musb->set_address = false;
+                       musb_writeb(mbase, MUSB_FADDR, musb->address);
+               }
+
+               /* enter test mode if needed (exit by reset) */
+               else if (musb->test_mode) {
+                       DBG(1, "entering TESTMODE\n");
+
+                       if (MUSB_TEST_PACKET == musb->test_mode_nr)
+                               musb_load_testpacket(musb);
+
+                       musb_writeb(mbase, MUSB_TESTMODE,
+                                       musb->test_mode_nr);
+               }
+               /* FALLTHROUGH */
+
+       case MUSB_EP0_STAGE_STATUSOUT:
+               /* end of sequence #1: write to host (TX state) */
+               {
+                       struct usb_request      *req;
+
+                       req = next_ep0_request(musb);
+                       if (req)
+                               musb_g_ep0_giveback(musb, req);
+               }
+               retval = IRQ_HANDLED;
+               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               /* FALLTHROUGH */
+
+       case MUSB_EP0_STAGE_SETUP:
+               if (csr & MUSB_CSR0_RXPKTRDY) {
+                       struct usb_ctrlrequest  setup;
+                       int                     handled = 0;
+
+                       if (len != 8) {
+                               ERR("SETUP packet len %d != 8 ?\n", len);
+                               break;
+                       }
+                       musb_read_setup(musb, &setup);
+                       retval = IRQ_HANDLED;
+
+                       /* sometimes the RESET won't be reported */
+                       if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+                               u8      power;
+
+                               printk(KERN_NOTICE "%s: peripheral reset "
+                                               "irq lost!\n",
+                                               musb_driver_name);
+                               power = musb_readb(mbase, MUSB_POWER);
+                               musb->g.speed = (power & MUSB_POWER_HSMODE)
+                                       ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+                       }
+
+                       switch (musb->ep0_state) {
+
+                       /* sequence #3 (no data stage), includes requests
+                        * we can't forward (notably SET_ADDRESS and the
+                        * device/endpoint feature set/clear operations)
+                        * plus SET_CONFIGURATION and others we must
+                        */
+                       case MUSB_EP0_STAGE_ACKWAIT:
+                               handled = service_zero_data_request(
+                                               musb, &setup);
+
+                               /* status stage might be immediate */
+                               if (handled > 0) {
+                                       musb->ackpend |= MUSB_CSR0_P_DATAEND;
+                                       musb->ep0_state =
+                                               MUSB_EP0_STAGE_STATUSIN;
+                               }
+                               break;
+
+                       /* sequence #1 (IN to host), includes GET_STATUS
+                        * requests that we can't forward, GET_DESCRIPTOR
+                        * and others that we must
+                        */
+                       case MUSB_EP0_STAGE_TX:
+                               handled = service_in_request(musb, &setup);
+                               if (handled > 0) {
+                                       musb->ackpend = MUSB_CSR0_TXPKTRDY
+                                               | MUSB_CSR0_P_DATAEND;
+                                       musb->ep0_state =
+                                               MUSB_EP0_STAGE_STATUSOUT;
+                               }
+                               break;
+
+                       /* sequence #2 (OUT from host), always forward */
+                       default:                /* MUSB_EP0_STAGE_RX */
+                               break;
+                       }
+
+                       DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+                               handled, csr,
+                               decode_ep0stage(musb->ep0_state));
+
+                       /* unless we need to delegate this to the gadget
+                        * driver, we know how to wrap this up:  csr0 has
+                        * not yet been written.
+                        */
+                       if (handled < 0)
+                               goto stall;
+                       else if (handled > 0)
+                               goto finish;
+
+                       handled = forward_to_driver(musb, &setup);
+                       if (handled < 0) {
+                               musb_ep_select(mbase, 0);
+stall:
+                               DBG(3, "stall (%d)\n", handled);
+                               musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+                               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+finish:
+                               musb_writew(regs, MUSB_CSR0,
+                                               musb->ackpend);
+                               musb->ackpend = 0;
+                       }
+               }
+               break;
+
+       case MUSB_EP0_STAGE_ACKWAIT:
+               /* This should not happen. But happens with tusb6010 with
+                * g_file_storage and high speed. Do nothing.
+                */
+               retval = IRQ_HANDLED;
+               break;
+
+       default:
+               /* "can't happen" */
+               WARN_ON(1);
+               musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               break;
+       }
+
+       return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+       /* always enabled */
+       return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+       /* always enabled */
+       return -EINVAL;
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+       struct musb_ep          *ep;
+       struct musb_request     *req;
+       struct musb             *musb;
+       int                     status;
+       unsigned long           lockflags;
+       void __iomem            *regs;
+
+       if (!e || !r)
+               return -EINVAL;
+
+       ep = to_musb_ep(e);
+       musb = ep->musb;
+       regs = musb->control_ep->regs;
+
+       req = to_musb_request(r);
+       req->musb = musb;
+       req->request.actual = 0;
+       req->request.status = -EINPROGRESS;
+       req->tx = ep->is_in;
+
+       spin_lock_irqsave(&musb->lock, lockflags);
+
+       if (!list_empty(&ep->req_list)) {
+               status = -EBUSY;
+               goto cleanup;
+       }
+
+       switch (musb->ep0_state) {
+       case MUSB_EP0_STAGE_RX:         /* control-OUT data */
+       case MUSB_EP0_STAGE_TX:         /* control-IN data */
+       case MUSB_EP0_STAGE_ACKWAIT:    /* zero-length data */
+               status = 0;
+               break;
+       default:
+               DBG(1, "ep0 request queued in state %d\n",
+                               musb->ep0_state);
+               status = -EINVAL;
+               goto cleanup;
+       }
+
+       /* add request to the list */
+       list_add_tail(&(req->request.list), &(ep->req_list));
+
+       DBG(3, "queue to %s (%s), length=%d\n",
+                       ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+                       req->request.length);
+
+       musb_ep_select(musb->mregs, 0);
+
+       /* sequence #1, IN ... start writing the data */
+       if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+               ep0_txstate(musb);
+
+       /* sequence #3, no-data ... issue IN status */
+       else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+               if (req->request.length)
+                       status = -EINVAL;
+               else {
+                       musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+                       musb_writew(regs, MUSB_CSR0,
+                                       musb->ackpend | MUSB_CSR0_P_DATAEND);
+                       musb->ackpend = 0;
+                       musb_g_ep0_giveback(ep->musb, r);
+               }
+
+       /* else for sequence #2 (OUT), caller provides a buffer
+        * before the next packet arrives.  deferred responses
+        * (after SETUP is acked) are racey.
+        */
+       } else if (musb->ackpend) {
+               musb_writew(regs, MUSB_CSR0, musb->ackpend);
+               musb->ackpend = 0;
+       }
+
+cleanup:
+       spin_unlock_irqrestore(&musb->lock, lockflags);
+       return status;
+}
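+
+/*
+ * For context: the usual caller of the queue method above is a gadget
+ * driver that, from its setup() callback, queues the data stage (or a
+ * zero-length request for the no-data case) on ep0 -- which is why only
+ * the RX, TX and ACKWAIT states accept a request.  A minimal sketch of
+ * such a caller (names are illustrative only):
+ *
+ *	req->length = min(len, (unsigned) le16_to_cpu(ctrl->wLength));
+ *	req->zero = 0;
+ *	status = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+ */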
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+       /* we just won't support this */
+       return -EINVAL;
+}
+
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+       struct musb_ep          *ep;
+       struct musb             *musb;
+       void __iomem            *base, *regs;
+       unsigned long           flags;
+       int                     status;
+       u16                     csr;
+
+       if (!e || !value)
+               return -EINVAL;
+
+       ep = to_musb_ep(e);
+       musb = ep->musb;
+       base = musb->mregs;
+       regs = musb->control_ep->regs;
+       status = 0;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (!list_empty(&ep->req_list)) {
+               status = -EBUSY;
+               goto cleanup;
+       }
+
+       musb_ep_select(base, 0);
+       csr = musb->ackpend;
+
+       switch (musb->ep0_state) {
+
+       /* Stalls are usually issued after parsing SETUP packet, either
+        * directly in irq context from setup() or else later.
+        */
+       case MUSB_EP0_STAGE_TX:         /* control-IN data */
+       case MUSB_EP0_STAGE_ACKWAIT:    /* STALL for zero-length data */
+       case MUSB_EP0_STAGE_RX:         /* control-OUT data */
+               csr = musb_readw(regs, MUSB_CSR0);
+               /* FALLTHROUGH */
+
+       /* It's also OK to issue stalls during callbacks when a non-empty
+        * DATA stage buffer has been read (or even written).
+        */
+       case MUSB_EP0_STAGE_STATUSIN:   /* control-OUT status */
+       case MUSB_EP0_STAGE_STATUSOUT:  /* control-IN status */
+
+               csr |= MUSB_CSR0_P_SENDSTALL;
+               musb_writew(regs, MUSB_CSR0, csr);
+               musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+               musb->ackpend = 0;
+               break;
+       default:
+               DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+               status = -EINVAL;
+       }
+
+cleanup:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+       .enable         = musb_g_ep0_enable,
+       .disable        = musb_g_ep0_disable,
+       .alloc_request  = musb_alloc_request,
+       .free_request   = musb_free_request,
+       .queue          = musb_g_ep0_queue,
+       .dequeue        = musb_g_ep0_dequeue,
+       .set_halt       = musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644 (file)
index 0000000..8b4be01
--- /dev/null
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ *   they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ *     + including ep0, with all usbtest cases 9, 10
+ *     + usbtest 14 (ep0out) doesn't seem to run at all
+ *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ *       configurations, but otherwise double buffering passes basic tests.
+ *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ *     + about 1/15 the speed of typical EHCI implementations (PCI)
+ *     + RX, all too often reqpkt seems to misbehave after tx
+ *     + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to make NAKing for bulk or control
+ *   transfers unable to starve other requests; or to make efficient use
+ *   of hardware with periodic transfers.  (Note that network drivers
+ *   commonly post bulk reads that stay pending for a long time; these
+ *   would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
+ *   mostly works, except that with "usbnet" it's easy to trigger cases
+ *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
+ *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ *   although ARP RX wins.  (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction.  The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic:  the endpoint will be
+ * "claimed" until its software queue is no longer refilled.  No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
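+
+/*
+ * "Claimed" here means the hardware endpoint's qh pointer (plus, for
+ * periodic transfers, musb->periodic[epnum]) stays set while urbs are
+ * queued, and musb_giveback() clears it once the urb list goes empty.
+ * Very roughly:
+ *
+ *	hw_ep->in_qh = qh;			claim the endpoint
+ *	musb_start_urb(musb, 1, qh);		run its queue
+ *	...
+ *	if (list_empty(&qh->hep->urb_list))
+ *		hw_ep->in_qh = NULL;		release it again
+ */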
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+                       struct urb *urb, unsigned int nOut,
+                       u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+       void __iomem    *epio = ep->regs;
+       u16             csr;
+       int             retries = 1000;
+
+       csr = musb_readw(epio, MUSB_TXCSR);
+       while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+               DBG(5, "Host TX FIFONOTEMPTY csr: %04x\n", csr);
+               csr |= MUSB_TXCSR_FLUSHFIFO;
+               musb_writew(epio, MUSB_TXCSR, csr);
+               csr = musb_readw(epio, MUSB_TXCSR);
+               if (retries-- < 1) {
+                       ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+                       return;
+               }
+               mdelay(1);
+       }
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+       u16     txcsr;
+
+       /* NOTE: no locks here; caller should lock and select EP */
+       if (ep->epnum) {
+               txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+               txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+               musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+       } else {
+               txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+               musb_writew(ep->regs, MUSB_CSR0, txcsr);
+       }
+
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+       u16     txcsr;
+
+       /* NOTE: no locks here; caller should lock and select EP */
+       txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+       txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+       musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue
+ * end must be claimed from the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+       u16                     frame;
+       u32                     len;
+       void                    *buf;
+       void __iomem            *mbase =  musb->mregs;
+       struct urb              *urb = next_urb(qh);
+       struct musb_hw_ep       *hw_ep = qh->hw_ep;
+       unsigned                pipe = urb->pipe;
+       u8                      address = usb_pipedevice(pipe);
+       int                     epnum = hw_ep->epnum;
+
+       /* initialize software qh state */
+       qh->offset = 0;
+       qh->segsize = 0;
+
+       /* gather right source of data */
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               /* control transfers always start with SETUP */
+               is_in = 0;
+               hw_ep->out_qh = qh;
+               musb->ep0_stage = MUSB_EP0_START;
+               buf = urb->setup_packet;
+               len = 8;
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               qh->iso_idx = 0;
+               qh->frame = 0;
+               buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+               len = urb->iso_frame_desc[0].length;
+               break;
+       default:                /* bulk, interrupt */
+               buf = urb->transfer_buffer;
+               len = urb->transfer_buffer_length;
+       }
+
+       DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+                       qh, urb, address, qh->epnum,
+                       is_in ? "in" : "out",
+                       ({char *s; switch (qh->type) {
+                       case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+                       case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
+                       case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
+                       default:                        s = "-intr"; break;
+                       }; s; }),
+                       epnum, buf, len);
+
+       /* Configure endpoint */
+       if (is_in || hw_ep->is_shared_fifo)
+               hw_ep->in_qh = qh;
+       else
+               hw_ep->out_qh = qh;
+       musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+       /* transmit may have more work: start it when it is time */
+       if (is_in)
+               return;
+
+       /* determine if the time is right for a periodic transfer */
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_ISOC:
+       case USB_ENDPOINT_XFER_INT:
+               DBG(3, "check whether there's still time for periodic Tx\n");
+               qh->iso_idx = 0;
+               frame = musb_readw(mbase, MUSB_FRAME);
+               /* FIXME this doesn't implement that scheduling policy ...
+                * or handle framecounter wrapping
+                */
+               if ((urb->transfer_flags & URB_ISO_ASAP)
+                               || (frame >= urb->start_frame)) {
+                       /* REVISIT the SOF irq handler shouldn't duplicate
+                        * this code; and we don't init urb->start_frame...
+                        */
+                       qh->frame = 0;
+                       goto start;
+               } else {
+                       qh->frame = urb->start_frame;
+                       /* enable SOF interrupt so we can count down */
+                       DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef        CONFIG_ARCH_DAVINCI */
+                       musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+               }
+               break;
+       default:
+start:
+               DBG(4, "Start TX%d %s\n", epnum,
+                       hw_ep->tx_channel ? "dma" : "pio");
+
+               if (!hw_ep->tx_channel)
+                       musb_h_tx_start(hw_ep);
+               else if (is_cppi_enabled() || tusb_dma_omap())
+                       cppi_host_txdma_start(hw_ep);
+       }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+       DBG(({ int level; switch (urb->status) {
+                               case 0:
+                                       level = 4;
+                                       break;
+                               /* common/boring faults */
+                               case -EREMOTEIO:
+                               case -ESHUTDOWN:
+                               case -ECONNRESET:
+                               case -EPIPE:
+                                       level = 3;
+                                       break;
+                               default:
+                                       level = 2;
+                                       break;
+                               }; level; }),
+                       "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+                       urb, urb->status,
+                       usb_pipedevice(urb->pipe),
+                       usb_pipeendpoint(urb->pipe),
+                       usb_pipein(urb->pipe) ? "in" : "out",
+                       urb->actual_length, urb->transfer_buffer_length
+                       );
+
+       spin_unlock(&musb->lock);
+       usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+       spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+       struct usb_device       *udev = urb->dev;
+       u16                     csr;
+       void __iomem            *epio = ep->regs;
+       struct musb_qh          *qh;
+
+       /* FIXME:  the current Mentor DMA code seems to have
+        * problems getting toggle correct.
+        */
+
+       if (is_in || ep->is_shared_fifo)
+               qh = ep->in_qh;
+       else
+               qh = ep->out_qh;
+
+       if (!is_in) {
+               csr = musb_readw(epio, MUSB_TXCSR);
+               usb_settoggle(udev, qh->epnum, 1,
+                       (csr & MUSB_TXCSR_H_DATATOGGLE)
+                               ? 1 : 0);
+       } else {
+               csr = musb_readw(epio, MUSB_RXCSR);
+               usb_settoggle(udev, qh->epnum, 0,
+                       (csr & MUSB_RXCSR_H_DATATOGGLE)
+                               ? 1 : 0);
+       }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+       int                     is_in;
+       struct musb_hw_ep       *ep = qh->hw_ep;
+       struct musb             *musb = ep->musb;
+       int                     ready = qh->is_ready;
+
+       if (ep->is_shared_fifo)
+               is_in = 1;
+       else
+               is_in = usb_pipein(urb->pipe);
+
+       /* save toggle eagerly, for paranoia */
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_BULK:
+       case USB_ENDPOINT_XFER_INT:
+               musb_save_toggle(ep, is_in, urb);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               if (status == 0 && urb->error_count)
+                       status = -EXDEV;
+               break;
+       }
+
+       usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+       qh->is_ready = 0;
+       __musb_giveback(musb, urb, status);
+       qh->is_ready = ready;
+
+       /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+        * invalidate qh as soon as list_empty(&hep->urb_list)
+        */
+       if (list_empty(&qh->hep->urb_list)) {
+               struct list_head        *head;
+
+               if (is_in)
+                       ep->rx_reinit = 1;
+               else
+                       ep->tx_reinit = 1;
+
+               /* clobber old pointers to this qh */
+               if (is_in || ep->is_shared_fifo)
+                       ep->in_qh = NULL;
+               else
+                       ep->out_qh = NULL;
+               qh->hep->hcpriv = NULL;
+
+               switch (qh->type) {
+
+               case USB_ENDPOINT_XFER_ISOC:
+               case USB_ENDPOINT_XFER_INT:
+                       /* this is where periodic bandwidth should be
+                        * de-allocated if it's tracked and allocated;
+                        * and where we'd update the schedule tree...
+                        */
+                       musb->periodic[ep->epnum] = NULL;
+                       kfree(qh);
+                       qh = NULL;
+                       break;
+
+               case USB_ENDPOINT_XFER_CONTROL:
+               case USB_ENDPOINT_XFER_BULK:
+                       /* fifo policy for these lists, except that NAKing
+                        * should rotate a qh to the end (for fairness).
+                        */
+                       head = qh->ring.prev;
+                       list_del(&qh->ring);
+                       kfree(qh);
+                       qh = first_qh(head);
+                       break;
+               }
+       }
+       return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+               struct musb_hw_ep *hw_ep, int is_in)
+{
+       struct musb_qh  *qh;
+
+       if (is_in || hw_ep->is_shared_fifo)
+               qh = hw_ep->in_qh;
+       else
+               qh = hw_ep->out_qh;
+
+       if (urb->status == -EINPROGRESS)
+               qh = musb_giveback(qh, urb, 0);
+       else
+               qh = musb_giveback(qh, urb, urb->status);
+
+       if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+               DBG(4, "... next ep%d %cX urb %p\n",
+                               hw_ep->epnum, is_in ? 'R' : 'T',
+                               next_urb(qh));
+               musb_start_urb(musb, is_in, qh);
+       }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+       /* we don't want fifo to fill itself again;
+        * ignore dma (various models),
+        * leave toggle alone (may not have been saved yet)
+        */
+       csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+       csr &= ~(MUSB_RXCSR_H_REQPKT
+               | MUSB_RXCSR_H_AUTOREQ
+               | MUSB_RXCSR_AUTOCLEAR);
+
+       /* write 2x to allow double buffering */
+       musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+       musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+       /* flush writebuffer */
+       return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+       u16                     rx_count;
+       u8                      *buf;
+       u16                     csr;
+       bool                    done = false;
+       u32                     length;
+       int                     do_flush = 0;
+       struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
+       void __iomem            *epio = hw_ep->regs;
+       struct musb_qh          *qh = hw_ep->in_qh;
+       int                     pipe = urb->pipe;
+       void                    *buffer = urb->transfer_buffer;
+
+       /* musb_ep_select(mbase, epnum); */
+       rx_count = musb_readw(epio, MUSB_RXCOUNT);
+       DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+                       urb->transfer_buffer, qh->offset,
+                       urb->transfer_buffer_length);
+
+       /* unload FIFO */
+       if (usb_pipeisoc(pipe)) {
+               int                                     status = 0;
+               struct usb_iso_packet_descriptor        *d;
+
+               if (iso_err) {
+                       status = -EILSEQ;
+                       urb->error_count++;
+               }
+
+               d = urb->iso_frame_desc + qh->iso_idx;
+               buf = buffer + d->offset;
+               length = d->length;
+               if (rx_count > length) {
+                       if (status == 0) {
+                               status = -EOVERFLOW;
+                               urb->error_count++;
+                       }
+                       DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+                       do_flush = 1;
+               } else
+                       length = rx_count;
+               urb->actual_length += length;
+               d->actual_length = length;
+
+               d->status = status;
+
+               /* see if we are done */
+               done = (++qh->iso_idx >= urb->number_of_packets);
+       } else {
+               /* non-isoch */
+               buf = buffer + qh->offset;
+               length = urb->transfer_buffer_length - qh->offset;
+               if (rx_count > length) {
+                       if (urb->status == -EINPROGRESS)
+                               urb->status = -EOVERFLOW;
+                       DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+                       do_flush = 1;
+               } else
+                       length = rx_count;
+               urb->actual_length += length;
+               qh->offset += length;
+
+               /* see if we are done */
+               done = (urb->actual_length == urb->transfer_buffer_length)
+                       || (rx_count < qh->maxpacket)
+                       || (urb->status != -EINPROGRESS);
+               if (done
+                               && (urb->status == -EINPROGRESS)
+                               && (urb->transfer_flags & URB_SHORT_NOT_OK)
+                               && (urb->actual_length
+                                       < urb->transfer_buffer_length))
+                       urb->status = -EREMOTEIO;
+       }
+
+       musb_read_fifo(hw_ep, length, buf);
+
+       csr = musb_readw(epio, MUSB_RXCSR);
+       csr |= MUSB_RXCSR_H_WZC_BITS;
+       if (unlikely(do_flush))
+               musb_h_flush_rxfifo(hw_ep, csr);
+       else {
+               /* REVISIT this assumes AUTOCLEAR is never set */
+               csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+               if (!done)
+                       csr |= MUSB_RXCSR_H_REQPKT;
+               musb_writew(epio, MUSB_RXCSR, csr);
+       }
+
+       return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+       u16     csr;
+
+       /* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
+        * That always uses tx_reinit since ep0 repurposes TX register
+        * offsets; the initial SETUP packet is also a kind of OUT.
+        */
+
+       /* if programmed for Tx, put it in RX mode */
+       if (ep->is_shared_fifo) {
+               csr = musb_readw(ep->regs, MUSB_TXCSR);
+               if (csr & MUSB_TXCSR_MODE) {
+                       musb_h_tx_flush_fifo(ep);
+                       musb_writew(ep->regs, MUSB_TXCSR,
+                                       MUSB_TXCSR_FRCDATATOG);
+               }
+               /* clear mode (and everything else) to enable Rx */
+               musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+       /* scrub all previous state, clearing toggle */
+       } else {
+               csr = musb_readw(ep->regs, MUSB_RXCSR);
+               if (csr & MUSB_RXCSR_RXPKTRDY)
+                       WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+                               musb_readw(ep->regs, MUSB_RXCOUNT));
+
+               musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+       }
+
+       /* target addr and (for multipoint) hub addr/port */
+       if (musb->is_multipoint) {
+               musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+                       qh->addr_reg);
+               musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+                       qh->h_addr_reg);
+               musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+                       qh->h_port_reg);
+       } else
+               musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+       /* protocol/endpoint, interval/NAKlimit, i/o size */
+       musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+       musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+       /* NOTE: bulk combining rewrites high bits of maxpacket */
+       musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+       ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+                       struct urb *urb, unsigned int is_out,
+                       u8 *buf, u32 len)
+{
+       struct dma_controller   *dma_controller;
+       struct dma_channel      *dma_channel;
+       u8                      dma_ok;
+       void __iomem            *mbase = musb->mregs;
+       struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
+       void __iomem            *epio = hw_ep->regs;
+       struct musb_qh          *qh;
+       u16                     packet_sz;
+
+       if (!is_out || hw_ep->is_shared_fifo)
+               qh = hw_ep->in_qh;
+       else
+               qh = hw_ep->out_qh;
+
+       packet_sz = qh->maxpacket;
+
+       DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+                               "h_addr%02x h_port%02x bytes %d\n",
+                       is_out ? "-->" : "<--",
+                       epnum, urb, urb->dev->speed,
+                       qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+                       qh->h_addr_reg, qh->h_port_reg,
+                       len);
+
+       musb_ep_select(mbase, epnum);
+
+       /* candidate for DMA? */
+       dma_controller = musb->dma_controller;
+       if (is_dma_capable() && epnum && dma_controller) {
+               dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+               if (!dma_channel) {
+                       dma_channel = dma_controller->channel_alloc(
+                                       dma_controller, hw_ep, is_out);
+                       if (is_out)
+                               hw_ep->tx_channel = dma_channel;
+                       else
+                               hw_ep->rx_channel = dma_channel;
+               }
+       } else
+               dma_channel = NULL;
+
+       /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+       /* OUT/transmit/EP0 or IN/receive? */
+       if (is_out) {
+               u16     csr;
+               u16     int_txe;
+               u16     load_count;
+
+               csr = musb_readw(epio, MUSB_TXCSR);
+
+               /* disable interrupt in case we flush */
+               int_txe = musb_readw(mbase, MUSB_INTRTXE);
+               musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+               /* general endpoint setup */
+               if (epnum) {
+                       /* ASSERT:  TXCSR_DMAENAB was already cleared */
+
+                       /* flush all old state, set default */
+                       musb_h_tx_flush_fifo(hw_ep);
+                       csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+                                       | MUSB_TXCSR_DMAMODE
+                                       | MUSB_TXCSR_FRCDATATOG
+                                       | MUSB_TXCSR_H_RXSTALL
+                                       | MUSB_TXCSR_H_ERROR
+                                       | MUSB_TXCSR_TXPKTRDY
+                                       );
+                       csr |= MUSB_TXCSR_MODE;
+
+                       if (usb_gettoggle(urb->dev,
+                                       qh->epnum, 1))
+                               csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+                                       | MUSB_TXCSR_H_DATATOGGLE;
+                       else
+                               csr |= MUSB_TXCSR_CLRDATATOG;
+
+                       /* twice in case of double packet buffering */
+                       musb_writew(epio, MUSB_TXCSR, csr);
+                       /* REVISIT may need to clear FLUSHFIFO ... */
+                       musb_writew(epio, MUSB_TXCSR, csr);
+                       csr = musb_readw(epio, MUSB_TXCSR);
+               } else {
+                       /* endpoint 0: just flush */
+                       musb_writew(epio, MUSB_CSR0,
+                               csr | MUSB_CSR0_FLUSHFIFO);
+                       musb_writew(epio, MUSB_CSR0,
+                               csr | MUSB_CSR0_FLUSHFIFO);
+               }
+
+               /* target addr and (for multipoint) hub addr/port */
+               if (musb->is_multipoint) {
+                       musb_writeb(mbase,
+                               MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+                               qh->addr_reg);
+                       musb_writeb(mbase,
+                               MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+                               qh->h_addr_reg);
+                       musb_writeb(mbase,
+                               MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+                               qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+               } else
+                       musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+               /* protocol/endpoint/interval/NAKlimit */
+               if (epnum) {
+                       musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
+                       if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP,
+                                       packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
+                       else
+                               musb_writew(epio, MUSB_TXMAXP,
+                                       packet_sz);
+                       musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+               } else {
+                       musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+                       if (musb->is_multipoint)
+                               musb_writeb(epio, MUSB_TYPE0,
+                                               qh->type_reg);
+               }
+
+               if (can_bulk_split(musb, qh->type))
+                       load_count = min((u32) hw_ep->max_packet_sz_tx,
+                                               len);
+               else
+                       load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+               if (dma_channel) {
+
+                       /* clear previous state */
+                       csr = musb_readw(epio, MUSB_TXCSR);
+                       csr &= ~(MUSB_TXCSR_AUTOSET
+                               | MUSB_TXCSR_DMAMODE
+                               | MUSB_TXCSR_DMAENAB);
+                       csr |= MUSB_TXCSR_MODE;
+                       musb_writew(epio, MUSB_TXCSR,
+                               csr | MUSB_TXCSR_MODE);
+
+                       qh->segsize = min(len, dma_channel->max_len);
+
+                       if (qh->segsize <= packet_sz)
+                               dma_channel->desired_mode = 0;
+                       else
+                               dma_channel->desired_mode = 1;
+
+
+                       if (dma_channel->desired_mode == 0) {
+                               csr &= ~(MUSB_TXCSR_AUTOSET
+                                       | MUSB_TXCSR_DMAMODE);
+                               csr |= (MUSB_TXCSR_DMAENAB);
+                                       /* against programming guide */
+                       } else
+                               csr |= (MUSB_TXCSR_AUTOSET
+                                       | MUSB_TXCSR_DMAENAB
+                                       | MUSB_TXCSR_DMAMODE);
+
+                       musb_writew(epio, MUSB_TXCSR, csr);
+
+                       dma_ok = dma_controller->channel_program(
+                                       dma_channel, packet_sz,
+                                       dma_channel->desired_mode,
+                                       urb->transfer_dma,
+                                       qh->segsize);
+                       if (dma_ok) {
+                               load_count = 0;
+                       } else {
+                               dma_controller->channel_release(dma_channel);
+                               if (is_out)
+                                       hw_ep->tx_channel = NULL;
+                               else
+                                       hw_ep->rx_channel = NULL;
+                               dma_channel = NULL;
+                       }
+               }
+#endif
+
+               /* candidate for DMA */
+               if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+                       /* program endpoint CSRs first, then setup DMA.
+                        * assume CPPI setup succeeds.
+                        * defer enabling dma.
+                        */
+                       csr = musb_readw(epio, MUSB_TXCSR);
+                       csr &= ~(MUSB_TXCSR_AUTOSET
+                                       | MUSB_TXCSR_DMAMODE
+                                       | MUSB_TXCSR_DMAENAB);
+                       csr |= MUSB_TXCSR_MODE;
+                       musb_writew(epio, MUSB_TXCSR,
+                               csr | MUSB_TXCSR_MODE);
+
+                       dma_channel->actual_len = 0L;
+                       qh->segsize = len;
+
+                       /* TX uses "rndis" mode automatically, but needs help
+                        * to identify the zero-length-final-packet case.
+                        */
+                       dma_ok = dma_controller->channel_program(
+                                       dma_channel, packet_sz,
+                                       (urb->transfer_flags
+                                                       & URB_ZERO_PACKET)
+                                               == URB_ZERO_PACKET,
+                                       urb->transfer_dma,
+                                       qh->segsize);
+                       if (dma_ok) {
+                               load_count = 0;
+                       } else {
+                               dma_controller->channel_release(dma_channel);
+                               hw_ep->tx_channel = NULL;
+                               dma_channel = NULL;
+
+                               /* REVISIT there's an error path here that
+                                * needs handling:  can't do dma, but
+                                * there's no pio buffer address...
+                                */
+                       }
+               }
+
+               if (load_count) {
+                       /* ASSERT:  TXCSR_DMAENAB was already cleared */
+
+                       /* PIO to load FIFO */
+                       qh->segsize = load_count;
+                       musb_write_fifo(hw_ep, load_count, buf);
+                       csr = musb_readw(epio, MUSB_TXCSR);
+                       csr &= ~(MUSB_TXCSR_DMAENAB
+                               | MUSB_TXCSR_DMAMODE
+                               | MUSB_TXCSR_AUTOSET);
+                       /* write CSR */
+                       csr |= MUSB_TXCSR_MODE;
+
+                       if (epnum)
+                               musb_writew(epio, MUSB_TXCSR, csr);
+               }
+
+               /* re-enable interrupt */
+               musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+       /* IN/receive */
+       } else {
+               u16     csr;
+
+               if (hw_ep->rx_reinit) {
+                       musb_rx_reinit(musb, qh, hw_ep);
+
+                       /* init new state: toggle and NYET, maybe DMA later */
+                       if (usb_gettoggle(urb->dev, qh->epnum, 0))
+                               csr = MUSB_RXCSR_H_WR_DATATOGGLE
+                                       | MUSB_RXCSR_H_DATATOGGLE;
+                       else
+                               csr = 0;
+                       if (qh->type == USB_ENDPOINT_XFER_INT)
+                               csr |= MUSB_RXCSR_DISNYET;
+
+               } else {
+                       csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+                       if (csr & (MUSB_RXCSR_RXPKTRDY
+                                       | MUSB_RXCSR_DMAENAB
+                                       | MUSB_RXCSR_H_REQPKT))
+                               ERR("broken !rx_reinit, ep%d csr %04x\n",
+                                               hw_ep->epnum, csr);
+
+                       /* scrub any stale state, leaving toggle alone */
+                       csr &= MUSB_RXCSR_DISNYET;
+               }
+
+               /* kick things off */
+
+               if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+                       /* candidate for DMA */
+                       if (dma_channel) {
+                               dma_channel->actual_len = 0L;
+                               qh->segsize = len;
+
+                               /* AUTOREQ is in a DMA register */
+                               musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+                               csr = musb_readw(hw_ep->regs,
+                                               MUSB_RXCSR);
+
+                               /* unless caller treats short rx transfers as
+                                * errors, we dare not queue multiple transfers.
+                                */
+                               dma_ok = dma_controller->channel_program(
+                                               dma_channel, packet_sz,
+                                               !(urb->transfer_flags
+                                                       & URB_SHORT_NOT_OK),
+                                               urb->transfer_dma,
+                                               qh->segsize);
+                               if (!dma_ok) {
+                                       dma_controller->channel_release(
+                                                       dma_channel);
+                                       hw_ep->rx_channel = NULL;
+                                       dma_channel = NULL;
+                               } else
+                                       csr |= MUSB_RXCSR_DMAENAB;
+                       }
+               }
+
+               csr |= MUSB_RXCSR_H_REQPKT;
+               DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+               musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+               csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+       }
+}
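When can_bulk_split() allows bulk combining, the TXMAXP write above packs both the USB packet size and the FIFO's packet-combining depth into one register. A stand-alone illustration with hypothetical sizes (sketch only, assuming linux/types.h):

static u16 example_txmaxp(void)
{
        u16 packet_sz = 512;    /* endpoint wMaxPacketSize (hypothetical) */
        u16 fifo_sz = 4096;     /* hw_ep->max_packet_sz_tx (hypothetical) */

        /* low 11 bits: packet size; upper bits: extra packets per FIFO load */
        return packet_sz | (((fifo_sz / packet_sz) - 1) << 11);
        /* == 0x3a00, i.e. up to eight 512-byte packets combined per load */
}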
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+       bool                     more = false;
+       u8                      *fifo_dest = NULL;
+       u16                     fifo_count = 0;
+       struct musb_hw_ep       *hw_ep = musb->control_ep;
+       struct musb_qh          *qh = hw_ep->in_qh;
+       struct usb_ctrlrequest  *request;
+
+       switch (musb->ep0_stage) {
+       case MUSB_EP0_IN:
+               fifo_dest = urb->transfer_buffer + urb->actual_length;
+               fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+                                       - urb->actual_length)));
+               if (fifo_count < len)
+                       urb->status = -EOVERFLOW;
+
+               musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+               urb->actual_length += fifo_count;
+               if (len < qh->maxpacket) {
+                       /* always terminate on short read; it's
+                        * rarely reported as an error.
+                        */
+               } else if (urb->actual_length <
+                               urb->transfer_buffer_length)
+                       more = true;
+               break;
+       case MUSB_EP0_START:
+               request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+               if (!request->wLength) {
+                       DBG(4, "start no-DATA\n");
+                       break;
+               } else if (request->bRequestType & USB_DIR_IN) {
+                       DBG(4, "start IN-DATA\n");
+                       musb->ep0_stage = MUSB_EP0_IN;
+                       more = true;
+                       break;
+               } else {
+                       DBG(4, "start OUT-DATA\n");
+                       musb->ep0_stage = MUSB_EP0_OUT;
+                       more = true;
+               }
+               /* FALLTHROUGH */
+       case MUSB_EP0_OUT:
+               fifo_count = min(qh->maxpacket, ((u16)
+                               (urb->transfer_buffer_length
+                               - urb->actual_length)));
+
+               if (fifo_count) {
+                       fifo_dest = (u8 *) (urb->transfer_buffer
+                                       + urb->actual_length);
+                       DBG(3, "Sending %d bytes to %p\n",
+                                       fifo_count, fifo_dest);
+                       musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+                       urb->actual_length += fifo_count;
+                       more = true;
+               }
+               break;
+       default:
+               ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+               break;
+       }
+
+       return more;
+}
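The MUSB_EP0_START case above chooses the next control-transfer stage purely from the SETUP packet. Condensed as a sketch (the helper is hypothetical; the stage names and USB_DIR_IN are the driver's and USB core's):

static int ep0_next_stage(const struct usb_ctrlrequest *req)
{
        if (!req->wLength)
                return MUSB_EP0_START;  /* no data stage; status comes next */
        return (req->bRequestType & USB_DIR_IN)
                ? MUSB_EP0_IN           /* device-to-host data stage */
                : MUSB_EP0_OUT;         /* host-to-device data stage */
}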
+
+/*
+ * Handle default endpoint interrupt as host. Only called in IRQ context
+ * from the interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+       struct urb              *urb;
+       u16                     csr, len;
+       int                     status = 0;
+       void __iomem            *mbase = musb->mregs;
+       struct musb_hw_ep       *hw_ep = musb->control_ep;
+       void __iomem            *epio = hw_ep->regs;
+       struct musb_qh          *qh = hw_ep->in_qh;
+       bool                    complete = false;
+       irqreturn_t             retval = IRQ_NONE;
+
+       /* ep0 only has one queue, "in" */
+       urb = next_urb(qh);
+
+       musb_ep_select(mbase, 0);
+       csr = musb_readw(epio, MUSB_CSR0);
+       len = (csr & MUSB_CSR0_RXPKTRDY)
+                       ? musb_readb(epio, MUSB_COUNT0)
+                       : 0;
+
+       DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+               csr, qh, len, urb, musb->ep0_stage);
+
+       /* if we just did status stage, we are done */
+       if (MUSB_EP0_STATUS == musb->ep0_stage) {
+               retval = IRQ_HANDLED;
+               complete = true;
+       }
+
+       /* prepare status */
+       if (csr & MUSB_CSR0_H_RXSTALL) {
+               DBG(6, "STALLING ENDPOINT\n");
+               status = -EPIPE;
+
+       } else if (csr & MUSB_CSR0_H_ERROR) {
+               DBG(2, "no response, csr0 %04x\n", csr);
+               status = -EPROTO;
+
+       } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+               DBG(2, "control NAK timeout\n");
+
+               /* NOTE:  this code path would be a good place to PAUSE a
+                * control transfer, if another one is queued, so that
+                * ep0 is more likely to stay busy.
+                *
+                * if (qh->ring.next != &musb->control), then
+                * we have a candidate... NAKing is *NOT* an error
+                */
+               musb_writew(epio, MUSB_CSR0, 0);
+               retval = IRQ_HANDLED;
+       }
+
+       if (status) {
+               DBG(6, "aborting\n");
+               retval = IRQ_HANDLED;
+               if (urb)
+                       urb->status = status;
+               complete = true;
+
+               /* use the proper sequence to abort the transfer */
+               if (csr & MUSB_CSR0_H_REQPKT) {
+                       csr &= ~MUSB_CSR0_H_REQPKT;
+                       musb_writew(epio, MUSB_CSR0, csr);
+                       csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+                       musb_writew(epio, MUSB_CSR0, csr);
+               } else {
+                       csr |= MUSB_CSR0_FLUSHFIFO;
+                       musb_writew(epio, MUSB_CSR0, csr);
+                       musb_writew(epio, MUSB_CSR0, csr);
+                       csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+                       musb_writew(epio, MUSB_CSR0, csr);
+               }
+
+               musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+               /* clear it */
+               musb_writew(epio, MUSB_CSR0, 0);
+       }
+
+       if (unlikely(!urb)) {
+               /* stop endpoint since we have no place for its data; this
+                * SHOULD NEVER HAPPEN! */
+               ERR("no URB for end 0\n");
+
+               musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+               musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+               musb_writew(epio, MUSB_CSR0, 0);
+
+               goto done;
+       }
+
+       if (!complete) {
+               /* call common logic and prepare response */
+               if (musb_h_ep0_continue(musb, len, urb)) {
+                       /* more packets required */
+                       csr = (MUSB_EP0_IN == musb->ep0_stage)
+                               ?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+               } else {
+                       /* data transfer complete; perform status phase */
+                       if (usb_pipeout(urb->pipe)
+                                       || !urb->transfer_buffer_length)
+                               csr = MUSB_CSR0_H_STATUSPKT
+                                       | MUSB_CSR0_H_REQPKT;
+                       else
+                               csr = MUSB_CSR0_H_STATUSPKT
+                                       | MUSB_CSR0_TXPKTRDY;
+
+                       /* flag status stage */
+                       musb->ep0_stage = MUSB_EP0_STATUS;
+
+                       DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+               }
+               musb_writew(epio, MUSB_CSR0, csr);
+               retval = IRQ_HANDLED;
+       } else
+               musb->ep0_stage = MUSB_EP0_IDLE;
+
+       /* call completion handler if done */
+       if (complete)
+               musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+       return retval;
+}
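The status stage always reverses direction: OUT and no-data control transfers finish with an IN status packet, IN transfers with an OUT status packet. The CSR0 selection above, condensed (hypothetical helper; the bit names are the driver's):

static u16 ep0_status_csr(bool data_stage_in, bool has_data)
{
        if (!data_stage_in || !has_data)        /* OUT or no-data transfer */
                return MUSB_CSR0_H_STATUSPKT | MUSB_CSR0_H_REQPKT;
        return MUSB_CSR0_H_STATUSPKT | MUSB_CSR0_TXPKTRDY; /* IN transfer */
}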
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+       submit_urb ->
+               - if queue was empty, Program Endpoint
+               - ... which starts DMA to fifo in mode 1 or 0
+
+       DMA Isr (transfer complete) -> TxAvail()
+               - Stop DMA (~DmaEnab)   (<--- Alert ... currently happens
+                                       only in musb_cleanup_urb)
+               - TxPktRdy has to be set in mode 0 or for
+                       short packets in mode 1.
+*/
+
+#endif
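Reading the note above another way: after a Mentor TX DMA completion, software still sets TxPktRdy itself in mode 0, and in mode 1 only for a short final packet. A hedged sketch (helper and parameters are hypothetical):

static bool tx_dma_needs_pktrdy(int dma_mode, size_t last_pkt_len,
                                u16 maxpacket)
{
        if (dma_mode == 0)
                return true;                    /* mode 0: every packet */
        return last_pkt_len < maxpacket;        /* mode 1: short final pkt */
}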
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+       int                     pipe;
+       bool                    done = false;
+       u16                     tx_csr;
+       size_t                  wLength = 0;
+       u8                      *buf = NULL;
+       struct urb              *urb;
+       struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
+       void __iomem            *epio = hw_ep->regs;
+       struct musb_qh          *qh = hw_ep->out_qh;
+       u32                     status = 0;
+       void __iomem            *mbase = musb->mregs;
+       struct dma_channel      *dma;
+
+       urb = next_urb(qh);
+
+       musb_ep_select(mbase, epnum);
+       tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+       /* with CPPI, DMA sometimes triggers "extra" irqs */
+       if (!urb) {
+               DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+               goto finish;
+       }
+
+       pipe = urb->pipe;
+       dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+       DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+                       dma ? ", dma" : "");
+
+       /* check for errors */
+       if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+               /* dma was disabled, fifo flushed */
+               DBG(3, "TX end %d stall\n", epnum);
+
+               /* stall; record URB status */
+               status = -EPIPE;
+
+       } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+               /* (NON-ISO) dma was disabled, fifo flushed */
+               DBG(3, "TX 3strikes on ep=%d\n", epnum);
+
+               status = -ETIMEDOUT;
+
+       } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+               DBG(6, "TX end=%d device not responding\n", epnum);
+
+               /* NOTE:  this code path would be a good place to PAUSE a
+                * transfer, if there's some other (nonperiodic) tx urb
+                * that could use this fifo.  (dma complicates it...)
+                *
+                * if (bulk && qh->ring.next != &musb->out_bulk), then
+                * we have a candidate... NAKing is *NOT* an error
+                */
+               musb_ep_select(mbase, epnum);
+               musb_writew(epio, MUSB_TXCSR,
+                               MUSB_TXCSR_H_WZC_BITS
+                               | MUSB_TXCSR_TXPKTRDY);
+               goto finish;
+       }
+
+       if (status) {
+               if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                       dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+                       (void) musb->dma_controller->channel_abort(dma);
+               }
+
+               /* do the proper sequence to abort the transfer in the
+                * usb core; the dma engine should already be stopped.
+                */
+               musb_h_tx_flush_fifo(hw_ep);
+               tx_csr &= ~(MUSB_TXCSR_AUTOSET
+                               | MUSB_TXCSR_DMAENAB
+                               | MUSB_TXCSR_H_ERROR
+                               | MUSB_TXCSR_H_RXSTALL
+                               | MUSB_TXCSR_H_NAKTIMEOUT
+                               );
+
+               musb_ep_select(mbase, epnum);
+               musb_writew(epio, MUSB_TXCSR, tx_csr);
+               /* REVISIT may need to clear FLUSHFIFO ... */
+               musb_writew(epio, MUSB_TXCSR, tx_csr);
+               musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+               done = true;
+       }
+
+       /* second cppi case */
+       if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+               DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+               goto finish;
+
+       }
+
+       /* REVISIT this looks wrong... */
+       if (!status || dma || usb_pipeisoc(pipe)) {
+               if (dma)
+                       wLength = dma->actual_len;
+               else
+                       wLength = qh->segsize;
+               qh->offset += wLength;
+
+               if (usb_pipeisoc(pipe)) {
+                       struct usb_iso_packet_descriptor        *d;
+
+                       d = urb->iso_frame_desc + qh->iso_idx;
+                       d->actual_length = qh->segsize;
+                       if (++qh->iso_idx >= urb->number_of_packets) {
+                               done = true;
+                       } else {
+                               d++;
+                               buf = urb->transfer_buffer + d->offset;
+                               wLength = d->length;
+                       }
+               } else if (dma) {
+                       done = true;
+               } else {
+                       /* see if we need to send more data, or ZLP */
+                       if (qh->segsize < qh->maxpacket)
+                               done = true;
+                       else if (qh->offset == urb->transfer_buffer_length
+                                       && !(urb->transfer_flags
+                                               & URB_ZERO_PACKET))
+                               done = true;
+                       if (!done) {
+                               buf = urb->transfer_buffer
+                                               + qh->offset;
+                               wLength = urb->transfer_buffer_length
+                                               - qh->offset;
+                       }
+               }
+       }
+
+       /* urb->status != -EINPROGRESS means request has been faulted,
+        * so we must abort this transfer after cleanup
+        */
+       if (urb->status != -EINPROGRESS) {
+               done = true;
+               if (status == 0)
+                       status = urb->status;
+       }
+
+       if (done) {
+               /* set status */
+               urb->status = status;
+               urb->actual_length = qh->offset;
+               musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+
+       } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
+               /* WARN_ON(!buf); */
+
+               /* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
+                * (and presumably, fifo is not half-full) we should write TWO
+                * packets before updating TXCSR ... other docs disagree ...
+                */
+               /* PIO:  start next packet in this URB */
+               wLength = min(qh->maxpacket, (u16) wLength);
+               musb_write_fifo(hw_ep, wLength, buf);
+               qh->segsize = wLength;
+
+               musb_ep_select(mbase, epnum);
+               musb_writew(epio, MUSB_TXCSR,
+                               MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+       } else
+               DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+       return;
+}
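The PIO branch above ends a TX URB either on a short final packet or once the whole buffer has been sent with no trailing zero-length packet requested. The same rule as a compact sketch (hypothetical helper):

static bool pio_tx_done(const struct urb *urb, size_t offset,
                        size_t last_pkt_len, u16 maxpacket)
{
        if (last_pkt_len < maxpacket)           /* short packet ends the URB */
                return true;
        return offset == urb->transfer_buffer_length
                && !(urb->transfer_flags & URB_ZERO_PACKET);
}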
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+       submit_urb ->
+               - if queue was empty, ProgramEndpoint
+               - first IN token is sent out (by setting ReqPkt)
+       LinuxIsr -> RxReady()
+       /\      => first packet is received
+       |       - Set in mode 0 (DmaEnab, ~ReqPkt)
+       |               -> DMA Isr (transfer complete) -> RxReady()
+       |                   - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+       |                   - if urb not complete, send next IN token (ReqPkt)
+       |                          |            else complete urb.
+       |                          |
+       ---------------------------
+ *
+ * Nuances of mode 1:
+ *     For short packets, no ack (+RxPktRdy) is sent automatically
+ *     (even if AutoClear is ON).
+ *     For full packets, the ack (~RxPktRdy) and the next IN token (+ReqPkt)
+ *     are sent automatically => major problem, since collecting the next
+ *     packet becomes difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ *     All we care about at this driver level is that
+ *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ *       (b) termination conditions are: short RX, or buffer full;
+ *       (c) fault modes include
+ *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ *             (and that endpoint's dma queue stops immediately)
+ *           - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ *     So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ *     thus be a great candidate for using mode 1 ... for all but the
+ *     last packet of one URB's transfer.
+ */
+
+#endif
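So in mode 0 each DMA completion terminates the URB exactly when the buffer is full or the packet just collected was short, which is the test musb_host_rx() applies below. As a minimal sketch (hypothetical helper):

static bool rx_dma_mode0_done(const struct urb *urb, size_t xfer_len,
                                u16 maxpacket)
{
        return urb->actual_length + xfer_len >= urb->transfer_buffer_length
                || xfer_len < maxpacket;        /* short packet terminates */
}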
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+       struct urb              *urb;
+       struct musb_hw_ep       *hw_ep = musb->endpoints + epnum;
+       void __iomem            *epio = hw_ep->regs;
+       struct musb_qh          *qh = hw_ep->in_qh;
+       size_t                  xfer_len;
+       void __iomem            *mbase = musb->mregs;
+       int                     pipe;
+       u16                     rx_csr, val;
+       bool                    iso_err = false;
+       bool                    done = false;
+       u32                     status;
+       struct dma_channel      *dma;
+
+       musb_ep_select(mbase, epnum);
+
+       urb = next_urb(qh);
+       dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+       status = 0;
+       xfer_len = 0;
+
+       rx_csr = musb_readw(epio, MUSB_RXCSR);
+       val = rx_csr;
+
+       if (unlikely(!urb)) {
+               /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+                * usbtest #11 (unlinks) triggers it regularly, sometimes
+                * with fifo full.  (Only with DMA??)
+                */
+               DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+                       musb_readw(epio, MUSB_RXCOUNT));
+               musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+               return;
+       }
+
+       pipe = urb->pipe;
+
+       DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+               epnum, rx_csr, urb->actual_length,
+               dma ? dma->actual_len : 0);
+
+       /* check for errors; concurrent stall & unlink are not really
+        * handled yet! */
+       if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+               DBG(3, "RX end %d STALL\n", epnum);
+
+               /* stall; record URB status */
+               status = -EPIPE;
+
+       } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+               DBG(3, "end %d RX proto error\n", epnum);
+
+               status = -EPROTO;
+               musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+       } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+               if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+                       /* NOTE this code path would be a good place to PAUSE a
+                        * transfer, if there's some other (nonperiodic) rx urb
+                        * that could use this fifo.  (dma complicates it...)
+                        *
+                        * if (bulk && qh->ring.next != &musb->in_bulk), then
+                        * we have a candidate... NAKing is *NOT* an error
+                        */
+                       DBG(6, "RX end %d NAK timeout\n", epnum);
+                       musb_ep_select(mbase, epnum);
+                       musb_writew(epio, MUSB_RXCSR,
+                                       MUSB_RXCSR_H_WZC_BITS
+                                       | MUSB_RXCSR_H_REQPKT);
+
+                       goto finish;
+               } else {
+                       DBG(4, "RX end %d ISO data error\n", epnum);
+                       /* packet error reported later */
+                       iso_err = true;
+               }
+       }
+
+       /* faults abort the transfer */
+       if (status) {
+               /* clean up dma and collect transfer count */
+               if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                       dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+                       (void) musb->dma_controller->channel_abort(dma);
+                       xfer_len = dma->actual_len;
+               }
+               musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+               musb_writeb(epio, MUSB_RXINTERVAL, 0);
+               done = true;
+               goto finish;
+       }
+
+       if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+               /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+               ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+               goto finish;
+       }
+
+       /* thorough shutdown for now ... given more precise fault handling
+        * and better queueing support, we might keep a DMA pipeline going
+        * while processing this irq for earlier completions.
+        */
+
+       /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+       if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
+               /* REVISIT this happened for a while on some short reads...
+                * the cleanup still needs investigation... looks bad...
+                * and also duplicates dma cleanup code above ... plus,
+                * shouldn't this be the "half full" double buffer case?
+                */
+               if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+                       dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+                       (void) musb->dma_controller->channel_abort(dma);
+                       xfer_len = dma->actual_len;
+                       done = true;
+               }
+
+               DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+                               xfer_len, dma ? ", dma" : "");
+               rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+               musb_ep_select(mbase, epnum);
+               musb_writew(epio, MUSB_RXCSR,
+                               MUSB_RXCSR_H_WZC_BITS | rx_csr);
+       }
+#endif
+       if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+               xfer_len = dma->actual_len;
+
+               val &= ~(MUSB_RXCSR_DMAENAB
+                       | MUSB_RXCSR_H_AUTOREQ
+                       | MUSB_RXCSR_AUTOCLEAR
+                       | MUSB_RXCSR_RXPKTRDY);
+               musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+               /* done if urb buffer is full or a short packet was received */
+               done = (urb->actual_length + xfer_len >=
+                               urb->transfer_buffer_length
+                       || dma->actual_len < qh->maxpacket);
+
+               /* send IN token for next packet, without AUTOREQ */
+               if (!done) {
+                       val |= MUSB_RXCSR_H_REQPKT;
+                       musb_writew(epio, MUSB_RXCSR,
+                               MUSB_RXCSR_H_WZC_BITS | val);
+               }
+
+               DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+                       done ? "off" : "reset",
+                       musb_readw(epio, MUSB_RXCSR),
+                       musb_readw(epio, MUSB_RXCOUNT));
+#else
+               done = true;
+#endif
+       } else if (urb->status == -EINPROGRESS) {
+               /* if no errors, be sure a packet is ready for unloading */
+               if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+                       status = -EPROTO;
+                       ERR("Rx interrupt with no errors or packet!\n");
+
+                       /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+                       /* do the proper sequence to abort the transfer */
+                       musb_ep_select(mbase, epnum);
+                       val &= ~MUSB_RXCSR_H_REQPKT;
+                       musb_writew(epio, MUSB_RXCSR, val);
+                       goto finish;
+               }
+
+               /* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+               if (dma) {
+                       struct dma_controller   *c;
+                       u16                     rx_count;
+                       int                     ret;
+
+                       rx_count = musb_readw(epio, MUSB_RXCOUNT);
+
+                       DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+                                       epnum, rx_count,
+                                       urb->transfer_dma
+                                               + urb->actual_length,
+                                       qh->offset,
+                                       urb->transfer_buffer_length);
+
+                       c = musb->dma_controller;
+
+                       dma->desired_mode = 0;
+#ifdef USE_MODE1
+                       /* because of the issue below, mode 1 will
+                        * only rarely behave with correct semantics.
+                        */
+                       if ((urb->transfer_flags &
+                                               URB_SHORT_NOT_OK)
+                               && (urb->transfer_buffer_length -
+                                               urb->actual_length)
+                                       > qh->maxpacket)
+                               dma->desired_mode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ *     It's basically usable only for mass storage class; essentially all
+ *     other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ *     An extra IN token is sent at the end of the transfer (due to AUTOREQ).
+ *     If you try to use mode 1 for (transfer_buffer_length - 512) and use
+ *     the extra IN token to grab the last packet using mode 0, the problem
+ *     is that you cannot be sure when the device will send the last packet
+ *     and set RxPktRdy.  Sometimes the packet is received so early that it
+ *     gets lost when RxCSR is re-set at the end of the mode 1 transfer;
+ *     sometimes it arrives just late enough that reconfiguring for mode 0
+ *     right after the mode 1 transfer completes finds rxcount 0.  Nor can
+ *     you simply wait for an interrupt when the packet arrives: none will
+ *     be raised.
+ */
+
+                       val = musb_readw(epio, MUSB_RXCSR);
+                       val &= ~MUSB_RXCSR_H_REQPKT;
+
+                       if (dma->desired_mode == 0)
+                               val &= ~MUSB_RXCSR_H_AUTOREQ;
+                       else
+                               val |= MUSB_RXCSR_H_AUTOREQ;
+                       val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+
+                       musb_writew(epio, MUSB_RXCSR,
+                               MUSB_RXCSR_H_WZC_BITS | val);
+
+                       /* REVISIT: if/when actual_length != 0,
+                        * transfer_buffer_length needs to be
+                        * adjusted first...
+                        */
+                       ret = c->channel_program(
+                               dma, qh->maxpacket,
+                               dma->desired_mode,
+                               urb->transfer_dma
+                                       + urb->actual_length,
+                               (dma->desired_mode == 0)
+                                       ? rx_count
+                                       : urb->transfer_buffer_length);
+
+                       if (!ret) {
+                               c->channel_release(dma);
+                               hw_ep->rx_channel = NULL;
+                               dma = NULL;
+                               /* REVISIT reset CSR */
+                       }
+               }
+#endif /* Mentor DMA */
+
+               if (!dma) {
+                       done = musb_host_packet_rx(musb, urb,
+                                       epnum, iso_err);
+                       DBG(6, "read %spacket\n", done ? "last " : "");
+               }
+       }
+
+       if (dma && usb_pipeisoc(pipe)) {
+               struct usb_iso_packet_descriptor        *d;
+               int                                     iso_stat = status;
+
+               d = urb->iso_frame_desc + qh->iso_idx;
+               d->actual_length += xfer_len;
+               if (iso_err) {
+                       iso_stat = -EILSEQ;
+                       urb->error_count++;
+               }
+               d->status = iso_stat;
+       }
+
+finish:
+       urb->actual_length += xfer_len;
+       qh->offset += xfer_len;
+       if (done) {
+               if (urb->status == -EINPROGRESS)
+                       urb->status = status;
+               musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+       }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+       struct musb             *musb,
+       struct musb_qh          *qh,
+       int                     is_in)
+{
+       int                     idle;
+       int                     best_diff;
+       int                     best_end, epnum;
+       struct musb_hw_ep       *hw_ep = NULL;
+       struct list_head        *head = NULL;
+
+       /* use fixed hardware for control and bulk */
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               head = &musb->control;
+               hw_ep = musb->control_ep;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               hw_ep = musb->bulk_ep;
+               if (is_in)
+                       head = &musb->in_bulk;
+               else
+                       head = &musb->out_bulk;
+               break;
+       }
+       if (head) {
+               idle = list_empty(head);
+               list_add_tail(&qh->ring, head);
+               goto success;
+       }
+
+       /* else, periodic transfers get muxed to other endpoints */
+
+       /* FIXME this doesn't consider direction, so it can only
+        * work for one half of the endpoint hardware, and assumes
+        * the previous cases handled all non-shared endpoints...
+        */
+
+       /* we know this qh hasn't been scheduled, so all we need to do
+        * is choose which hardware endpoint to put it on ...
+        *
+        * REVISIT what we really want here is a regular schedule tree
+        * like e.g. OHCI uses, but for now musb->periodic is just an
+        * array of the _single_ logical endpoint associated with a
+        * given physical one (identity mapping logical->physical).
+        *
+        * that simplistic approach makes TT scheduling a lot simpler;
+        * there is none, and thus none of its complexity...
+        */
+       best_diff = 4096;
+       best_end = -1;
+
+       for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+               int     diff;
+
+               if (musb->periodic[epnum])
+                       continue;
+               hw_ep = &musb->endpoints[epnum];
+               if (hw_ep == musb->bulk_ep)
+                       continue;
+
+               if (is_in)
+                       diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+               else
+                       diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+
+               if (diff > 0 && best_diff > diff) {
+                       best_diff = diff;
+                       best_end = epnum;
+               }
+       }
+       if (best_end < 0)
+               return -ENOSPC;
+
+       idle = 1;
+       hw_ep = musb->endpoints + best_end;
+       musb->periodic[best_end] = qh;
+       DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+success:
+       qh->hw_ep = hw_ep;
+       qh->hep->hcpriv = qh;
+       if (idle)
+               musb_start_urb(musb, is_in, qh);
+       return 0;
+}
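The periodic-endpoint search above is a plain best-fit scan over the unused hardware endpoints. A condensed, stand-alone version (hypothetical helper, illustrative FIFO sizes):

static int pick_periodic_ep(const u16 *fifo_sz, int n_eps, u16 maxpacket)
{
        int epnum, best_end = -1, best_diff = 4096;

        for (epnum = 1; epnum < n_eps; epnum++) {
                int diff = fifo_sz[epnum] - maxpacket;

                if (diff > 0 && diff < best_diff) {
                        best_diff = diff;
                        best_end = epnum;
                }
        }
        /* e.g. sizes {0, 512, 128, 1024} with maxpacket 64 pick endpoint 2 */
        return best_end;        /* -1 maps to -ENOSPC in the driver */
}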
+
+static int musb_urb_enqueue(
+       struct usb_hcd                  *hcd,
+       struct urb                      *urb,
+       gfp_t                           mem_flags)
+{
+       unsigned long                   flags;
+       struct musb                     *musb = hcd_to_musb(hcd);
+       struct usb_host_endpoint        *hep = urb->ep;
+       struct musb_qh                  *qh = hep->hcpriv;
+       struct usb_endpoint_descriptor  *epd = &hep->desc;
+       int                             ret;
+       unsigned                        type_reg;
+       unsigned                        interval;
+
+       /* host role must be active */
+       if (!is_host_active(musb) || !musb->is_active)
+               return -ENODEV;
+
+       spin_lock_irqsave(&musb->lock, flags);
+       ret = usb_hcd_link_urb_to_ep(hcd, urb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       if (ret)
+               return ret;
+
+       /* DMA mapping was already done, if needed, and this urb is on
+        * hep->urb_list ... so there's little to do unless hep wasn't
+        * yet scheduled onto a live qh.
+        *
+        * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+        * disabled, testing for empty qh->ring and avoiding qh setup costs
+        * except for the first urb queued after a config change.
+        */
+       if (qh) {
+               urb->hcpriv = qh;
+               return 0;
+       }
+
+       /* Allocate and initialize qh, minimizing the work done each time
+        * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
+        *
+        * REVISIT consider a dedicated qh kmem_cache, so it's harder
+        * for bugs in other kernel code to break this driver...
+        */
+       qh = kzalloc(sizeof *qh, mem_flags);
+       if (!qh) {
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
+               return -ENOMEM;
+       }
+
+       qh->hep = hep;
+       qh->dev = urb->dev;
+       INIT_LIST_HEAD(&qh->ring);
+       qh->is_ready = 1;
+
+       qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
+       /* no high bandwidth support yet */
+       if (qh->maxpacket & ~0x7ff) {
+               ret = -EMSGSIZE;
+               goto done;
+       }
+
+       qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+       qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+       /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+       qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+       /* precompute rxtype/txtype/type0 register */
+       type_reg = (qh->type << 4) | qh->epnum;
+       switch (urb->dev->speed) {
+       case USB_SPEED_LOW:
+               type_reg |= 0xc0;
+               break;
+       case USB_SPEED_FULL:
+               type_reg |= 0x80;
+               break;
+       default:
+               type_reg |= 0x40;
+       }
+       qh->type_reg = type_reg;
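+       /* e.g. a bulk OUT transfer to endpoint 2 of a full-speed device
+        * gives type_reg = (USB_ENDPOINT_XFER_BULK << 4) | 2 | 0x80 = 0xa2
+        */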
+
+       /* precompute rxinterval/txinterval register */
+       interval = min((u8)16, epd->bInterval); /* log encoding */
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_INT:
+               /* fullspeed uses linear encoding */
+               if (USB_SPEED_FULL == urb->dev->speed) {
+                       interval = epd->bInterval;
+                       if (!interval)
+                               interval = 1;
+               }
+               /* FALLTHROUGH */
+       case USB_ENDPOINT_XFER_ISOC:
+               /* iso always uses log encoding */
+               break;
+       default:
+               /* REVISIT we actually want to use NAK limits, hinting to the
+                * transfer scheduling logic to try some other qh, e.g. try
+                * for 2 msec first:
+                *
+                * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+                *
+                * The downside of disabling this is that transfer scheduling
+                * gets VERY unfair for nonperiodic transfers; a misbehaving
+                * peripheral could make that hurt.  So could a perfectly
+                * normal peripheral, for reads:  network and other drivers
+                * keep reads posted at all times, so having one pending for
+                * a week should be perfectly safe.
+                *
+                * The upside of disabling it is that it avoids having the
+                * transfer scheduling code set this transfer aside for a
+                * while.
+                */
+               interval = 0;
+       }
+       qh->intv_reg = interval;
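+       /* e.g. a high-speed interrupt endpoint with bInterval 4 keeps 4
+        * (log encoding, 2^(4-1) microframes); a full-speed interrupt
+        * endpoint with bInterval 10 keeps 10 (linear encoding, frames).
+        */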
+
+       /* precompute addressing for external hub/tt ports */
+       if (musb->is_multipoint) {
+               struct usb_device       *parent = urb->dev->parent;
+
+               if (parent != hcd->self.root_hub) {
+                       qh->h_addr_reg = (u8) parent->devnum;
+
+                       /* set up tt info if needed */
+                       if (urb->dev->tt) {
+                               qh->h_port_reg = (u8) urb->dev->ttport;
+                               qh->h_addr_reg |= 0x80;
+                       }
+               }
+       }
+
+       /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+        * until we get real dma queues (with an entry for each urb/buffer),
+        * we only have work to do in the former case.
+        */
+       spin_lock_irqsave(&musb->lock, flags);
+       if (hep->hcpriv) {
+               /* some concurrent activity submitted another urb to hep...
+                * odd, rare, error prone, but legal.
+                */
+               kfree(qh);
+               ret = 0;
+       } else
+               ret = musb_schedule(musb, qh,
+                               epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+       if (ret == 0) {
+               urb->hcpriv = qh;
+               /* FIXME set urb->start_frame for iso/intr, it's tested in
+                * musb_start_urb(), but otherwise only konicawc cares ...
+                */
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+       if (ret != 0) {
+               usb_hcd_unlink_urb_from_ep(hcd, urb);
+               kfree(qh);
+       }
+       return ret;
+}
+
+
+/*
+ * Abort a transfer that's at the head of a hardware queue.
+ * Called with the controller locked, irqs blocked.
+ * That hardware queue then advances to the next transfer, unless prevented.
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+       struct musb_hw_ep       *ep = qh->hw_ep;
+       void __iomem            *epio = ep->regs;
+       unsigned                hw_end = ep->epnum;
+       void __iomem            *regs = ep->musb->mregs;
+       u16                     csr;
+       int                     status = 0;
+
+       musb_ep_select(regs, hw_end);
+
+       if (is_dma_capable()) {
+               struct dma_channel      *dma;
+
+               dma = is_in ? ep->rx_channel : ep->tx_channel;
+               if (dma) {
+                       status = ep->musb->dma_controller->channel_abort(dma);
+                       DBG(status ? 1 : 3,
+                               "abort %cX%d DMA for urb %p --> %d\n",
+                               is_in ? 'R' : 'T', ep->epnum,
+                               urb, status);
+                       urb->actual_length += dma->actual_len;
+               }
+       }
+
+       /* turn off DMA requests, discard state, stop polling ... */
+       if (is_in) {
+               /* giveback saves bulk toggle */
+               csr = musb_h_flush_rxfifo(ep, 0);
+
+               /* REVISIT we still get an irq; should likely clear the
+                * endpoint's irq status here to avoid bogus irqs.
+                * clearing that status is platform-specific...
+                */
+       } else {
+               musb_h_tx_flush_fifo(ep);
+               csr = musb_readw(epio, MUSB_TXCSR);
+               csr &= ~(MUSB_TXCSR_AUTOSET
+                       | MUSB_TXCSR_DMAENAB
+                       | MUSB_TXCSR_H_RXSTALL
+                       | MUSB_TXCSR_H_NAKTIMEOUT
+                       | MUSB_TXCSR_H_ERROR
+                       | MUSB_TXCSR_TXPKTRDY);
+               musb_writew(epio, MUSB_TXCSR, csr);
+               /* REVISIT may need to clear FLUSHFIFO ... */
+               musb_writew(epio, MUSB_TXCSR, csr);
+               /* flush cpu writebuffer */
+               csr = musb_readw(epio, MUSB_TXCSR);
+       }
+       if (status == 0)
+               musb_advance_schedule(ep->musb, urb, ep, is_in);
+       return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+       struct musb             *musb = hcd_to_musb(hcd);
+       struct musb_qh          *qh;
+       struct list_head        *sched;
+       unsigned long           flags;
+       int                     ret;
+
+       DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+                       usb_pipedevice(urb->pipe),
+                       usb_pipeendpoint(urb->pipe),
+                       usb_pipein(urb->pipe) ? "in" : "out");
+
+       spin_lock_irqsave(&musb->lock, flags);
+       ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+       if (ret)
+               goto done;
+
+       qh = urb->hcpriv;
+       if (!qh)
+               goto done;
+
+       /* Any URB not actively programmed into endpoint hardware can be
+        * immediately given back.  Such an URB must be at the head of its
+        * endpoint queue, unless someday we get real DMA queues.  And even
+        * then, it might not be known to the hardware...
+        *
+        * Otherwise abort current transfer, pending dma, etc.; urb->status
+        * has already been updated.  This is a synchronous abort; it'd be
+        * OK to hold off until after some IRQ, though.
+        */
+       if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
+               ret = -EINPROGRESS;
+       else {
+               switch (qh->type) {
+               case USB_ENDPOINT_XFER_CONTROL:
+                       sched = &musb->control;
+                       break;
+               case USB_ENDPOINT_XFER_BULK:
+                       if (usb_pipein(urb->pipe))
+                               sched = &musb->in_bulk;
+                       else
+                               sched = &musb->out_bulk;
+                       break;
+               default:
+                       /* REVISIT when we get a schedule tree, periodic
+                        * transfers won't always be at the head of a
+                        * singleton queue...
+                        */
+                       sched = NULL;
+                       break;
+               }
+       }
+
+       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+       if (ret < 0 || (sched && qh != first_qh(sched))) {
+               int     ready = qh->is_ready;
+
+               ret = 0;
+               qh->is_ready = 0;
+               __musb_giveback(musb, urb, 0);
+               qh->is_ready = ready;
+       } else
+               ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+       u8                      epnum = hep->desc.bEndpointAddress;
+       unsigned long           flags;
+       struct musb             *musb = hcd_to_musb(hcd);
+       u8                      is_in = epnum & USB_DIR_IN;
+       struct musb_qh          *qh = hep->hcpriv;
+       struct urb              *urb, *tmp;
+       struct list_head        *sched;
+
+       if (!qh)
+               return;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       switch (qh->type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               sched = &musb->control;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               if (is_in)
+                       sched = &musb->in_bulk;
+               else
+                       sched = &musb->out_bulk;
+               break;
+       default:
+               /* REVISIT when we get a schedule tree, periodic transfers
+                * won't always be at the head of a singleton queue...
+                */
+               sched = NULL;
+               break;
+       }
+
+       /* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
+
+       /* kick first urb off the hardware, if needed */
+       qh->is_ready = 0;
+       if (!sched || qh == first_qh(sched)) {
+               urb = next_urb(qh);
+
+               /* make software (then hardware) stop ASAP */
+               if (!urb->unlinked)
+                       urb->status = -ESHUTDOWN;
+
+               /* cleanup */
+               musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+       } else
+               urb = NULL;
+
+       /* then just nuke all the others */
+       list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+               musb_giveback(qh, urb, -ESHUTDOWN);
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+       struct musb     *musb = hcd_to_musb(hcd);
+
+       return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+       struct musb     *musb = hcd_to_musb(hcd);
+
+       /* NOTE: musb_start() is called when the hub driver turns
+        * on port power, or when (OTG) peripheral starts.
+        */
+       hcd->state = HC_STATE_RUNNING;
+       musb->port1_status = 0;
+       return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+       musb_stop(hcd_to_musb(hcd));
+       hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+       struct musb     *musb = hcd_to_musb(hcd);
+
+       if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+               return 0;
+
+       if (is_host_active(musb) && musb->is_active) {
+               WARNING("trying to suspend as %s is_active=%i\n",
+                       otg_state_string(musb), musb->is_active);
+               return -EBUSY;
+       } else
+               return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+       /* resuming child port does the work */
+       return 0;
+}
+
+const struct hc_driver musb_hc_driver = {
+       .description            = "musb-hcd",
+       .product_desc           = "MUSB HDRC host driver",
+       .hcd_priv_size          = sizeof(struct musb),
+       .flags                  = HCD_USB2 | HCD_MEMORY,
+
+       /* not using irq handler or reset hooks from usbcore, since
+        * those must be shared with peripheral code for OTG configs
+        */
+
+       .start                  = musb_h_start,
+       .stop                   = musb_h_stop,
+
+       .get_frame_number       = musb_h_get_frame_number,
+
+       .urb_enqueue            = musb_urb_enqueue,
+       .urb_dequeue            = musb_urb_dequeue,
+       .endpoint_disable       = musb_h_disable,
+
+       .hub_status_data        = musb_hub_status_data,
+       .hub_control            = musb_hub_control,
+       .bus_suspend            = musb_bus_suspend,
+       .bus_resume             = musb_bus_resume,
+       /* .start_port_reset    = NULL, */
+       /* .hub_irq_enable      = NULL, */
+};
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644 (file)
index 0000000..77bcdb9
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
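+/* struct musb is stored in the hcd_priv[] area at the end of struct usb_hcd
+ * (musb_hc_driver sets hcd_priv_size to sizeof(struct musb)), so the two
+ * structures convert back and forth with plain pointer arithmetic.
+ */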
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+       return container_of((void *) musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+       return (struct musb *) (hcd->hcd_priv);
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+       struct usb_host_endpoint *hep;          /* usbcore info */
+       struct usb_device       *dev;
+       struct musb_hw_ep       *hw_ep;         /* current binding */
+
+       struct list_head        ring;           /* of musb_qh */
+       /* struct musb_qh               *next; */       /* for periodic tree */
+
+       unsigned                offset;         /* in urb->transfer_buffer */
+       unsigned                segsize;        /* current xfer fragment */
+
+       u8                      type_reg;       /* {rx,tx} type register */
+       u8                      intv_reg;       /* {rx,tx} interval register */
+       u8                      addr_reg;       /* device address register */
+       u8                      h_addr_reg;     /* hub address register */
+       u8                      h_port_reg;     /* hub port register */
+
+       u8                      is_ready;       /* safe to modify hw_ep */
+       u8                      type;           /* XFERTYPE_* */
+       u8                      epnum;
+       u16                     maxpacket;
+       u16                     frame;          /* for periodic schedule */
+       unsigned                iso_idx;        /* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+       if (list_empty(q))
+               return NULL;
+       return list_entry(q->next, struct musb_qh, ring);
+}
+
+
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+                       u16 typeReq, u16 wValue, u16 wIndex,
+                       char *buf, u16 wLength);
+
+extern const struct hc_driver musb_hc_driver;
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       struct list_head        *queue;
+
+       if (!qh)
+               return NULL;
+       queue = &qh->hep->urb_list;
+       if (list_empty(queue))
+               return NULL;
+       return list_entry(queue->next, struct urb, urb_list);
+#else
+       return NULL;
+#endif
+}
+
+#endif                         /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644 (file)
index 0000000..6bbedae
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#ifndef        CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+       { insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+       { insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+       { insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+       { outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+       { outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+       { outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE:  these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+       { return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+       { return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+       { __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+       { __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+       u16 tmp;
+       u8 val;
+
+       tmp = __raw_readw(addr + (offset & ~1));
+       if (offset & 1)
+               val = (tmp >> 8);
+       else
+               val = tmp & 0xff;
+
+       return val;
+}
+
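+/* Byte writes are emulated with a 16-bit read-modify-write:  writing 0xab
+ * to an odd offset, for instance, reads the aligned word, replaces its high
+ * byte ((0xab << 8) | (old & 0xff)) and writes the word back.
+ */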
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+       u16 tmp;
+
+       tmp = __raw_readw(addr + (offset & ~1));
+       if (offset & 1)
+               tmp = (data << 8) | (tmp & 0xff);
+       else
+               tmp = (tmp & 0xff00) | data;
+
+       __raw_writew(tmp, addr + (offset & ~1));
+}
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+       { return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+       { __raw_writeb(data, addr + offset); }
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif
diff --git a/drivers/usb/musb/musb_procfs.c b/drivers/usb/musb/musb_procfs.c
new file mode 100644 (file)
index 0000000..55e6b78
--- /dev/null
@@ -0,0 +1,830 @@
+/*
+ * MUSB OTG driver debug support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>     /* FIXME remove procfs writes */
+#include <asm/arch/hardware.h>
+
+#include "musb_core.h"
+
+#include "davinci.h"
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+static int dump_qh(struct musb_qh *qh, char *buf, unsigned max)
+{
+       int                             count;
+       int                             tmp;
+       struct usb_host_endpoint        *hep = qh->hep;
+       struct urb                      *urb;
+
+       count = snprintf(buf, max, "    qh %p dev%d ep%d%s max%d\n",
+                       qh, qh->dev->devnum, qh->epnum,
+                       ({ char *s; switch (qh->type) {
+                       case USB_ENDPOINT_XFER_BULK:
+                               s = "-bulk"; break;
+                       case USB_ENDPOINT_XFER_INT:
+                               s = "-int"; break;
+                       case USB_ENDPOINT_XFER_CONTROL:
+                               s = ""; break;
+                       default:
+                               s = "iso"; break;
+                       }; s; }),
+                       qh->maxpacket);
+       if (count <= 0)
+               return 0;
+       buf += count;
+       max -= count;
+
+       list_for_each_entry(urb, &hep->urb_list, urb_list) {
+               tmp = snprintf(buf, max, "\t%s urb %p %d/%d\n",
+                               usb_pipein(urb->pipe) ? "in" : "out",
+                               urb, urb->actual_length,
+                               urb->transfer_buffer_length);
+               if (tmp <= 0)
+                       break;
+               tmp = min(tmp, (int)max);
+               count += tmp;
+               buf += tmp;
+               max -= tmp;
+       }
+       return count;
+}
+
+static int
+dump_queue(struct list_head *q, char *buf, unsigned max)
+{
+       int             count = 0;
+       struct musb_qh  *qh;
+
+       list_for_each_entry(qh, q, ring) {
+               int     tmp;
+
+               tmp = dump_qh(qh, buf, max);
+               if (tmp <= 0)
+                       break;
+               tmp = min(tmp, (int)max);
+               count += tmp;
+               buf += tmp;
+               max -= tmp;
+       }
+       return count;
+}
+
+#endif /* HCD */
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static int dump_ep(struct musb_ep *ep, char *buffer, unsigned max)
+{
+       char            *buf = buffer;
+       int             code = 0;
+       void __iomem    *regs = ep->hw_ep->regs;
+       char            *mode = "1buf";
+
+       if (ep->is_in) {
+               if (ep->hw_ep->tx_double_buffered)
+                       mode = "2buf";
+       } else {
+               if (ep->hw_ep->rx_double_buffered)
+                       mode = "2buf";
+       }
+
+       do {
+               struct usb_request      *req;
+
+               code = snprintf(buf, max,
+                               "\n%s (hw%d): %s%s, csr %04x maxp %04x\n",
+                               ep->name, ep->current_epnum,
+                               mode, ep->dma ? " dma" : "",
+                               musb_readw(regs,
+                                       (ep->is_in || !ep->current_epnum)
+                                               ? MUSB_TXCSR
+                                               : MUSB_RXCSR),
+                               musb_readw(regs, ep->is_in
+                                               ? MUSB_TXMAXP
+                                               : MUSB_RXMAXP)
+                               );
+               if (code <= 0)
+                       break;
+               code = min(code, (int) max);
+               buf += code;
+               max -= code;
+
+               if (is_cppi_enabled() && ep->current_epnum) {
+                       unsigned        cppi = ep->current_epnum - 1;
+                       void __iomem    *base = ep->musb->ctrl_base;
+                       unsigned        off1 = cppi << 2;
+                       void __iomem    *ram = base;
+                       char            tmp[16];
+
+                       if (ep->is_in) {
+                               ram += DAVINCI_TXCPPI_STATERAM_OFFSET(cppi);
+                               tmp[0] = 0;
+                       } else {
+                               ram += DAVINCI_RXCPPI_STATERAM_OFFSET(cppi);
+                               snprintf(tmp, sizeof tmp, "%d left, ",
+                                       musb_readl(base,
+                                       DAVINCI_RXCPPI_BUFCNT0_REG + off1));
+                       }
+
+                       code = snprintf(buf, max, "%cX DMA%d: %s"
+                                       "%08x %08x, %08x %08x; "
+                                       "%08x %08x %08x .. %08x\n",
+                               ep->is_in ? 'T' : 'R',
+                               ep->current_epnum - 1, tmp,
+                               musb_readl(ram, 0 * 4),
+                               musb_readl(ram, 1 * 4),
+                               musb_readl(ram, 2 * 4),
+                               musb_readl(ram, 3 * 4),
+                               musb_readl(ram, 4 * 4),
+                               musb_readl(ram, 5 * 4),
+                               musb_readl(ram, 6 * 4),
+                               musb_readl(ram, 7 * 4));
+                       if (code <= 0)
+                               break;
+                       code = min(code, (int) max);
+                       buf += code;
+                       max -= code;
+               }
+
+               if (list_empty(&ep->req_list)) {
+                       code = snprintf(buf, max, "\t(queue empty)\n");
+                       if (code <= 0)
+                               break;
+                       code = min(code, (int) max);
+                       buf += code;
+                       max -= code;
+                       break;
+               }
+               list_for_each_entry(req, &ep->req_list, list) {
+                       code = snprintf(buf, max, "\treq %p, %s%s%d/%d\n",
+                                       req,
+                                       req->zero ? "zero, " : "",
+                                       req->short_not_ok ? "!short, " : "",
+                                       req->actual, req->length);
+                       if (code <= 0)
+                               break;
+                       code = min(code, (int) max);
+                       buf += code;
+                       max -= code;
+               }
+       } while (0);
+       return buf - buffer;
+}
+#endif
+
+static int
+dump_end_info(struct musb *musb, u8 epnum, char *aBuffer, unsigned max)
+{
+       int                     code = 0;
+       char                    *buf = aBuffer;
+       struct musb_hw_ep       *hw_ep = &musb->endpoints[epnum];
+
+       do {
+               musb_ep_select(musb->mregs, epnum);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+               if (is_host_active(musb)) {
+                       int             dump_rx, dump_tx;
+                       void __iomem    *regs = hw_ep->regs;
+
+                       /* TEMPORARY (!) until we have a real periodic
+                        * schedule tree ...
+                        */
+                       if (!epnum) {
+                               /* control is shared, uses RX queue
+                                * but (mostly) shadowed tx registers
+                                */
+                               dump_tx = !list_empty(&musb->control);
+                               dump_rx = 0;
+                       } else if (hw_ep == musb->bulk_ep) {
+                               dump_tx = !list_empty(&musb->out_bulk);
+                               dump_rx = !list_empty(&musb->in_bulk);
+                       } else if (musb->periodic[epnum]) {
+                               struct usb_host_endpoint        *hep;
+
+                               hep = musb->periodic[epnum]->hep;
+                               dump_rx = hep->desc.bEndpointAddress
+                                               & USB_ENDPOINT_DIR_MASK;
+                               dump_tx = !dump_rx;
+                       } else
+                               break;
+                       /* END TEMPORARY */
+
+
+                       if (dump_rx) {
+                               code = snprintf(buf, max,
+                                       "\nRX%d: %s rxcsr %04x interval %02x "
+                                       "max %04x type %02x; "
+                                       "dev %d hub %d port %d"
+                                       "\n",
+                                       epnum,
+                                       hw_ep->rx_double_buffered
+                                               ? "2buf" : "1buf",
+                                       musb_readw(regs, MUSB_RXCSR),
+                                       musb_readb(regs, MUSB_RXINTERVAL),
+                                       musb_readw(regs, MUSB_RXMAXP),
+                                       musb_readb(regs, MUSB_RXTYPE),
+                                       /* FIXME:  assumes multipoint */
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_RXFUNCADDR)),
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_RXHUBADDR)),
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_RXHUBPORT))
+                                       );
+                               if (code <= 0)
+                                       break;
+                               code = min(code, (int) max);
+                               buf += code;
+                               max -= code;
+
+                               if (is_cppi_enabled()
+                                               && epnum
+                                               && hw_ep->rx_channel) {
+                                       unsigned        cppi = epnum - 1;
+                                       unsigned        off1 = cppi << 2;
+                                       void __iomem    *base;
+                                       void __iomem    *ram;
+                                       char            tmp[16];
+
+                                       base = musb->ctrl_base;
+                                       ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
+                                                       cppi) + base;
+                                       snprintf(tmp, sizeof tmp, "%d left, ",
+                                               musb_readl(base,
+                                               DAVINCI_RXCPPI_BUFCNT0_REG
+                                                               + off1));
+
+                                       code = snprintf(buf, max,
+                                               "    rx dma%d: %s"
+                                               "%08x %08x, %08x %08x; "
+                                               "%08x %08x %08x .. %08x\n",
+                                               cppi, tmp,
+                                               musb_readl(ram, 0 * 4),
+                                               musb_readl(ram, 1 * 4),
+                                               musb_readl(ram, 2 * 4),
+                                               musb_readl(ram, 3 * 4),
+                                               musb_readl(ram, 4 * 4),
+                                               musb_readl(ram, 5 * 4),
+                                               musb_readl(ram, 6 * 4),
+                                               musb_readl(ram, 7 * 4));
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               }
+
+                               if (hw_ep == musb->bulk_ep
+                                               && !list_empty(
+                                                       &musb->in_bulk)) {
+                                       code = dump_queue(&musb->in_bulk,
+                                                       buf, max);
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               } else if (musb->periodic[epnum]) {
+                                       code = dump_qh(musb->periodic[epnum],
+                                                       buf, max);
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               }
+                       }
+
+                       if (dump_tx) {
+                               code = snprintf(buf, max,
+                                       "\nTX%d: %s txcsr %04x interval %02x "
+                                       "max %04x type %02x; "
+                                       "dev %d hub %d port %d"
+                                       "\n",
+                                       epnum,
+                                       hw_ep->tx_double_buffered
+                                               ? "2buf" : "1buf",
+                                       musb_readw(regs, MUSB_TXCSR),
+                                       musb_readb(regs, MUSB_TXINTERVAL),
+                                       musb_readw(regs, MUSB_TXMAXP),
+                                       musb_readb(regs, MUSB_TXTYPE),
+                                       /* FIXME:  assumes multipoint */
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_TXFUNCADDR)),
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_TXHUBADDR)),
+                                       musb_readb(musb->mregs,
+                                               MUSB_BUSCTL_OFFSET(epnum,
+                                               MUSB_TXHUBPORT))
+                                       );
+                               if (code <= 0)
+                                       break;
+                               code = min(code, (int) max);
+                               buf += code;
+                               max -= code;
+
+                               if (is_cppi_enabled()
+                                               && epnum
+                                               && hw_ep->tx_channel) {
+                                       unsigned        cppi = epnum - 1;
+                                       void __iomem    *base;
+                                       void __iomem    *ram;
+
+                                       base = musb->ctrl_base;
+                                       ram = DAVINCI_RXCPPI_STATERAM_OFFSET(
+                                                       cppi) + base;
+                                       code = snprintf(buf, max,
+                                               "    tx dma%d: "
+                                               "%08x %08x, %08x %08x; "
+                                               "%08x %08x %08x .. %08x\n",
+                                               cppi,
+                                               musb_readl(ram, 0 * 4),
+                                               musb_readl(ram, 1 * 4),
+                                               musb_readl(ram, 2 * 4),
+                                               musb_readl(ram, 3 * 4),
+                                               musb_readl(ram, 4 * 4),
+                                               musb_readl(ram, 5 * 4),
+                                               musb_readl(ram, 6 * 4),
+                                               musb_readl(ram, 7 * 4));
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               }
+
+                               if (hw_ep == musb->control_ep
+                                               && !list_empty(
+                                                       &musb->control)) {
+                                       code = dump_queue(&musb->control,
+                                                       buf, max);
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               } else if (hw_ep == musb->bulk_ep
+                                               && !list_empty(
+                                                       &musb->out_bulk)) {
+                                       code = dump_queue(&musb->out_bulk,
+                                                       buf, max);
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               } else if (musb->periodic[epnum]) {
+                                       code = dump_qh(musb->periodic[epnum],
+                                                       buf, max);
+                                       if (code <= 0)
+                                               break;
+                                       code = min(code, (int) max);
+                                       buf += code;
+                                       max -= code;
+                               }
+                       }
+               }
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+               if (is_peripheral_active(musb)) {
+                       code = 0;
+
+                       if (hw_ep->ep_in.desc || !epnum) {
+                               code = dump_ep(&hw_ep->ep_in, buf, max);
+                               if (code <= 0)
+                                       break;
+                               code = min(code, (int) max);
+                               buf += code;
+                               max -= code;
+                       }
+                       if (hw_ep->ep_out.desc) {
+                               code = dump_ep(&hw_ep->ep_out, buf, max);
+                               if (code <= 0)
+                                       break;
+                               code = min(code, (int) max);
+                               buf += code;
+                               max -= code;
+                       }
+               }
+#endif
+       } while (0);
+
+       return buf - aBuffer;
+}
+
+/* Dump the current status and compile options.
+ * @param musb the device driver instance
+ * @param buffer where to dump the status; it must be big enough to hold the
+ * result, otherwise "BAD THINGS HAPPEN (TM)".
+ */
+static int dump_header_stats(struct musb *musb, char *buffer)
+{
+       int code, count = 0;
+       const void __iomem *mbase = musb->mregs;
+
+       *buffer = 0;
+       count = sprintf(buffer, "Status: %sHDRC, Mode=%s "
+                               "(Power=%02x, DevCtl=%02x)\n",
+                       (musb->is_multipoint ? "M" : ""), MUSB_MODE(musb),
+                       musb_readb(mbase, MUSB_POWER),
+                       musb_readb(mbase, MUSB_DEVCTL));
+       if (count <= 0)
+               return 0;
+       buffer += count;
+
+       code = sprintf(buffer, "OTG state: %s; %sactive\n",
+                       otg_state_string(musb),
+                       musb->is_active ? "" : "in");
+       if (code <= 0)
+               goto done;
+       buffer += code;
+       count += code;
+
+       code = sprintf(buffer,
+                       "Options: "
+#ifdef CONFIG_MUSB_PIO_ONLY
+                       "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+                       "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+                       "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+                       "tusb-omap-dma"
+#else
+                       "?dma?"
+#endif
+                       ", "
+#ifdef CONFIG_USB_MUSB_OTG
+                       "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+                       "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+                       "host"
+#endif
+                       ", debug=%d [eps=%d]\n",
+               debug,
+               musb->nr_endpoints);
+       if (code <= 0)
+               goto done;
+       count += code;
+       buffer += code;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       code = sprintf(buffer, "Peripheral address: %02x\n",
+                       musb_readb(musb->ctrl_base, MUSB_FADDR));
+       if (code <= 0)
+               goto done;
+       buffer += code;
+       count += code;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       code = sprintf(buffer, "Root port status: %08x\n",
+                       musb->port1_status);
+       if (code <= 0)
+               goto done;
+       buffer += code;
+       count += code;
+#endif
+
+#ifdef CONFIG_ARCH_DAVINCI
+       code = sprintf(buffer,
+                       "DaVinci: ctrl=%02x stat=%1x phy=%03x\n"
+                       "\trndis=%05x auto=%04x intsrc=%08x intmsk=%08x"
+                       "\n",
+                       musb_readl(musb->ctrl_base, DAVINCI_USB_CTRL_REG),
+                       musb_readl(musb->ctrl_base, DAVINCI_USB_STAT_REG),
+                       __raw_readl((void __force __iomem *)
+                                       IO_ADDRESS(USBPHY_CTL_PADDR)),
+                       musb_readl(musb->ctrl_base, DAVINCI_RNDIS_REG),
+                       musb_readl(musb->ctrl_base, DAVINCI_AUTOREQ_REG),
+                       musb_readl(musb->ctrl_base,
+                                       DAVINCI_USB_INT_SOURCE_REG),
+                       musb_readl(musb->ctrl_base,
+                                       DAVINCI_USB_INT_MASK_REG));
+       if (code <= 0)
+               goto done;
+       count += code;
+       buffer += code;
+#endif /* DAVINCI */
+
+#ifdef CONFIG_USB_TUSB6010
+       code = sprintf(buffer,
+                       "TUSB6010: devconf %08x, phy enable %08x drive %08x"
+                       "\n\totg %03x timer %08x"
+                       "\n\tprcm conf %08x mgmt %08x; int src %08x mask %08x"
+                       "\n",
+                       musb_readl(musb->ctrl_base, TUSB_DEV_CONF),
+                       musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL_ENABLE),
+                       musb_readl(musb->ctrl_base, TUSB_PHY_OTG_CTRL),
+                       musb_readl(musb->ctrl_base, TUSB_DEV_OTG_STAT),
+                       musb_readl(musb->ctrl_base, TUSB_DEV_OTG_TIMER),
+                       musb_readl(musb->ctrl_base, TUSB_PRCM_CONF),
+                       musb_readl(musb->ctrl_base, TUSB_PRCM_MNGMT),
+                       musb_readl(musb->ctrl_base, TUSB_INT_SRC),
+                       musb_readl(musb->ctrl_base, TUSB_INT_MASK));
+       if (code <= 0)
+               goto done;
+       count += code;
+       buffer += code;
+#endif /* CONFIG_USB_TUSB6010 */
+
+       if (is_cppi_enabled() && musb->dma_controller) {
+               code = sprintf(buffer,
+                               "CPPI: txcr=%d txsrc=%01x txena=%01x; "
+                               "rxcr=%d rxsrc=%01x rxena=%01x "
+                               "\n",
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_TXCPPI_CTRL_REG),
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_TXCPPI_RAW_REG),
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_TXCPPI_INTENAB_REG),
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_RXCPPI_CTRL_REG),
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_RXCPPI_RAW_REG),
+                               musb_readl(musb->ctrl_base,
+                                               DAVINCI_RXCPPI_INTENAB_REG));
+               if (code <= 0)
+                       goto done;
+               count += code;
+               buffer += code;
+       }
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       if (is_peripheral_enabled(musb)) {
+               code = sprintf(buffer, "Gadget driver: %s\n",
+                               musb->gadget_driver
+                                       ? musb->gadget_driver->driver.name
+                                       : "(none)");
+               if (code <= 0)
+                       goto done;
+               count += code;
+               buffer += code;
+       }
+#endif
+
+done:
+       return count;
+}
+
+/* Write to ProcFS
+ *
+ * C soft-connect
+ * c soft-disconnect
+ * I enable HS
+ * i disable HS
+ * s stop session
+ * F force session (OTG-unfriendly)
+ * E rElinquish bus (OTG)
+ * H request host mode
+ * h cancel host request
+ * T start sending TEST_PACKET
+ * D<num> set/query the debug level
+ */
+static int musb_proc_write(struct file *file, const char __user *buffer,
+                       unsigned long count, void *data)
+{
+       char cmd;
+       u8 reg;
+       struct musb *musb = (struct musb *)data;
+       void __iomem *mbase = musb->mregs;
+
+       /* MOD_INC_USE_COUNT; */
+
+       if (unlikely(copy_from_user(&cmd, buffer, 1)))
+               return -EFAULT;
+
+       switch (cmd) {
+       case 'C':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_POWER)
+                                       | MUSB_POWER_SOFTCONN;
+                       musb_writeb(mbase, MUSB_POWER, reg);
+               }
+               break;
+
+       case 'c':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_POWER)
+                                       & ~MUSB_POWER_SOFTCONN;
+                       musb_writeb(mbase, MUSB_POWER, reg);
+               }
+               break;
+
+       case 'I':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_POWER)
+                                       | MUSB_POWER_HSENAB;
+                       musb_writeb(mbase, MUSB_POWER, reg);
+               }
+               break;
+
+       case 'i':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_POWER)
+                                       & ~MUSB_POWER_HSENAB;
+                       musb_writeb(mbase, MUSB_POWER, reg);
+               }
+               break;
+
+       case 'F':
+               reg = musb_readb(mbase, MUSB_DEVCTL);
+               reg |= MUSB_DEVCTL_SESSION;
+               musb_writeb(mbase, MUSB_DEVCTL, reg);
+               break;
+
+       case 'H':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_DEVCTL);
+                       reg |= MUSB_DEVCTL_HR;
+                       musb_writeb(mbase, MUSB_DEVCTL, reg);
+                       /* MUSB_HST_MODE( ((struct musb*)data) ); */
+                       /* WARNING("Host Mode\n"); */
+               }
+               break;
+
+       case 'h':
+               if (mbase) {
+                       reg = musb_readb(mbase, MUSB_DEVCTL);
+                       reg &= ~MUSB_DEVCTL_HR;
+                       musb_writeb(mbase, MUSB_DEVCTL, reg);
+               }
+               break;
+
+       case 'T':
+               if (mbase) {
+                       musb_load_testpacket(musb);
+                       musb_writeb(mbase, MUSB_TESTMODE,
+                                       MUSB_TEST_PACKET);
+               }
+               break;
+
+#if (MUSB_DEBUG > 0)
+               /* set/read debug level */
+       case 'D':{
+                       if (count > 1) {
+                               char digits[8], *p = digits;
+                               int i = 0, level = 0, sign = 1;
+                               int len = min(count - 1, (unsigned long)8);
+
+                               if (copy_from_user(&digits, &buffer[1], len))
+                                       return -EFAULT;
+
+                               /* optional sign */
+                               if (*p == '-') {
+                                       len -= 1;
+                                       sign = -sign;
+                                       p++;
+                               }
+
+                               /* read it */
+                               while (i++ < len && *p >= '0' && *p <= '9') {
+                                       level = level * 10 + (*p - '0');
+                                       p++;
+                               }
+
+                               level *= sign;
+                               DBG(1, "debug level %d\n", level);
+                               debug = level;
+                       }
+               }
+               break;
+
+
+       case '?':
+               INFO("?: you are seeing it\n");
+               INFO("C/c: soft connect enable/disable\n");
+               INFO("I/i: hispeed enable/disable\n");
+               INFO("F: force session start\n");
+               INFO("H: host mode\n");
+               INFO("T: start sending TEST_PACKET\n");
+               INFO("D: set/read debug level\n");
+               break;
+#endif
+
+       default:
+               ERR("Command %c not implemented\n", cmd);
+               break;
+       }
+
+       musb_platform_try_idle(musb, 0);
+
+       return count;
+}
+
+static int musb_proc_read(char *page, char **start,
+                       off_t off, int count, int *eof, void *data)
+{
+       char *buffer = page;
+       int code = 0;
+       unsigned long   flags;
+       struct musb     *musb = data;
+       unsigned        epnum;
+
+       count -= off;
+       count -= 1;             /* for NUL at end */
+       if (count <= 0)
+               return -EINVAL;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       code = dump_header_stats(musb, buffer);
+       if (code > 0) {
+               buffer += code;
+               count -= code;
+       }
+
+       /* generate the report for the end points */
+       /* REVISIT ... not unless something's connected! */
+       for (epnum = 0; count >= 0 && epnum < musb->nr_endpoints;
+                       epnum++) {
+               code = dump_end_info(musb, epnum, buffer, count);
+               if (code > 0) {
+                       buffer += code;
+                       count -= code;
+               }
+       }
+
+       musb_platform_try_idle(musb, 0);
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+       *eof = 1;
+
+       return buffer - page;
+}
+
+void __devexit musb_debug_delete(char *name, struct musb *musb)
+{
+       if (musb->proc_entry)
+               remove_proc_entry(name, NULL);
+}
+
+struct proc_dir_entry *__init
+musb_debug_create(char *name, struct musb *data)
+{
+       struct proc_dir_entry   *pde;
+
+       /* FIXME convert everything to seq_file; then later, debugfs */
+
+       if (!name)
+               return NULL;
+
+       pde = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, NULL);
+       data->proc_entry = pde;
+       if (pde) {
+               pde->data = data;
+               /* pde->owner = THIS_MODULE; */
+
+               pde->read_proc = musb_proc_read;
+               pde->write_proc = musb_proc_write;
+
+               pde->size = 0;
+
+               pr_debug("Registered /proc/%s\n", name);
+       } else {
+               pr_debug("Cannot create a valid proc file entry\n");
+       }
+
+       return pde;
+}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644 (file)
index 0000000..9c22866
--- /dev/null
@@ -0,0 +1,300 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE      64      /* This is non-configurable */
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR             0x00    /* 8-bit */
+#define MUSB_POWER             0x01    /* 8-bit */
+
+#define MUSB_INTRTX            0x02    /* 16-bit */
+#define MUSB_INTRRX            0x04
+#define MUSB_INTRTXE           0x06
+#define MUSB_INTRRXE           0x08
+#define MUSB_INTRUSB           0x0A    /* 8 bit */
+#define MUSB_INTRUSBE          0x0B    /* 8 bit */
+#define MUSB_FRAME             0x0C
+#define MUSB_INDEX             0x0E    /* 8 bit */
+#define MUSB_TESTMODE          0x0F    /* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum)        (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum)        (0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL            0x60    /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ          0x62    /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ          0x63    /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD         0x64    /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD         0x66    /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS            0x6C    /* 8 bit */
+
+#define MUSB_EPINFO            0x78    /* 8 bit */
+#define MUSB_RAMINFO           0x79    /* 8 bit */
+#define MUSB_LINKINFO          0x7a    /* 8 bit */
+#define MUSB_VPLEN             0x7b    /* 8 bit */
+#define MUSB_HS_EOF1           0x7c    /* 8 bit */
+#define MUSB_FS_EOF1           0x7d    /* 8 bit */
+#define MUSB_LS_EOF1           0x7e    /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP            0x00
+#define MUSB_TXCSR             0x02
+#define MUSB_CSR0              MUSB_TXCSR      /* Re-used for EP0 */
+#define MUSB_RXMAXP            0x04
+#define MUSB_RXCSR             0x06
+#define MUSB_RXCOUNT           0x08
+#define MUSB_COUNT0            MUSB_RXCOUNT    /* Re-used for EP0 */
+#define MUSB_TXTYPE            0x0A
+#define MUSB_TYPE0             MUSB_TXTYPE     /* Re-used for EP0 */
+#define MUSB_TXINTERVAL                0x0B
+#define MUSB_NAKLIMIT0         MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE            0x0C
+#define MUSB_RXINTERVAL                0x0D
+#define MUSB_FIFOSIZE          0x0F
+#define MUSB_CONFIGDATA                MUSB_FIFOSIZE   /* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset)   \
+       (0x10 + (_offset))
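+/* With an endpoint selected via the INDEX register, that endpoint's
+ * registers always appear at offsets 0x10..0x1f.
+ */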
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset)      \
+       (0x100 + (0x10*(_epnum)) + (_offset))
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset)      \
+       (0x10 + _offset)
+#include "tusb6010.h"          /* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR                0x00
+#define MUSB_TXHUBADDR         0x02
+#define MUSB_TXHUBPORT         0x03
+
+#define MUSB_RXFUNCADDR                0x04
+#define MUSB_RXHUBADDR         0x06
+#define MUSB_RXHUBPORT         0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+       (0x80 + (8*(_epnum)) + (_offset))
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE   0x80
+#define MUSB_POWER_SOFTCONN    0x40
+#define MUSB_POWER_HSENAB      0x20
+#define MUSB_POWER_HSMODE      0x10
+#define MUSB_POWER_RESET       0x08
+#define MUSB_POWER_RESUME      0x04
+#define MUSB_POWER_SUSPENDM    0x02
+#define MUSB_POWER_ENSUSPEND   0x01
+
+/* INTRUSB */
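+/* Note: bit 0x04 is shared; it reads as RESET in peripheral mode and as
+ * BABBLE in host mode.
+ */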
+#define MUSB_INTR_SUSPEND      0x01
+#define MUSB_INTR_RESUME       0x02
+#define MUSB_INTR_RESET                0x04
+#define MUSB_INTR_BABBLE       0x04
+#define MUSB_INTR_SOF          0x08
+#define MUSB_INTR_CONNECT      0x10
+#define MUSB_INTR_DISCONNECT   0x20
+#define MUSB_INTR_SESSREQ      0x40
+#define MUSB_INTR_VBUSERROR    0x80    /* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE    0x80
+#define MUSB_DEVCTL_FSDEV      0x40
+#define MUSB_DEVCTL_LSDEV      0x20
+#define MUSB_DEVCTL_VBUS       0x18
+#define MUSB_DEVCTL_VBUS_SHIFT 3
+#define MUSB_DEVCTL_HM         0x04
+#define MUSB_DEVCTL_HR         0x02
+#define MUSB_DEVCTL_SESSION    0x01
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST   0x80
+#define MUSB_TEST_FIFO_ACCESS  0x40
+#define MUSB_TEST_FORCE_FS     0x20
+#define MUSB_TEST_FORCE_HS     0x10
+#define MUSB_TEST_PACKET       0x08
+#define MUSB_TEST_K            0x04
+#define MUSB_TEST_J            0x02
+#define MUSB_TEST_SE0_NAK      0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB        0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE       0x0f
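+/* The allocated FIFO is 2^(SIZE+3) bytes; DPB doubles that allocation. */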
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO    0x0100
+#define MUSB_CSR0_TXPKTRDY     0x0002
+#define MUSB_CSR0_RXPKTRDY     0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND        0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY        0x0040
+#define MUSB_CSR0_P_SENDSTALL  0x0020
+#define MUSB_CSR0_P_SETUPEND   0x0010
+#define MUSB_CSR0_P_DATAEND    0x0008
+#define MUSB_CSR0_P_SENTSTALL  0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING           0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE      0x0400  /* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE         0x0200  /* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT         0x0080
+#define MUSB_CSR0_H_STATUSPKT          0x0040
+#define MUSB_CSR0_H_REQPKT             0x0020
+#define MUSB_CSR0_H_ERROR              0x0010
+#define MUSB_CSR0_H_SETUPPKT           0x0008
+#define MUSB_CSR0_H_RXSTALL            0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS   \
+       (MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS   \
+       (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+       | MUSB_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED                0xc0
+#define MUSB_TYPE_SPEED_SHIFT  6
+#define MUSB_TYPE_PROTO                0x30    /* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT  4
+#define MUSB_TYPE_REMOTE_END   0xf     /* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE          0x80    /* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE          0x40    /* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN      0x20
+#define MUSB_CONFIGDATA_HBRXE          0x10    /* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE          0x08    /* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO                0x04    /* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE       0x02    /* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW         0x01    /* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET             0x8000
+#define MUSB_TXCSR_MODE                        0x2000
+#define MUSB_TXCSR_DMAENAB             0x1000
+#define MUSB_TXCSR_FRCDATATOG          0x0800
+#define MUSB_TXCSR_DMAMODE             0x0400
+#define MUSB_TXCSR_CLRDATATOG          0x0040
+#define MUSB_TXCSR_FLUSHFIFO           0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY                0x0002
+#define MUSB_TXCSR_TXPKTRDY            0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO               0x4000
+#define MUSB_TXCSR_P_INCOMPTX          0x0080
+#define MUSB_TXCSR_P_SENTSTALL         0x0020
+#define MUSB_TXCSR_P_SENDSTALL         0x0010
+#define MUSB_TXCSR_P_UNDERRUN          0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE     0x0200
+#define MUSB_TXCSR_H_DATATOGGLE                0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT                0x0080
+#define MUSB_TXCSR_H_RXSTALL           0x0020
+#define MUSB_TXCSR_H_ERROR             0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS  \
+       (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+       | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS  \
+       (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+       | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR           0x8000
+#define MUSB_RXCSR_DMAENAB             0x2000
+#define MUSB_RXCSR_DISNYET             0x1000
+#define MUSB_RXCSR_PID_ERR             0x1000
+#define MUSB_RXCSR_DMAMODE             0x0800
+#define MUSB_RXCSR_INCOMPRX            0x0100
+#define MUSB_RXCSR_CLRDATATOG          0x0080
+#define MUSB_RXCSR_FLUSHFIFO           0x0010
+#define MUSB_RXCSR_DATAERROR           0x0008
+#define MUSB_RXCSR_FIFOFULL            0x0002
+#define MUSB_RXCSR_RXPKTRDY            0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO               0x4000
+#define MUSB_RXCSR_P_SENTSTALL         0x0040
+#define MUSB_RXCSR_P_SENDSTALL         0x0020
+#define MUSB_RXCSR_P_OVERRUN           0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ           0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE     0x0400
+#define MUSB_RXCSR_H_DATATOGGLE                0x0200
+#define MUSB_RXCSR_H_RXSTALL           0x0040
+#define MUSB_RXCSR_H_REQPKT            0x0020
+#define MUSB_RXCSR_H_ERROR             0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS  \
+       (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+       | MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS  \
+       (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+       | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT          0x80
+
+#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644 (file)
index 0000000..e0e9ce5
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+       u8              power;
+       void __iomem    *mbase = musb->mregs;
+
+       if (!is_host_active(musb))
+               return;
+
+       /* NOTE:  this doesn't necessarily put PHY into low power mode,
+        * turning off its clock; that's a function of PHY integration and
+        * MUSB_POWER_ENSUSPEND.  PHY may need a clock (sigh) to detect
+        * SE0 changing to connect (J) or wakeup (K) states.
+        */
+       power = musb_readb(mbase, MUSB_POWER);
+       if (do_suspend) {
+               int retries = 10000;
+
+               power &= ~MUSB_POWER_RESUME;
+               power |= MUSB_POWER_SUSPENDM;
+               musb_writeb(mbase, MUSB_POWER, power);
+
+               /* Needed for OPT A tests */
+               power = musb_readb(mbase, MUSB_POWER);
+               while (power & MUSB_POWER_SUSPENDM) {
+                       power = musb_readb(mbase, MUSB_POWER);
+                       if (retries-- < 1)
+                               break;
+               }
+
+               DBG(3, "Root port suspended, power %02x\n", power);
+
+               musb->port1_status |= USB_PORT_STAT_SUSPEND;
+               switch (musb->xceiv.state) {
+               case OTG_STATE_A_HOST:
+                       musb->xceiv.state = OTG_STATE_A_SUSPEND;
+                       musb->is_active = is_otg_enabled(musb)
+                                       && musb->xceiv.host->b_hnp_enable;
+                       musb_platform_try_idle(musb, 0);
+                       break;
+#ifdef CONFIG_USB_MUSB_OTG
+               case OTG_STATE_B_HOST:
+                       musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+                       musb->is_active = is_otg_enabled(musb)
+                                       && musb->xceiv.host->b_hnp_enable;
+                       musb_platform_try_idle(musb, 0);
+                       break;
+#endif
+               default:
+                       DBG(1, "bogus rh suspend? %s\n",
+                               otg_state_string(musb));
+               }
+       } else if (power & MUSB_POWER_SUSPENDM) {
+               power &= ~MUSB_POWER_SUSPENDM;
+               power |= MUSB_POWER_RESUME;
+               musb_writeb(mbase, MUSB_POWER, power);
+
+               DBG(3, "Root port resuming, power %02x\n", power);
+
+               /* later, GetPortStatus will stop RESUME signaling */
+               musb->port1_status |= MUSB_PORT_STAT_RESUME;
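+               /* resume signaling must be driven for at least 20 ms */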
+               musb->rh_timer = jiffies + msecs_to_jiffies(20);
+       }
+}
+
+static void musb_port_reset(struct musb *musb, bool do_reset)
+{
+       u8              power;
+       void __iomem    *mbase = musb->mregs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+       if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+               DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+               musb->port1_status &= ~USB_PORT_STAT_RESET;
+               return;
+       }
+#endif
+
+       if (!is_host_active(musb))
+               return;
+
+       /* NOTE:  caller guarantees it will turn off the reset when
+        * the appropriate amount of time has passed
+        */
+       power = musb_readb(mbase, MUSB_POWER);
+       if (do_reset) {
+
+               /*
+                * If RESUME is set, we must make sure it stays minimum 20 ms.
+                * Then we must clear RESUME and wait a bit to let musb start
+                * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+                * fail with "Error! Did not receive an SOF before suspend
+                * detected".
+                */
+               if (power & MUSB_POWER_RESUME) {
+                       while (time_before(jiffies, musb->rh_timer))
+                               msleep(1);
+                       musb_writeb(mbase, MUSB_POWER,
+                               power & ~MUSB_POWER_RESUME);
+                       msleep(1);
+               }
+
+               musb->ignore_disconnect = true;
+               power &= 0xf0;
+               musb_writeb(mbase, MUSB_POWER,
+                               power | MUSB_POWER_RESET);
+
+               musb->port1_status |= USB_PORT_STAT_RESET;
+               musb->port1_status &= ~USB_PORT_STAT_ENABLE;
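+               /* the root port must drive reset for at least 50 ms */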
+               musb->rh_timer = jiffies + msecs_to_jiffies(50);
+       } else {
+               DBG(4, "root port reset stopped\n");
+               musb_writeb(mbase, MUSB_POWER,
+                               power & ~MUSB_POWER_RESET);
+
+               musb->ignore_disconnect = false;
+
+               power = musb_readb(mbase, MUSB_POWER);
+               if (power & MUSB_POWER_HSMODE) {
+                       DBG(4, "high-speed device connected\n");
+                       musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+               }
+
+               musb->port1_status &= ~USB_PORT_STAT_RESET;
+               musb->port1_status |= USB_PORT_STAT_ENABLE
+                                       | (USB_PORT_STAT_C_RESET << 16)
+                                       | (USB_PORT_STAT_C_ENABLE << 16);
+               usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+               musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+       }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+       musb->port1_status = (1 << USB_PORT_FEAT_POWER)
+                       | (1 << USB_PORT_FEAT_C_CONNECTION);
+
+       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+       musb->is_active = 0;
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_HOST:
+       case OTG_STATE_A_SUSPEND:
+               musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+               musb->is_active = 0;
+               break;
+       case OTG_STATE_A_WAIT_VFALL:
+               musb->xceiv.state = OTG_STATE_B_IDLE;
+               break;
+       default:
+               DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+       }
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+       struct musb     *musb = hcd_to_musb(hcd);
+       int             retval = 0;
+
+       /* called in_irq() via usb_hcd_poll_rh_status() */
+       if (musb->port1_status & 0xffff0000) {
+               *buf = 0x02;
+               retval = 1;
+       }
+       return retval;
+}
+
+int musb_hub_control(
+       struct usb_hcd  *hcd,
+       u16             typeReq,
+       u16             wValue,
+       u16             wIndex,
+       char            *buf,
+       u16             wLength)
+{
+       struct musb     *musb = hcd_to_musb(hcd);
+       u32             temp;
+       int             retval = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+               spin_unlock_irqrestore(&musb->lock, flags);
+               return -ESHUTDOWN;
+       }
+
+       /* hub features:  always zero, setting is a NOP
+        * port features: reported, sometimes updated when host is active
+        * no indicators
+        */
+       switch (typeReq) {
+       case ClearHubFeature:
+       case SetHubFeature:
+               switch (wValue) {
+               case C_HUB_OVER_CURRENT:
+               case C_HUB_LOCAL_POWER:
+                       break;
+               default:
+                       goto error;
+               }
+               break;
+       case ClearPortFeature:
+               if ((wIndex & 0xff) != 1)
+                       goto error;
+
+               switch (wValue) {
+               case USB_PORT_FEAT_ENABLE:
+                       break;
+               case USB_PORT_FEAT_SUSPEND:
+                       musb_port_suspend(musb, false);
+                       break;
+               case USB_PORT_FEAT_POWER:
+                       if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+                               musb_set_vbus(musb, 0);
+                       break;
+               case USB_PORT_FEAT_C_CONNECTION:
+               case USB_PORT_FEAT_C_ENABLE:
+               case USB_PORT_FEAT_C_OVER_CURRENT:
+               case USB_PORT_FEAT_C_RESET:
+               case USB_PORT_FEAT_C_SUSPEND:
+                       break;
+               default:
+                       goto error;
+               }
+               DBG(5, "clear feature %d\n", wValue);
+               musb->port1_status &= ~(1 << wValue);
+               break;
+       case GetHubDescriptor:
+               {
+               struct usb_hub_descriptor *desc = (void *)buf;
+
+               desc->bDescLength = 9;
+               desc->bDescriptorType = 0x29;
+               desc->bNbrPorts = 1;
+               desc->wHubCharacteristics = __constant_cpu_to_le16(
+                                 0x0001        /* per-port power switching */
+                               | 0x0010        /* no overcurrent reporting */
+                               );
+               desc->bPwrOn2PwrGood = 5;       /* msec/2 */
+               desc->bHubContrCurrent = 0;
+
+               /* workaround bogus struct definition */
+               desc->DeviceRemovable[0] = 0x02;        /* port 1 */
+               desc->DeviceRemovable[1] = 0xff;
+               }
+               break;
+       case GetHubStatus:
+               temp = 0;
+               *(__le32 *) buf = cpu_to_le32(temp);
+               break;
+       case GetPortStatus:
+               if (wIndex != 1)
+                       goto error;
+
+               /* finish RESET signaling? */
+               if ((musb->port1_status & USB_PORT_STAT_RESET)
+                               && time_after_eq(jiffies, musb->rh_timer))
+                       musb_port_reset(musb, false);
+
+               /* finish RESUME signaling? */
+               if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
+                               && time_after_eq(jiffies, musb->rh_timer)) {
+                       u8              power;
+
+                       power = musb_readb(musb->mregs, MUSB_POWER);
+                       power &= ~MUSB_POWER_RESUME;
+                       DBG(4, "root port resume stopped, power %02x\n",
+                                       power);
+                       musb_writeb(musb->mregs, MUSB_POWER, power);
+
+                       /* ISSUE:  DaVinci (RTL 1.300) disconnects after
+                        * resume of high speed peripherals (but not full
+                        * speed ones).
+                        */
+
+                       musb->is_active = 1;
+                       musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+                                       | MUSB_PORT_STAT_RESUME);
+                       musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+                       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+                       /* NOTE: it might really be A_WAIT_BCON ... */
+                       musb->xceiv.state = OTG_STATE_A_HOST;
+               }
+
+               put_unaligned(cpu_to_le32(musb->port1_status
+                                       & ~MUSB_PORT_STAT_RESUME),
+                               (__le32 *) buf);
+
+               /* port change status is more interesting */
+               DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+                               musb->port1_status);
+               break;
+       case SetPortFeature:
+               if ((wIndex & 0xff) != 1)
+                       goto error;
+
+               switch (wValue) {
+               case USB_PORT_FEAT_POWER:
+                       /* NOTE: this controller has a strange state machine
+                        * that involves "requesting sessions" according to
+                        * magic side effects from incompletely-described
+                        * rules about startup...
+                        *
+                        * This call is what really starts the host mode; be
+                        * very careful about side effects if you reorder any
+                        * initialization logic, e.g. for OTG, or change any
+                        * logic relating to VBUS power-up.
+                        */
+                       if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+                               musb_start(musb);
+                       break;
+               case USB_PORT_FEAT_RESET:
+                       musb_port_reset(musb, true);
+                       break;
+               case USB_PORT_FEAT_SUSPEND:
+                       musb_port_suspend(musb, true);
+                       break;
+               case USB_PORT_FEAT_TEST:
+                       if (unlikely(is_host_active(musb)))
+                               goto error;
+
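+                       /* the test selector arrives in the high byte
+                        * of wIndex */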
+                       wIndex >>= 8;
+                       switch (wIndex) {
+                       case 1:
+                               pr_debug("TEST_J\n");
+                               temp = MUSB_TEST_J;
+                               break;
+                       case 2:
+                               pr_debug("TEST_K\n");
+                               temp = MUSB_TEST_K;
+                               break;
+                       case 3:
+                               pr_debug("TEST_SE0_NAK\n");
+                               temp = MUSB_TEST_SE0_NAK;
+                               break;
+                       case 4:
+                               pr_debug("TEST_PACKET\n");
+                               temp = MUSB_TEST_PACKET;
+                               musb_load_testpacket(musb);
+                               break;
+                       case 5:
+                               pr_debug("TEST_FORCE_ENABLE\n");
+                               temp = MUSB_TEST_FORCE_HOST
+                                       | MUSB_TEST_FORCE_HS;
+
+                               musb_writeb(musb->mregs, MUSB_DEVCTL,
+                                               MUSB_DEVCTL_SESSION);
+                               break;
+                       case 6:
+                               pr_debug("TEST_FIFO_ACCESS\n");
+                               temp = MUSB_TEST_FIFO_ACCESS;
+                               break;
+                       default:
+                               goto error;
+                       }
+                       musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+                       break;
+               default:
+                       goto error;
+               }
+               DBG(5, "set feature %d\n", wValue);
+               musb->port1_status |= 1 << wValue;
+               break;
+
+       default:
+error:
+               /* "protocol stall" on error */
+               retval = -EPIPE;
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644 (file)
index 0000000..9ba8fb7
--- /dev/null
@@ -0,0 +1,433 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "musb_core.h"
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#define MUSB_HSDMA_BASE                0x200
+#define MUSB_HSDMA_INTR                (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL             0x4
+#define MUSB_HSDMA_ADDRESS             0x8
+#define MUSB_HSDMA_COUNT               0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset)          \
+               (MUSB_HSDMA_BASE + (_bChannel << 4) + _offset)
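+/* per-channel registers live in 16-byte windows starting at MUSB_HSDMA_BASE */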
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT                0
+#define MUSB_HSDMA_TRANSMIT_SHIFT              1
+#define MUSB_HSDMA_MODE1_SHIFT         2
+#define MUSB_HSDMA_IRQENABLE_SHIFT             3
+#define MUSB_HSDMA_ENDPOINT_SHIFT              4
+#define MUSB_HSDMA_BUSERROR_SHIFT              8
+#define MUSB_HSDMA_BURSTMODE_SHIFT             9
+#define MUSB_HSDMA_BURSTMODE           (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC    0
+#define MUSB_HSDMA_BURSTMODE_INCR4     1
+#define MUSB_HSDMA_BURSTMODE_INCR8     2
+#define MUSB_HSDMA_BURSTMODE_INCR16    3
+
+#define MUSB_HSDMA_CHANNELS            8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+       struct dma_channel              Channel;
+       struct musb_dma_controller      *controller;
+       u32                             dwStartAddress;
+       u32                             len;
+       u16                             wMaxPacketSize;
+       u8                              bIndex;
+       u8                              epnum;
+       u8                              transmit;
+};
+
+struct musb_dma_controller {
+       struct dma_controller           Controller;
+       struct musb_dma_channel         aChannel[MUSB_HSDMA_CHANNELS];
+       void                            *pDmaPrivate;
+       void __iomem                    *pCoreBase;
+       u8                              bChannelCount;
+       u8                              bmUsedChannels;
+       u8                              irq;
+};
+
+static int dma_controller_start(struct dma_controller *c)
+{
+       /* nothing to do */
+       return 0;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+       struct musb_dma_controller *controller =
+               container_of(c, struct musb_dma_controller, Controller);
+       struct musb *musb = (struct musb *) controller->pDmaPrivate;
+       struct dma_channel *pChannel;
+       u8 bBit;
+
+       if (controller->bmUsedChannels != 0) {
+               dev_err(musb->controller,
+                       "Stopping DMA controller while channel active\n");
+
+               for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+                       if (controller->bmUsedChannels & (1 << bBit)) {
+                               pChannel = &controller->aChannel[bBit].Channel;
+                               dma_channel_release(pChannel);
+
+                               if (!controller->bmUsedChannels)
+                                       break;
+                       }
+               }
+       }
+       return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+                               struct musb_hw_ep *hw_ep, u8 transmit)
+{
+       u8 bBit;
+       struct dma_channel *pChannel = NULL;
+       struct musb_dma_channel *pImplChannel = NULL;
+       struct musb_dma_controller *controller =
+                       container_of(c, struct musb_dma_controller, Controller);
+
+       for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+               if (!(controller->bmUsedChannels & (1 << bBit))) {
+                       controller->bmUsedChannels |= (1 << bBit);
+                       pImplChannel = &(controller->aChannel[bBit]);
+                       pImplChannel->controller = controller;
+                       pImplChannel->bIndex = bBit;
+                       pImplChannel->epnum = hw_ep->epnum;
+                       pImplChannel->transmit = transmit;
+                       pChannel = &(pImplChannel->Channel);
+                       pChannel->private_data = pImplChannel;
+                       pChannel->status = MUSB_DMA_STATUS_FREE;
+                       pChannel->max_len = 0x10000;
+                       /* Tx => mode 1; Rx => mode 0 */
+                       pChannel->desired_mode = transmit;
+                       pChannel->actual_len = 0;
+                       break;
+               }
+       }
+       return pChannel;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel)
+{
+       struct musb_dma_channel *pImplChannel =
+               (struct musb_dma_channel *) pChannel->private_data;
+
+       pChannel->actual_len = 0;
+       pImplChannel->dwStartAddress = 0;
+       pImplChannel->len = 0;
+
+       pImplChannel->controller->bmUsedChannels &=
+               ~(1 << pImplChannel->bIndex);
+
+       pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+static void configure_channel(struct dma_channel *pChannel,
+                               u16 packet_sz, u8 mode,
+                               dma_addr_t dma_addr, u32 len)
+{
+       struct musb_dma_channel *pImplChannel =
+               (struct musb_dma_channel *) pChannel->private_data;
+       struct musb_dma_controller *controller = pImplChannel->controller;
+       void __iomem *mbase = controller->pCoreBase;
+       u8 bChannel = pImplChannel->bIndex;
+       u16 csr = 0;
+
+       DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+                       pChannel, packet_sz, dma_addr, len, mode);
+
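+       /* mode 1 enables multi-packet DMA; the bus burst length is scaled
+        * to the endpoint packet size */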
+       if (mode) {
+               csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+               BUG_ON(len < packet_sz);
+
+               if (packet_sz >= 64) {
+                       csr |= MUSB_HSDMA_BURSTMODE_INCR16
+                                       << MUSB_HSDMA_BURSTMODE_SHIFT;
+               } else if (packet_sz >= 32) {
+                       csr |= MUSB_HSDMA_BURSTMODE_INCR8
+                                       << MUSB_HSDMA_BURSTMODE_SHIFT;
+               } else if (packet_sz >= 16) {
+                       csr |= MUSB_HSDMA_BURSTMODE_INCR4
+                                       << MUSB_HSDMA_BURSTMODE_SHIFT;
+               }
+       }
+
+       csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+               | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+               | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+               | (pImplChannel->transmit
+                               ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+                               : 0);
+
+       /* address/count */
+       musb_writel(mbase,
+               MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+               dma_addr);
+       musb_writel(mbase,
+               MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+               len);
+
+       /* control (this should start things) */
+       musb_writew(mbase,
+               MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+               csr);
+}
+
+static int dma_channel_program(struct dma_channel *pChannel,
+                               u16 packet_sz, u8 mode,
+                               dma_addr_t dma_addr, u32 len)
+{
+       struct musb_dma_channel *pImplChannel =
+                       (struct musb_dma_channel *) pChannel->private_data;
+
+       DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+               pImplChannel->epnum,
+               pImplChannel->transmit ? "Tx" : "Rx",
+               packet_sz, dma_addr, len, mode);
+
+       BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
+               pChannel->status == MUSB_DMA_STATUS_BUSY);
+
+       pChannel->actual_len = 0;
+       pImplChannel->dwStartAddress = dma_addr;
+       pImplChannel->len = len;
+       pImplChannel->wMaxPacketSize = packet_sz;
+       pChannel->status = MUSB_DMA_STATUS_BUSY;
+
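+       /* use mode 1 only when requested and at least one full packet fits;
+        * otherwise fall back to mode 0 */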
+       if ((mode == 1) && (len >= packet_sz))
+               configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+       else
+               configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+
+       return true;
+}
+
+static int dma_channel_abort(struct dma_channel *pChannel)
+{
+       struct musb_dma_channel *pImplChannel =
+               (struct musb_dma_channel *) pChannel->private_data;
+       u8 bChannel = pImplChannel->bIndex;
+       void __iomem *mbase = pImplChannel->controller->pCoreBase;
+       u16 csr;
+
+       if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
+               if (pImplChannel->transmit) {
+
+                       csr = musb_readw(mbase,
+                               MUSB_EP_OFFSET(pImplChannel->epnum,
+                                               MUSB_TXCSR));
+                       csr &= ~(MUSB_TXCSR_AUTOSET |
+                                MUSB_TXCSR_DMAENAB |
+                                MUSB_TXCSR_DMAMODE);
+                       musb_writew(mbase,
+                               MUSB_EP_OFFSET(pImplChannel->epnum,
+                                               MUSB_TXCSR),
+                               csr);
+               } else {
+                       csr = musb_readw(mbase,
+                               MUSB_EP_OFFSET(pImplChannel->epnum,
+                                               MUSB_RXCSR));
+                       csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+                                MUSB_RXCSR_DMAENAB |
+                                MUSB_RXCSR_DMAMODE);
+                       musb_writew(mbase,
+                               MUSB_EP_OFFSET(pImplChannel->epnum,
+                                               MUSB_RXCSR),
+                               csr);
+               }
+
+               musb_writew(mbase,
+                       MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+                       0);
+               musb_writel(mbase,
+                       MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+                       0);
+               musb_writel(mbase,
+                       MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+                       0);
+
+               pChannel->status = MUSB_DMA_STATUS_FREE;
+       }
+       return 0;
+}
+
+static irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+       struct musb_dma_controller *controller =
+               (struct musb_dma_controller *)private_data;
+       struct musb_dma_channel *pImplChannel;
+       struct musb *musb = controller->pDmaPrivate;
+       void __iomem *mbase = controller->pCoreBase;
+       struct dma_channel *pChannel;
+       u8 bChannel;
+       u16 csr;
+       u32 dwAddress;
+       u8 int_hsdma;
+       irqreturn_t retval = IRQ_NONE;
+       unsigned long flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+       if (!int_hsdma)
+               goto done;
+
+       for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
+               if (int_hsdma & (1 << bChannel)) {
+                       pImplChannel = (struct musb_dma_channel *)
+                                       &(controller->aChannel[bChannel]);
+                       pChannel = &pImplChannel->Channel;
+
+                       csr = musb_readw(mbase,
+                                       MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+                                                       MUSB_HSDMA_CONTROL));
+
+                       if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
+                               pImplChannel->Channel.status =
+                                       MUSB_DMA_STATUS_BUS_ABORT;
+                       else {
+                               u8 devctl;
+
+                               dwAddress = musb_readl(mbase,
+                                               MUSB_HSDMA_CHANNEL_OFFSET(
+                                                       bChannel,
+                                                       MUSB_HSDMA_ADDRESS));
+                               pChannel->actual_len = dwAddress
+                                       - pImplChannel->dwStartAddress;
+
+                               DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+                                       pChannel, pImplChannel->dwStartAddress,
+                                       dwAddress, pChannel->actual_len,
+                                       pImplChannel->len,
+                                       (pChannel->actual_len
+                                               < pImplChannel->len) ?
+                                       "=> reconfig 0" : "=> complete");
+
+                               devctl = musb_readb(mbase, MUSB_DEVCTL);
+
+                               pChannel->status = MUSB_DMA_STATUS_FREE;
+
+                               /* completed */
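+                               /* in host TX, mode 0 DMA (or a short final
+                                * packet in mode 1) leaves data in the FIFO
+                                * without TXPKTRDY, so set it by hand;
+                                * everything else is a normal completion */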
+                               if ((devctl & MUSB_DEVCTL_HM)
+                                       && (pImplChannel->transmit)
+                                       && ((pChannel->desired_mode == 0)
+                                           || (pChannel->actual_len &
+                                           (pImplChannel->wMaxPacketSize - 1)))
+                                        ) {
+                                       /* Send out the packet */
+                                       musb_ep_select(mbase,
+                                               pImplChannel->epnum);
+                                       musb_writew(mbase, MUSB_EP_OFFSET(
+                                                       pImplChannel->epnum,
+                                                       MUSB_TXCSR),
+                                               MUSB_TXCSR_TXPKTRDY);
+                               } else
+                                       musb_dma_completion(
+                                               musb,
+                                               pImplChannel->epnum,
+                                               pImplChannel->transmit);
+                       }
+               }
+       }
+       retval = IRQ_HANDLED;
+done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+       return retval;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+       struct musb_dma_controller *controller;
+
+       controller = container_of(c, struct musb_dma_controller, Controller);
+       if (!controller)
+               return;
+
+       if (controller->irq)
+               free_irq(controller->irq, c);
+
+       kfree(controller);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+{
+       struct musb_dma_controller *controller;
+       struct device *dev = musb->controller;
+       struct platform_device *pdev = to_platform_device(dev);
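+       /* the DMA interrupt is the platform device's second IRQ resource */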
+       int irq = platform_get_irq(pdev, 1);
+
+       if (irq <= 0) {
+               dev_err(dev, "No DMA interrupt line!\n");
+               return NULL;
+       }
+
+       controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+       if (!controller)
+               return NULL;
+
+       controller->bChannelCount = MUSB_HSDMA_CHANNELS;
+       controller->pDmaPrivate = musb;
+       controller->pCoreBase = pCoreBase;
+
+       controller->Controller.start = dma_controller_start;
+       controller->Controller.stop = dma_controller_stop;
+       controller->Controller.channel_alloc = dma_channel_allocate;
+       controller->Controller.channel_release = dma_channel_release;
+       controller->Controller.channel_program = dma_channel_program;
+       controller->Controller.channel_abort = dma_channel_abort;
+
+       if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+                       musb->controller->bus_id, &controller->Controller)) {
+               dev_err(dev, "request_irq %d failed!\n", irq);
+               dma_controller_destroy(&controller->Controller);
+               return NULL;
+       }
+
+       controller->irq = irq;
+
+       return &controller->Controller;
+}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644 (file)
index 0000000..298b22e
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+#ifdef CONFIG_ARCH_OMAP3430
+#define        get_cpu_rev()   2
+#endif
+
+#define MUSB_TIMEOUT_A_WAIT_BCON       1100
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+       struct musb     *musb = (void *)_musb;
+       unsigned long   flags;
+       u8      power;
+       u8      devctl;
+
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_WAIT_BCON:
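+               /* no connect before the timeout: end the session and let
+                * the ID pin pick the default role */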
+               devctl &= ~MUSB_DEVCTL_SESSION;
+               musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+               if (devctl & MUSB_DEVCTL_BDEVICE) {
+                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       MUSB_DEV_MODE(musb);
+               } else {
+                       musb->xceiv.state = OTG_STATE_A_IDLE;
+                       MUSB_HST_MODE(musb);
+               }
+               break;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       case OTG_STATE_A_SUSPEND:
+               /* finish RESUME signaling? */
+               if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
+                       power = musb_readb(musb->mregs, MUSB_POWER);
+                       power &= ~MUSB_POWER_RESUME;
+                       DBG(1, "root port resume stopped, power %02x\n", power);
+                       musb_writeb(musb->mregs, MUSB_POWER, power);
+                       musb->is_active = 1;
+                       musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+                                               | MUSB_PORT_STAT_RESUME);
+                       musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+                       usb_hcd_poll_rh_status(musb_to_hcd(musb));
+                       /* NOTE: it might really be A_WAIT_BCON ... */
+                       musb->xceiv.state = OTG_STATE_A_HOST;
+               }
+               break;
+#endif
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       case OTG_STATE_A_HOST:
+               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+               if (devctl & MUSB_DEVCTL_BDEVICE)
+                       musb->xceiv.state = OTG_STATE_B_IDLE;
+               else
+                       musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+               break;
+#endif
+       default:
+               break;
+       }
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+       unsigned long           default_timeout = jiffies + msecs_to_jiffies(3);
+       static unsigned long    last_timer;
+
+       if (timeout == 0)
+               timeout = default_timeout;
+
+       /* Never idle if active, or when VBUS timeout is not set as host */
+       if (musb->is_active || ((musb->a_wait_bcon == 0)
+                       && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+               DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+               del_timer(&musb_idle_timer);
+               last_timer = jiffies;
+               return;
+       }
+
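+       /* a shorter request must not preempt a longer pending idle timer */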
+       if (time_after(last_timer, timeout)) {
+               if (!timer_pending(&musb_idle_timer))
+                       last_timer = timeout;
+               else {
+                       DBG(4, "Longer idle timer already pending, ignoring\n");
+                       return;
+               }
+       }
+       last_timer = timeout;
+
+       DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+               otg_state_string(musb),
+               (unsigned long)jiffies_to_msecs(timeout - jiffies));
+       mod_timer(&musb_idle_timer, timeout);
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+}
+void musb_platform_disable(struct musb *musb)
+{
+}
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void omap_set_vbus(struct musb *musb, int is_on)
+{
+       u8              devctl;
+       /* HDRC controls CPEN, but beware current surges during device
+        * connect.  They can trigger transient overcurrent conditions
+        * that must be ignored.
+        */
+
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       if (is_on) {
+               musb->is_active = 1;
+               musb->xceiv.default_a = 1;
+               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               devctl |= MUSB_DEVCTL_SESSION;
+
+               MUSB_HST_MODE(musb);
+       } else {
+               musb->is_active = 0;
+
+               /* NOTE:  we're skipping A_WAIT_VFALL -> A_IDLE and
+                * jumping right to B_IDLE...
+                */
+
+               musb->xceiv.default_a = 0;
+               musb->xceiv.state = OTG_STATE_B_IDLE;
+               devctl &= ~MUSB_DEVCTL_SESSION;
+
+               MUSB_DEV_MODE(musb);
+       }
+       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+       DBG(1, "VBUS %s, devctl %02x "
+               /* otg %3x conf %08x prcm %08x */ "\n",
+               otg_state_string(musb),
+               musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+{
+       return 0;
+}
+
+static int musb_platform_resume(struct musb *musb);
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+       u8      devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       devctl |= MUSB_DEVCTL_SESSION;
+       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+       switch (musb_mode) {
+       case MUSB_HOST:
+               otg_set_host(&musb->xceiv, musb->xceiv.host);
+               break;
+       case MUSB_PERIPHERAL:
+               otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+               break;
+       case MUSB_OTG:
+               break;
+       }
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+       u32 l;
+
+#if defined(CONFIG_ARCH_OMAP2430)
+       omap_cfg_reg(AE5_2430_USB0HS_STP);
+#endif
+
+       musb_platform_resume(musb);
+
+       l = omap_readl(OTG_SYSCONFIG);
+       l &= ~ENABLEWAKEUP;     /* disable wakeup */
+       l &= ~NOSTDBY;          /* remove possible nostdby */
+       l |= SMARTSTDBY;        /* enable smart standby */
+       l &= ~AUTOIDLE;         /* disable auto idle */
+       l &= ~NOIDLE;           /* remove possible noidle */
+       l |= SMARTIDLE;         /* enable smart idle */
+       l |= AUTOIDLE;          /* enable auto idle */
+       omap_writel(l, OTG_SYSCONFIG);
+
+       l = omap_readl(OTG_INTERFSEL);
+       l |= ULPI_12PIN;
+       omap_writel(l, OTG_INTERFSEL);
+
+       pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+                       "sysstatus 0x%x, intrfsel 0x%x, simenable  0x%x\n",
+                       omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+                       omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+                       omap_readl(OTG_SIMENABLE));
+
+       omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+       if (is_host_enabled(musb))
+               musb->board_set_vbus = omap_set_vbus;
+       if (is_peripheral_enabled(musb))
+               musb->xceiv.set_power = omap_set_power;
+       musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+
+       setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+       return 0;
+}
+
+int musb_platform_suspend(struct musb *musb)
+{
+       u32 l;
+
+       if (!musb->clock)
+               return 0;
+
+       /* in any role */
+       l = omap_readl(OTG_FORCESTDBY);
+       l |= ENABLEFORCE;       /* enable MSTANDBY */
+       omap_writel(l, OTG_FORCESTDBY);
+
+       l = omap_readl(OTG_SYSCONFIG);
+       l |= ENABLEWAKEUP;      /* enable wakeup */
+       omap_writel(l, OTG_SYSCONFIG);
+
+       if (musb->xceiv.set_suspend)
+               musb->xceiv.set_suspend(&musb->xceiv, 1);
+
+       if (musb->set_clock)
+               musb->set_clock(musb->clock, 0);
+       else
+               clk_disable(musb->clock);
+
+       return 0;
+}
+
+static int musb_platform_resume(struct musb *musb)
+{
+       u32 l;
+
+       if (!musb->clock)
+               return 0;
+
+       if (musb->xceiv.set_suspend)
+               musb->xceiv.set_suspend(&musb->xceiv, 0);
+
+       if (musb->set_clock)
+               musb->set_clock(musb->clock, 1);
+       else
+               clk_enable(musb->clock);
+
+       l = omap_readl(OTG_SYSCONFIG);
+       l &= ~ENABLEWAKEUP;     /* disable wakeup */
+       omap_writel(l, OTG_SYSCONFIG);
+
+       l = omap_readl(OTG_FORCESTDBY);
+       l &= ~ENABLEFORCE;      /* disable MSTANDBY */
+       omap_writel(l, OTG_FORCESTDBY);
+
+       return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+
+       omap_vbus_power(musb, 0 /*off*/, 1);
+
+       musb_platform_suspend(musb);
+
+       clk_put(musb->clock);
+       musb->clock = 0;
+
+       return 0;
+}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644 (file)
index 0000000..786a620
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include <asm/arch/hardware.h>
+#include <asm/arch/usb.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET     0
+#if    defined(CONFIG_ARCH_OMAP2430)
+#define        OMAP_HSOTG_BASE         (OMAP243X_HS_BASE)
+#elif  defined(CONFIG_ARCH_OMAP3430)
+#define        OMAP_HSOTG_BASE         (OMAP34XX_HSUSB_OTG_BASE)
+#endif
+#define OMAP_HSOTG(offset)     (OMAP_HSOTG_BASE + 0x400 + (offset))
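+/* e.g. OTG_SYSCONFIG (offset 0x4) below resolves to OMAP_HSOTG_BASE + 0x404 */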
+#define OTG_REVISION           OMAP_HSOTG(0x0)
+#define OTG_SYSCONFIG          OMAP_HSOTG(0x4)
+#      define  MIDLEMODE       12      /* bit position */
+#      define  FORCESTDBY              (0 << MIDLEMODE)
+#      define  NOSTDBY                 (1 << MIDLEMODE)
+#      define  SMARTSTDBY              (2 << MIDLEMODE)
+#      define  SIDLEMODE               3       /* bit position */
+#      define  FORCEIDLE               (0 << SIDLEMODE)
+#      define  NOIDLE                  (1 << SIDLEMODE)
+#      define  SMARTIDLE               (2 << SIDLEMODE)
+#      define  ENABLEWAKEUP            (1 << 2)
+#      define  SOFTRST                 (1 << 1)
+#      define  AUTOIDLE                (1 << 0)
+#define OTG_SYSSTATUS          OMAP_HSOTG(0x8)
+#      define  RESETDONE               (1 << 0)
+#define OTG_INTERFSEL          OMAP_HSOTG(0xc)
+#      define  EXTCP                   (1 << 2)
+#      define  PHYSEL          0       /* bit position */
+#      define  UTMI_8BIT               (0 << PHYSEL)
+#      define  ULPI_12PIN              (1 << PHYSEL)
+#      define  ULPI_8PIN               (2 << PHYSEL)
+#define OTG_SIMENABLE          OMAP_HSOTG(0x10)
+#      define  TM1                     (1 << 0)
+#define OTG_FORCESTDBY         OMAP_HSOTG(0x14)
+#      define  ENABLEFORCE             (1 << 0)
+
+#endif /* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
+
+#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644 (file)
index 0000000..b73b036
--- /dev/null
@@ -0,0 +1,1151 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ *   configured for NOR FLASH interface instead of VLYNQ serial
+ *   interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+static void tusb_source_power(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val)                (((reg_val) >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val)                ((reg_val) & 0xf)
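+/* e.g. a raw revision value of 0x31 decodes as major 3, minor 1 */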
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+u8 tusb_get_revision(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             die_id;
+       u8              rev;
+
+       rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+       if (TUSB_REV_MAJOR(rev) == 3) {
+               die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+                               TUSB_DIDR1_HI));
+               if (die_id >= TUSB_DIDR1_HI_REV_31)
+                       rev |= 1;
+       }
+
+       return rev;
+}
+
+static int __init tusb_print_revision(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u8              rev;
+
+       rev = tusb_get_revision(musb);
+
+       pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+               "prcm",
+               TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+               TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+               "int",
+               TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+               TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+               "gpio",
+               TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+               TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+               "dma",
+               TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+               TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+               "dieid",
+               TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+               "rev",
+               TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+
+       return tusb_get_revision(musb);
+}
+
+#define WBUS_QUIRK_MASK        (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+                               | TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
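+/* Informally: this is armed from tusb_allow_idle() when WBUS wake is
+ * requested on rev 3.0 parts, and undone again from the wake-up path in
+ * tusb_interrupt().
+ */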
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       static u32      phy_otg_ctrl, phy_otg_ena;
+       u32             tmp;
+
+       if (enabled) {
+               phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+               phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+               tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+                               | phy_otg_ena | WBUS_QUIRK_MASK;
+               musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+               tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+               tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+               musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+               DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+                       musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+                       musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+       } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+                                       & TUSB_PHY_OTG_CTRL_TESTM2) {
+               tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+               musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+               tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+               musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+               DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+                       musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+                       musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+               phy_otg_ctrl = 0;
+               phy_otg_ena = 0;
+       }
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
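+/* As a rough example: a 7-byte transfer is programmed as an explicit 7-byte
+ * XFR_SIZE on the endpoint, then pushed as one full 32-bit word plus a
+ * 3-byte tail packed into a second word by the helpers below.
+ */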
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+       u32             val;
+       int             i;
+
+       if (len > 4) {
+               for (i = 0; i < (len >> 2); i++) {
+                       memcpy(&val, buf, 4);
+                       musb_writel(fifo, 0, val);
+                       buf += 4;
+               }
+               len %= 4;
+       }
+       if (len > 0) {
+               /* Write the remaining 1-3 bytes to the FIFO */
+               memcpy(&val, buf, len);
+               musb_writel(fifo, 0, val);
+       }
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+                                               void __iomem *buf, u16 len)
+{
+       u32             val;
+       int             i;
+
+       if (len > 4) {
+               for (i = 0; i < (len >> 2); i++) {
+                       val = musb_readl(fifo, 0);
+                       memcpy(buf, &val, 4);
+                       buf += 4;
+               }
+               len %= 4;
+       }
+       if (len > 0) {
+               /* Read the remaining 1-3 bytes from the FIFO */
+               val = musb_readl(fifo, 0);
+               memcpy(buf, &val, len);
+       }
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+       void __iomem    *ep_conf = hw_ep->conf;
+       void __iomem    *fifo = hw_ep->fifo;
+       u8              epnum = hw_ep->epnum;
+
+       prefetch(buf);
+
+       DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+                       'T', epnum, fifo, len, buf);
+
+       if (epnum)
+               musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+                       TUSB_EP_CONFIG_XFR_SIZE(len));
+       else
+               musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+                       TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+       if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+               /* Best case is a 32-bit aligned source address */
+               if ((0x02 & (unsigned long) buf) == 0) {
+                       if (len >= 4) {
+                               writesl(fifo, buf, len >> 2);
+                               buf += (len & ~0x03);
+                               len &= 0x03;
+                       }
+               } else {
+                       if (len >= 2) {
+                               u32 val;
+                               int i;
+
+                               /* Cannot use writesw, fifo is 32-bit */
+                               for (i = 0; i < (len >> 2); i++) {
+                                       val = (u32)(*(u16 *)buf);
+                                       buf += 2;
+                                       val |= (*(u16 *)buf) << 16;
+                                       buf += 2;
+                                       musb_writel(fifo, 0, val);
+                               }
+                               len &= 0x03;
+                       }
+               }
+       }
+
+       if (len > 0)
+               tusb_fifo_write_unaligned(fifo, buf, len);
+}
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+       void __iomem    *ep_conf = hw_ep->conf;
+       void __iomem    *fifo = hw_ep->fifo;
+       u8              epnum = hw_ep->epnum;
+
+       DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+                       'R', epnum, fifo, len, buf);
+
+       if (epnum)
+               musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+                       TUSB_EP_CONFIG_XFR_SIZE(len));
+       else
+               musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+       if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+               /* Best case is 32bit-aligned destination address */
+               if ((0x02 & (unsigned long) buf) == 0) {
+                       if (len >= 4) {
+                               readsl(fifo, buf, len >> 2);
+                               buf += (len & ~0x03);
+                               len &= 0x03;
+                       }
+               } else {
+                       if (len >= 2) {
+                               u32 val;
+                               int i;
+
+                               /* Cannot use readsw, fifo is 32-bit */
+                               for (i = 0; i < (len >> 2); i++) {
+                                       val = musb_readl(fifo, 0);
+                                       *(u16 *)buf = (u16)(val & 0xffff);
+                                       buf += 2;
+                                       *(u16 *)buf = (u16)(val >> 16);
+                                       buf += 2;
+                               }
+                               len &= 0x03;
+                       }
+               }
+       }
+
+       if (len > 0)
+               tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* This is used by gadget drivers, and OTG transceiver logic, allowing
+ * at most mA current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V).  In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
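+/* Normally reached through the transceiver's set_power hook;
+ * musb_platform_init() below wires this up as musb->xceiv.set_power when
+ * the peripheral side is enabled.
+ */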
+static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+{
+       struct musb     *musb = container_of(x, struct musb, xceiv);
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             reg;
+
+       /*
+        * Keep clock active when enabled. Note that this is not tied to
+        * drawing VBUS, as with OTG mA can be less than musb->min_power.
+        */
+       if (musb->set_clock) {
+               if (mA)
+                       musb->set_clock(musb->clock, 1);
+               else
+                       musb->set_clock(musb->clock, 0);
+       }
+
+       /* tps65030 seems to consume max 100mA, with maybe 60mA available
+        * (measured on one board) for things other than tps and tusb.
+        *
+        * Boards sharing the CPU clock with CLKIN will need to prevent
+        * certain idle sleep states while the USB link is active.
+        *
+        * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+        * The actual current usage would be very board-specific.  For now,
+        * it's simpler to just use an aggregate (also board-specific).
+        */
+       if (x->default_a || mA < (musb->min_power << 1))
+               mA = 0;
+
+       reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+       if (mA) {
+               musb->is_bus_powered = 1;
+               reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+       } else {
+               musb->is_bus_powered = 0;
+               reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+       }
+       musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+       DBG(2, "draw max %d mA VBUS\n", mA);
+       return 0;
+}
+
+#else
+#define tusb_draw_power        NULL
+#endif
+
+/* workaround for issue 13:  change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             reg;
+
+       reg = musb_readl(tbase, TUSB_PRCM_CONF);
+       reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+       /* 0 = refclk (clkin, XI)
+        * 1 = PHY 60 MHz (internal PLL)
+        * 2 = not supported
+        * 3 = what?
+        */
+       if (mode > 0)
+               reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+       musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+       /* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events.  SW_EN for voltage is handled separately.
+ */
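+/* Typical wakeup_enables from musb_do_idle() below combine
+ * TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS and TUSB_PRCM_WVBUS (plus
+ * TUSB_PRCM_WID when OTG is enabled); NOR chip-select wake is always
+ * forced on here.
+ */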
+void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             reg;
+
+       if ((wakeup_enables & TUSB_PRCM_WBUS)
+                       && (tusb_get_revision(musb) == TUSB_REV_30))
+               tusb_wbus_quirk(musb, 1);
+
+       tusb_set_clock_source(musb, 0);
+
+       wakeup_enables |= TUSB_PRCM_WNORCS;
+       musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+       /* REVISIT writeup of WID implies that if WID set and ID is grounded,
+        * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+        * Presumably that's mostly to save power, hence WID is immaterial ...
+        */
+
+       reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+       /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+       if (is_host_active(musb)) {
+               reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+               reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+       } else {
+               reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+               reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+       }
+       reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+       musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+       DBG(6, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             otg_stat, prcm_mngmt;
+       int             ret = 0;
+
+       otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+       prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+       /* Temporarily enable VBUS detection if it was disabled for
+        * suspend mode. Unless it's enabled otg_stat and devctl will
+        * not show correct VBUS state.
+        */
+       if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+               u32 tmp = prcm_mngmt;
+               tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+               musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+               otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+               musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+       }
+
+       if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+               ret = 1;
+
+       return ret;
+}
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+       struct musb     *musb = (void *)_musb;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       switch (musb->xceiv.state) {
+       case OTG_STATE_A_WAIT_BCON:
+               if ((musb->a_wait_bcon != 0)
+                       && (musb->idle_timeout == 0
+                               || time_after(jiffies, musb->idle_timeout))) {
+                       DBG(4, "Nothing connected %s, turning off VBUS\n",
+                                       otg_state_string(musb));
+               }
+               /* FALLTHROUGH */
+       case OTG_STATE_A_IDLE:
+               tusb_source_power(musb, 0);
+       default:
+               break;
+       }
+
+       if (!musb->is_active) {
+               u32     wakeups;
+
+               /* wait until khubd handles port change status */
+               if (is_host_active(musb) && (musb->port1_status >> 16))
+                       goto done;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+               if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+                       wakeups = 0;
+               else {
+                       wakeups = TUSB_PRCM_WHOSTDISCON
+                                       | TUSB_PRCM_WBUS
+                                       | TUSB_PRCM_WVBUS;
+                       if (is_otg_enabled(musb))
+                               wakeups |= TUSB_PRCM_WID;
+               }
+#else
+               wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
+#endif
+               tusb_allow_idle(musb, wakeups);
+       }
+done:
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on USB link status,
+ * like "disconnected" or "suspended".  We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called as the last function everywhere where there is
+ * register access to TUSB6010 because of NOR flash wake-up.
+ * Caller should own controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
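+/* The 3 msec default_timeout below implements that guard. */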
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+       unsigned long           default_timeout = jiffies + msecs_to_jiffies(3);
+       static unsigned long    last_timer;
+
+       if (timeout == 0)
+               timeout = default_timeout;
+
+       /* Never idle if active, or, as host, when no VBUS timeout is set */
+       if (musb->is_active || ((musb->a_wait_bcon == 0)
+                       && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+               DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+               del_timer(&musb_idle_timer);
+               last_timer = jiffies;
+               return;
+       }
+
+       if (time_after(last_timer, timeout)) {
+               if (!timer_pending(&musb_idle_timer))
+                       last_timer = timeout;
+               else {
+                       DBG(4, "Longer idle timer already pending, ignoring\n");
+                       return;
+               }
+       }
+       last_timer = timeout;
+
+       DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+               otg_state_string(musb),
+               (unsigned long)jiffies_to_msecs(timeout - jiffies));
+       mod_timer(&musb_idle_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK               60000000
+#define OTG_TIMER_MS(msecs)    ((msecs) \
+               ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+                               | TUSB_DEV_OTG_TIMER_ENABLE) \
+               : 0)
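+/* e.g. OTG_TIMER_MS(100) loads (60000000 / 1000) * 100 = 6,000,000 ticks of
+ * the 60 MHz clock, with TUSB_DEV_OTG_TIMER_ENABLE set.
+ */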
+
+static void tusb_source_power(struct musb *musb, int is_on)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             conf, prcm, timer;
+       u8              devctl;
+
+       /* HDRC controls CPEN, but beware current surges during device
+        * connect.  They can trigger transient overcurrent conditions
+        * that must be ignored.
+        */
+
+       prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+       conf = musb_readl(tbase, TUSB_DEV_CONF);
+       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+       if (is_on) {
+               if (musb->set_clock)
+                       musb->set_clock(musb->clock, 1);
+               timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+               musb->xceiv.default_a = 1;
+               musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+               devctl |= MUSB_DEVCTL_SESSION;
+
+               conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+               MUSB_HST_MODE(musb);
+       } else {
+               u32     otg_stat;
+
+               timer = 0;
+
+               /* If ID pin is grounded, we want to be a_idle */
+               otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+               if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+                       switch (musb->xceiv.state) {
+                       case OTG_STATE_A_WAIT_VRISE:
+                       case OTG_STATE_A_WAIT_BCON:
+                               musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+                               break;
+                       case OTG_STATE_A_WAIT_VFALL:
+                               musb->xceiv.state = OTG_STATE_A_IDLE;
+                               break;
+                       default:
+                               musb->xceiv.state = OTG_STATE_A_IDLE;
+                       }
+                       musb->is_active = 0;
+                       musb->xceiv.default_a = 1;
+                       MUSB_HST_MODE(musb);
+               } else {
+                       musb->is_active = 0;
+                       musb->xceiv.default_a = 0;
+                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                       MUSB_DEV_MODE(musb);
+               }
+
+               devctl &= ~MUSB_DEVCTL_SESSION;
+               conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+               if (musb->set_clock)
+                       musb->set_clock(musb->clock, 0);
+       }
+       prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+       musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+       musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+       musb_writel(tbase, TUSB_DEV_CONF, conf);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+       DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+               otg_state_string(musb),
+               musb_readb(musb->mregs, MUSB_DEVCTL),
+               musb_readl(tbase, TUSB_DEV_OTG_STAT),
+               conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in, the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ *
+ * REVISIT: It would be possible to add support for changing between host
+ * and peripheral modes in non-OTG configurations by reconfiguring hardware
+ * and then setting musb->board_mode. For now, only support OTG mode.
+ */
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       u32             otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+       if (musb->board_mode != MUSB_OTG) {
+               ERR("Changing mode currently only supported in OTG mode\n");
+               return;
+       }
+
+       otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+       phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+       phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+       dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+       switch (musb_mode) {
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+       case MUSB_HOST:         /* Disable PHY ID detect, ground ID */
+               phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               dev_conf |= TUSB_DEV_CONF_ID_SEL;
+               dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+               break;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+       case MUSB_PERIPHERAL:   /* Disable PHY ID detect, keep ID pull-up on */
+               phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+               break;
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+       case MUSB_OTG:          /* Use PHY ID detection */
+               phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+               dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+               break;
+#endif
+
+       default:
+               DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+       }
+
+       musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+                       TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+       musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+                       TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+       musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+       otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+       if ((musb_mode == MUSB_PERIPHERAL) &&
+               !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+                       INFO("Cannot be peripheral with mini-A cable "
+                       "otg_stat: %08x\n", otg_stat);
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+       u32             otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+       unsigned long   idle_timeout = 0;
+
+       /* ID pin */
+       if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+               int     default_a;
+
+               if (is_otg_enabled(musb))
+                       default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+               else
+                       default_a = is_host_enabled(musb);
+               DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
+               musb->xceiv.default_a = default_a;
+               tusb_source_power(musb, default_a);
+
+               /* Don't allow idling immediately */
+               if (default_a)
+                       idle_timeout = jiffies + (HZ * 3);
+       }
+
+       /* VBUS state change */
+       if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+               /* B-dev state machine:  no vbus ~= disconnect */
+               if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+                               || !is_host_enabled(musb)) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+                       /* ? musb_root_disconnect(musb); */
+                       musb->port1_status &=
+                               ~(USB_PORT_STAT_CONNECTION
+                               | USB_PORT_STAT_ENABLE
+                               | USB_PORT_STAT_LOW_SPEED
+                               | USB_PORT_STAT_HIGH_SPEED
+                               | USB_PORT_STAT_TEST
+                               );
+#endif
+
+                       if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+                               DBG(1, "Forcing disconnect (no interrupt)\n");
+                               if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+                                       /* INTR_DISCONNECT can hide... */
+                                       musb->xceiv.state = OTG_STATE_B_IDLE;
+                                       musb->int_usb |= MUSB_INTR_DISCONNECT;
+                               }
+                               musb->is_active = 0;
+                       }
+                       DBG(2, "vbus change, %s, otg %03x\n",
+                               otg_state_string(musb), otg_stat);
+                       idle_timeout = jiffies + (1 * HZ);
+                       schedule_work(&musb->irq_work);
+
+               } else /* A-dev state machine */ {
+                       DBG(2, "vbus change, %s, otg %03x\n",
+                               otg_state_string(musb), otg_stat);
+
+                       switch (musb->xceiv.state) {
+                       case OTG_STATE_A_IDLE:
+                               DBG(2, "Got SRP, turning on VBUS\n");
+                               musb_set_vbus(musb, 1);
+
+                               /* CONNECT can wake if a_wait_bcon is set */
+                               if (musb->a_wait_bcon != 0)
+                                       musb->is_active = 0;
+                               else
+                                       musb->is_active = 1;
+
+                               /*
+                                * OPT FS A TD.4.6 needs a few seconds for
+                                * A_WAIT_VRISE
+                                */
+                               idle_timeout = jiffies + (2 * HZ);
+
+                               break;
+                       case OTG_STATE_A_WAIT_VRISE:
+                               /* ignore; A-session-valid < VBUS_VALID/2,
+                                * we monitor this with the timer
+                                */
+                               break;
+                       case OTG_STATE_A_WAIT_VFALL:
+                               /* REVISIT this irq triggers during short
+                                * spikes caused by enumeration ...
+                                */
+                               if (musb->vbuserr_retry) {
+                                       musb->vbuserr_retry--;
+                                       tusb_source_power(musb, 1);
+                               } else {
+                                       musb->vbuserr_retry
+                                               = VBUSERR_RETRY_COUNT;
+                                       tusb_source_power(musb, 0);
+                               }
+                               break;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       /* OTG timer expiration */
+       if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+               u8      devctl;
+
+               DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+
+               switch (musb->xceiv.state) {
+               case OTG_STATE_A_WAIT_VRISE:
+                       /* VBUS has probably been valid for a while now,
+                        * but may well have bounced out of range a bit
+                        */
+                       devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+                       if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+                               if ((devctl & MUSB_DEVCTL_VBUS)
+                                               != MUSB_DEVCTL_VBUS) {
+                                       DBG(2, "devctl %02x\n", devctl);
+                                       break;
+                               }
+                               musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+                               musb->is_active = 0;
+                               idle_timeout = jiffies
+                                       + msecs_to_jiffies(musb->a_wait_bcon);
+                       } else {
+                               /* REVISIT report overcurrent to hub? */
+                               ERR("vbus too slow, devctl %02x\n", devctl);
+                               tusb_source_power(musb, 0);
+                       }
+                       break;
+               case OTG_STATE_A_WAIT_BCON:
+                       if (musb->a_wait_bcon != 0)
+                               idle_timeout = jiffies
+                                       + msecs_to_jiffies(musb->a_wait_bcon);
+                       break;
+               case OTG_STATE_A_SUSPEND:
+                       break;
+               case OTG_STATE_B_WAIT_ACON:
+                       break;
+               default:
+                       break;
+               }
+       }
+       schedule_work(&musb->irq_work);
+
+       return idle_timeout;
+}
+
+static irqreturn_t tusb_interrupt(int irq, void *__hci)
+{
+       struct musb     *musb = __hci;
+       void __iomem    *tbase = musb->ctrl_base;
+       unsigned long   flags, idle_timeout = 0;
+       u32             int_mask, int_src;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       /* Mask all interrupts to allow using both edge and level GPIO irq */
+       int_mask = musb_readl(tbase, TUSB_INT_MASK);
+       musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+       int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+       DBG(3, "TUSB IRQ %08x\n", int_src);
+
+       musb->int_usb = (u8) int_src;
+
+       /* Acknowledge wake-up source interrupts */
+       if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+               u32     reg;
+               u32     i;
+
+               if (tusb_get_revision(musb) == TUSB_REV_30)
+                       tusb_wbus_quirk(musb, 0);
+
+               /* there are issues re-locking the PLL on wakeup ... */
+
+               /* work around issue 8 */
+               for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+                       musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+                       musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+                       reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+                       if (reg == i)
+                               break;
+                       DBG(6, "TUSB NOR not ready\n");
+               }
+
+               /* work around issue 13 (2nd half) */
+               tusb_set_clock_source(musb, 1);
+
+               reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+               musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+               if (reg & ~TUSB_PRCM_WNORCS) {
+                       musb->is_active = 1;
+                       schedule_work(&musb->irq_work);
+               }
+               DBG(3, "wake %sactive %02x\n",
+                               musb->is_active ? "" : "in", reg);
+
+               /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+       }
+
+       if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+               del_timer(&musb_idle_timer);
+
+       /* OTG state change reports (annoyingly) not issued by Mentor core */
+       if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+                               | TUSB_INT_SRC_OTG_TIMEOUT
+                               | TUSB_INT_SRC_ID_STATUS_CHNG))
+               idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+       /* TX dma callback must be handled here, RX dma callback is
+        * handled in tusb_omap_dma_cb.
+        */
+       if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+               u32     dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+               u32     real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
+
+               DBG(3, "DMA IRQ %08x\n", dma_src);
+               real_dma_src = ~real_dma_src & dma_src;
+               if (tusb_dma_omap() && real_dma_src) {
+                       int     tx_source = (real_dma_src & 0xffff);
+                       int     i;
+
+                       for (i = 1; i <= 15; i++) {
+                               if (tx_source & (1 << i)) {
+                                       DBG(3, "completing ep%i %s\n", i, "tx");
+                                       musb_dma_completion(musb, i, 1);
+                               }
+                       }
+               }
+               musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+       }
+
+       /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+       if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+               u32     musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+               musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
+               musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+               musb->int_tx = (musb_src & 0xffff);
+       } else {
+               musb->int_rx = 0;
+               musb->int_tx = 0;
+       }
+
+       if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+               musb_interrupt(musb);
+
+       /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+       musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+               int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+       musb_platform_try_idle(musb, idle_timeout);
+
+       musb_writel(tbase, TUSB_INT_MASK, int_mask);
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+void musb_platform_enable(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+
+       /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+        * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+       musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+       /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+       musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+       musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+       musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+       /* Clear all subsystem interrupts */
+       musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+       musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+       musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+       /* Acknowledge pending interrupt(s) */
+       musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+       /* A minimum interrupt de-assertion time of 0 clock cycles and
+        * active-low interrupt polarity seem to work reliably here */
+       musb_writel(tbase, TUSB_INT_CTRL_CONF,
+                       TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+       set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+       /* maybe force into the Default-A OTG state machine */
+       if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+                       & TUSB_DEV_OTG_STAT_ID_STATUS))
+               musb_writel(tbase, TUSB_INT_SRC_SET,
+                               TUSB_INT_SRC_ID_STATUS_CHNG);
+
+       if (is_dma_capable() && dma_off)
+               printk(KERN_WARNING "%s %s: dma not reactivated\n",
+                               __FILE__, __func__);
+       else
+               dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+
+       /* FIXME stop DMA, IRQs, timers, ... */
+
+       /* disable all IRQs */
+       musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+       musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+       musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+       musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+       del_timer(&musb_idle_timer);
+
+       if (is_dma_capable() && !dma_off) {
+               printk(KERN_WARNING "%s %s: dma still active\n",
+                               __FILE__, __func__);
+               dma_off = 1;
+       }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void __init tusb_setup_cpu_interface(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+
+       /*
+        * Disable GPIO[5:0] pullups (used as output DMA requests)
+        * Don't disable GPIO[7:6] as they are needed for wake-up.
+        */
+       musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+       /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+       musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+       /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+       musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+       /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+        * de-assertion time 2 system clocks p 62 */
+       musb_writel(tbase, TUSB_DMA_REQ_CONF,
+               TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+               TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+               TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+       /* Set 0 wait count for synchronous burst access */
+       musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int __init tusb_start(struct musb *musb)
+{
+       void __iomem    *tbase = musb->ctrl_base;
+       int             ret = 0;
+       unsigned long   flags;
+       u32             reg;
+
+       if (musb->board_set_power)
+               ret = musb->board_set_power(1);
+       if (ret != 0) {
+               printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+               return ret;
+       }
+
+       spin_lock_irqsave(&musb->lock, flags);
+
+       if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+               TUSB_PROD_TEST_RESET_VAL) {
+               printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+               goto err;
+       }
+
+       ret = tusb_print_revision(musb);
+       if (ret < 2) {
+               printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+                               ret);
+               goto err;
+       }
+
+       /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+        * NOR FLASH interface is used */
+       musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+       /* Select PHY free running 60MHz as a system clock */
+       tusb_set_clock_source(musb, 1);
+
+       /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+        * power saving, enable VBus detect and session end comparators,
+        * enable IDpullup, enable VBus charging */
+       musb_writel(tbase, TUSB_PRCM_MNGMT,
+               TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+               TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+               TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+               TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+               TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+       tusb_setup_cpu_interface(musb);
+
+       /* simplify:  always sense/pullup ID pins, as if in OTG mode */
+       reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+       reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+       musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+       reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+       reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+       musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       return 0;
+
+err:
+       spin_unlock_irqrestore(&musb->lock, flags);
+
+       if (musb->board_set_power)
+               musb->board_set_power(0);
+
+       return -ENODEV;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+       struct platform_device  *pdev;
+       struct resource         *mem;
+       void __iomem            *sync;
+       int                     ret;
+
+       pdev = to_platform_device(musb->controller);
+
+       /* dma address for async dma */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       musb->async = mem->start;
+
+       /* dma address for sync dma */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!mem) {
+               pr_debug("no sync dma resource?\n");
+               return -ENODEV;
+       }
+       musb->sync = mem->start;
+
+       sync = ioremap(mem->start, mem->end - mem->start + 1);
+       if (!sync) {
+               pr_debug("ioremap for sync failed\n");
+               return -ENOMEM;
+       }
+       musb->sync_va = sync;
+
+       /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+        * FIFOs at 0x600, TUSB at 0x800
+        */
+       musb->mregs += TUSB_BASE_OFFSET;
+
+       ret = tusb_start(musb);
+       if (ret) {
+               printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+                               ret);
+               return -ENODEV;
+       }
+       musb->isr = tusb_interrupt;
+
+       if (is_host_enabled(musb))
+               musb->board_set_vbus = tusb_source_power;
+       if (is_peripheral_enabled(musb))
+               musb->xceiv.set_power = tusb_draw_power;
+
+       setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+       return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+       del_timer_sync(&musb_idle_timer);
+
+       if (musb->board_set_power)
+               musb->board_set_power(0);
+
+       iounmap(musb->sync_va);
+
+       return 0;
+}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644 (file)
index 0000000..db6dad0
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+extern u8 tusb_get_revision(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb()                 1
+#else
+#define musb_in_tusb()                 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap()                        1
+#else
+#define tusb_dma_omap()                        0
+#endif
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL                        0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET               0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE                 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE              0x800
+
+#define TUSB_DEV_CONF                  (TUSB_SYS_REG_BASE + 0x000)
+#define                TUSB_DEV_CONF_USB_HOST_MODE             (1 << 16)
+#define                TUSB_DEV_CONF_PROD_TEST_MODE            (1 << 15)
+#define                TUSB_DEV_CONF_SOFT_ID                   (1 << 1)
+#define                TUSB_DEV_CONF_ID_SEL                    (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE       (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL              (TUSB_SYS_REG_BASE + 0x008)
+#define                TUSB_PHY_OTG_CTRL_WRPROTECT             (0xa5 << 24)
+#define                TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP         (1 << 23)
+#define                TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN       (1 << 19)
+#define                TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN       (1 << 18)
+#define                TUSB_PHY_OTG_CTRL_TESTM2                (1 << 17)
+#define                TUSB_PHY_OTG_CTRL_TESTM1                (1 << 16)
+#define                TUSB_PHY_OTG_CTRL_TESTM0                (1 << 15)
+#define                TUSB_PHY_OTG_CTRL_TX_DATA2              (1 << 14)
+#define                TUSB_PHY_OTG_CTRL_TX_GZ2                (1 << 13)
+#define                TUSB_PHY_OTG_CTRL_TX_ENABLE2            (1 << 12)
+#define                TUSB_PHY_OTG_CTRL_DM_PULLDOWN           (1 << 11)
+#define                TUSB_PHY_OTG_CTRL_DP_PULLDOWN           (1 << 10)
+#define                TUSB_PHY_OTG_CTRL_OSC_EN                (1 << 9)
+#define                TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v)      (((v) & 3) << 7)
+#define                TUSB_PHY_OTG_CTRL_PD                    (1 << 6)
+#define                TUSB_PHY_OTG_CTRL_PLL_ON                (1 << 5)
+#define                TUSB_PHY_OTG_CTRL_EXT_RPU               (1 << 4)
+#define                TUSB_PHY_OTG_CTRL_PWR_GOOD              (1 << 3)
+#define                TUSB_PHY_OTG_CTRL_RESET                 (1 << 2)
+#define                TUSB_PHY_OTG_CTRL_SUSPENDM              (1 << 1)
+#define                TUSB_PHY_OTG_CTRL_CLK_MODE              (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT              (TUSB_SYS_REG_BASE + 0x00c)
+#define                TUSB_DEV_OTG_STAT_PWR_CLK_GOOD          (1 << 8)
+#define                TUSB_DEV_OTG_STAT_SESS_END              (1 << 7)
+#define                TUSB_DEV_OTG_STAT_SESS_VALID            (1 << 6)
+#define                TUSB_DEV_OTG_STAT_VBUS_VALID            (1 << 5)
+#define                TUSB_DEV_OTG_STAT_VBUS_SENSE            (1 << 4)
+#define                TUSB_DEV_OTG_STAT_ID_STATUS             (1 << 3)
+#define                TUSB_DEV_OTG_STAT_HOST_DISCON           (1 << 2)
+#define                TUSB_DEV_OTG_STAT_LINE_STATE            (3 << 0)
+#define                TUSB_DEV_OTG_STAT_DP_ENABLE             (1 << 1)
+#define                TUSB_DEV_OTG_STAT_DM_ENABLE             (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER             (TUSB_SYS_REG_BASE + 0x010)
+#      define TUSB_DEV_OTG_TIMER_ENABLE                (1 << 31)
+#      define TUSB_DEV_OTG_TIMER_VAL(v)                ((v) & 0x07ffffff)
+#define TUSB_PRCM_REV                  (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF                 (TUSB_SYS_REG_BASE + 0x018)
+#define                TUSB_PRCM_CONF_SFW_CPEN         (1 << 24)
+#define                TUSB_PRCM_CONF_SYS_CLKSEL(v)    (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT                        (TUSB_SYS_REG_BASE + 0x01c)
+#define                TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v)        (((v) & 0xf) << 25)
+#define                TUSB_PRCM_MNGMT_SRP_FIX_EN              (1 << 24)
+#define                TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v)     (((v) & 0xf) << 20)
+#define                TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN       (1 << 19)
+#define                TUSB_PRCM_MNGMT_DFT_CLK_DIS             (1 << 18)
+#define                TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS           (1 << 17)
+#define                TUSB_PRCM_MNGMT_OTG_SESS_END_EN         (1 << 10)
+#define                TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN         (1 << 9)
+#define                TUSB_PRCM_MNGMT_OTG_ID_PULLUP           (1 << 8)
+#define                TUSB_PRCM_MNGMT_15_SW_EN                (1 << 4)
+#define                TUSB_PRCM_MNGMT_33_SW_EN                (1 << 3)
+#define                TUSB_PRCM_MNGMT_5V_CPEN                 (1 << 2)
+#define                TUSB_PRCM_MNGMT_PM_IDLE                 (1 << 1)
+#define                TUSB_PRCM_MNGMT_DEV_IDLE                (1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE                (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR         (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK          (TUSB_SYS_REG_BASE + 0x02c)
+#define                TUSB_PRCM_WAKEUP_RESERVED_BITS  (0xffffe << 13)
+#define                TUSB_PRCM_WGPIO_7       (1 << 12)
+#define                TUSB_PRCM_WGPIO_6       (1 << 11)
+#define                TUSB_PRCM_WGPIO_5       (1 << 10)
+#define                TUSB_PRCM_WGPIO_4       (1 << 9)
+#define                TUSB_PRCM_WGPIO_3       (1 << 8)
+#define                TUSB_PRCM_WGPIO_2       (1 << 7)
+#define                TUSB_PRCM_WGPIO_1       (1 << 6)
+#define                TUSB_PRCM_WGPIO_0       (1 << 5)
+#define                TUSB_PRCM_WHOSTDISCON   (1 << 4)        /* Host disconnect */
+#define                TUSB_PRCM_WBUS          (1 << 3)        /* USB bus resume */
+#define                TUSB_PRCM_WNORCS        (1 << 2)        /* NOR chip select */
+#define                TUSB_PRCM_WVBUS         (1 << 1)        /* OTG PHY VBUS */
+#define                TUSB_PRCM_WID           (1 << 0)        /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL             (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL             (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV              (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF             (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC             (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET             (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR           (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK            (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC               (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET               (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR             (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK              (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC              (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET              (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR            (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK             (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC                   (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET               (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR             (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK                  (TUSB_SYS_REG_BASE + 0x07c)
+#define                TUSB_INT_SRC_TXRX_DMA_DONE              (1 << 24)
+#define                TUSB_INT_SRC_USB_IP_CORE                (1 << 17)
+#define                TUSB_INT_SRC_OTG_TIMEOUT                (1 << 16)
+#define                TUSB_INT_SRC_VBUS_SENSE_CHNG            (1 << 15)
+#define                TUSB_INT_SRC_ID_STATUS_CHNG             (1 << 14)
+#define                TUSB_INT_SRC_DEV_WAKEUP                 (1 << 13)
+#define                TUSB_INT_SRC_DEV_READY                  (1 << 12)
+#define                TUSB_INT_SRC_USB_IP_TX                  (1 << 9)
+#define                TUSB_INT_SRC_USB_IP_RX                  (1 << 8)
+#define                TUSB_INT_SRC_USB_IP_VBUS_ERR            (1 << 7)
+#define                TUSB_INT_SRC_USB_IP_VBUS_REQ            (1 << 6)
+#define                TUSB_INT_SRC_USB_IP_DISCON              (1 << 5)
+#define                TUSB_INT_SRC_USB_IP_CONN                (1 << 4)
+#define                TUSB_INT_SRC_USB_IP_SOF                 (1 << 3)
+#define                TUSB_INT_SRC_USB_IP_RST_BABBLE          (1 << 2)
+#define                TUSB_INT_SRC_USB_IP_RESUME              (1 << 1)
+#define                TUSB_INT_SRC_USB_IP_SUSPEND             (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define                TUSB_INT_MASK_RESERVED_17               (0x3fff << 17)
+#define                TUSB_INT_MASK_RESERVED_13               (1 << 13)
+#define                TUSB_INT_MASK_RESERVED_8                (0xf << 8)
+#define                TUSB_INT_SRC_RESERVED_26                (0x1f << 26)
+#define                TUSB_INT_SRC_RESERVED_18                (0x3f << 18)
+#define                TUSB_INT_SRC_RESERVED_10                (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define                TUSB_INT_MASK_RESERVED_BITS     (TUSB_INT_MASK_RESERVED_17 | \
+                                               TUSB_INT_MASK_RESERVED_13 | \
+                                               TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define                TUSB_INT_SRC_RESERVED_BITS      (TUSB_INT_SRC_RESERVED_26 | \
+                                               TUSB_INT_SRC_RESERVED_18 | \
+                                               TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV                  (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF                 (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV              (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF              (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF                  (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP                        (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET              0x10c   /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET              0x14c   /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_WAIT_COUNT                        (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD               (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET           (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v)       (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY                (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE            (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v)               (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v)                (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v)                (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v)     (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN                  (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX                 (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v)            ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN                   (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v)             ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL               0xa596
+#define TUSB_EP_FIFO(ep)                       (TUSB_FIFO_BASE + (ep) * 0x20)
+
+#define TUSB_DIDR1_LO                          (TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI                          (TUSB_SYS_REG_BASE + 0x1fc)
+#define                TUSB_DIDR1_HI_CHIP_REV(v)               (((v) >> 17) & 0xf)
+#define                        TUSB_DIDR1_HI_REV_20            0
+#define                        TUSB_DIDR1_HI_REV_30            1
+#define                        TUSB_DIDR1_HI_REV_31            2
+
+#define TUSB_REV_10    0x10
+#define TUSB_REV_20    0x20
+#define TUSB_REV_30    0x30
+#define TUSB_REV_31    0x31
+
+/*----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_TUSB6010
+
+/* configuration parameters specific to this silicon */
+
+/* Number of Tx endpoints. Legal values are 1 - 16 (this value includes EP0) */
+#define MUSB_C_NUM_EPT 16
+
+/* Number of Rx endpoints. Legal values are 1 - 16 (this value includes EP0) */
+#define MUSB_C_NUM_EPR 16
+
+/* Endpoint 1 to 15 direction types. C_EP1_DEF is defined if either Tx endpoint
+ * 1 or Rx endpoint 1 is used.
+ */
+#define MUSB_C_EP1_DEF
+
+/* C_EP1_TX_DEF is defined if Tx endpoint 1 is used */
+#define MUSB_C_EP1_TX_DEF
+
+/* C_EP1_RX_DEF is defined if Rx endpoint 1 is used */
+#define MUSB_C_EP1_RX_DEF
+
+/* C_EP1_TOR_DEF is defined if Tx endpoint 1 and Rx endpoint 1 share a FIFO */
+/* #define C_EP1_TOR_DEF */
+
+/* C_EP1_TAR_DEF is defined if both Tx endpoint 1 and Rx endpoint 1 are used
+ * and do not share a FIFO.
+ */
+#define MUSB_C_EP1_TAR_DEF
+
+/* Similarly for all other used endpoints */
+#define MUSB_C_EP2_DEF
+#define MUSB_C_EP2_TX_DEF
+#define MUSB_C_EP2_RX_DEF
+#define MUSB_C_EP2_TAR_DEF
+#define MUSB_C_EP3_DEF
+#define MUSB_C_EP3_TX_DEF
+#define MUSB_C_EP3_RX_DEF
+#define MUSB_C_EP3_TAR_DEF
+#define MUSB_C_EP4_DEF
+#define MUSB_C_EP4_TX_DEF
+#define MUSB_C_EP4_RX_DEF
+#define MUSB_C_EP4_TAR_DEF
+
+/* Endpoint 1 to 15 FIFO address bits. Legal values are 3 to 13, corresponding
+ * to FIFO sizes of 8 to 8192 bytes (e.g. a value of 5 gives a 32-byte FIFO).
+ * If a Tx endpoint shares a FIFO with an Rx endpoint then the Rx FIFO size
+ * must be the same as the Tx FIFO size. All endpoints 1 to 15 must be
+ * defined; unused endpoints should be set to 2.
+ */
+#define MUSB_C_EP1T_BITS 5
+#define MUSB_C_EP1R_BITS 5
+#define MUSB_C_EP2T_BITS 5
+#define MUSB_C_EP2R_BITS 5
+#define MUSB_C_EP3T_BITS 3
+#define MUSB_C_EP3R_BITS 3
+#define MUSB_C_EP4T_BITS 3
+#define MUSB_C_EP4R_BITS 3
+
+#define MUSB_C_EP5T_BITS 2
+#define MUSB_C_EP5R_BITS 2
+#define MUSB_C_EP6T_BITS 2
+#define MUSB_C_EP6R_BITS 2
+#define MUSB_C_EP7T_BITS 2
+#define MUSB_C_EP7R_BITS 2
+#define MUSB_C_EP8T_BITS 2
+#define MUSB_C_EP8R_BITS 2
+#define MUSB_C_EP9T_BITS 2
+#define MUSB_C_EP9R_BITS 2
+#define MUSB_C_EP10T_BITS 2
+#define MUSB_C_EP10R_BITS 2
+#define MUSB_C_EP11T_BITS 2
+#define MUSB_C_EP11R_BITS 2
+#define MUSB_C_EP12T_BITS 2
+#define MUSB_C_EP12R_BITS 2
+#define MUSB_C_EP13T_BITS 2
+#define MUSB_C_EP13R_BITS 2
+#define MUSB_C_EP14T_BITS 2
+#define MUSB_C_EP14R_BITS 2
+#define MUSB_C_EP15T_BITS 2
+#define MUSB_C_EP15R_BITS 2
+
+/* Define the following constant if the USB2.0 Transceiver Macrocell data width
+ * is 16 bits.
+ */
+/* #define C_UTM_16 */
+
+/* Define this constant if the CPU uses big-endian byte ordering. */
+/* #define C_BIGEND */
+
+/* Define the following constant if any Tx endpoint is required to support
+ * multiple bulk packets.
+ */
+/* #define C_MP_TX */
+
+/* Define the following constant if any Rx endpoint is required to support
+ * multiple bulk packets.
+ */
+/* #define C_MP_RX */
+
+/* Define the following constant if any Tx endpoint is required to support high
+ * bandwidth ISO.
+ */
+/* #define C_HB_TX */
+
+/* Define the following constant if any Rx endpoint is required to support high
+ * bandwidth ISO.
+ */
+/* #define C_HB_RX */
+
+/* Define the following constant if software connect/disconnect control is
+ * required.
+ */
+#define MUSB_C_SOFT_CON
+
+/* Define the following constant if Vendor Control Registers are required. */
+/* #define C_VEND_REG */
+
+/* Vendor control register widths. */
+#define MUSB_C_VCTL_BITS 4
+#define MUSB_C_VSTAT_BITS 8
+
+/* Define the following constant to include a DMA controller. */
+/* #define C_DMA */
+
+/* Define the following constant if 2 or more DMA channels are required. */
+/* #define C_DMA2 */
+
+/* Define the following constant if 3 or more DMA channels are required. */
+/* #define C_DMA3 */
+
+/* Define the following constant if 4 or more DMA channels are required. */
+/* #define C_DMA4 */
+
+/* Define the following constant if 5 or more DMA channels are required. */
+/* #define C_DMA5 */
+
+/* Define the following constant if 6 or more DMA channels are required. */
+/* #define C_DMA6 */
+
+/* Define the following constant if 7 or more DMA channels are required. */
+/* #define C_DMA7 */
+
+/* Define the following constant if 8 or more DMA channels are required. */
+/* #define C_DMA8 */
+
+/* Enable Dynamic FIFO Sizing */
+#define MUSB_C_DYNFIFO_DEF
+
+/* Derived constants. The following constants are derived from the
+ * configuration constants above.
+ */
+
+/* Total number of endpoints. Legal values are 2 - 16. This must be equal to
+ * the larger of C_NUM_EPT and C_NUM_EPR.
+ */
+/* #define MUSB_C_NUM_EPS 5 */
+
+/* C_EPMAX_BITS is equal to the largest endpoint FIFO word address bits */
+#define MUSB_C_EPMAX_BITS 11
+
+/* C_RAM_BITS is the number of address bits required to address the FIFO RAM
+ * as 32-bit words. It is log2 (rounded up) of the sum of 2**(FIFO dword
+ * address bits) over all endpoints.
+ */
+#define MUSB_C_RAM_BITS 12
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif /* __TUSB6010_H__ */
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644 (file)
index 0000000..52f7f29
--- /dev/null
@@ -0,0 +1,719 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+
+#define to_chdat(c)            ((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ             5       /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+       struct musb             *musb;
+       void __iomem            *tbase;
+       unsigned long           phys_offset;
+       int                     epnum;
+       u8                      tx;
+       struct musb_hw_ep       *hw_ep;
+
+       int                     ch;
+       s8                      dmareq;
+       s8                      sync_dev;
+
+       struct tusb_omap_dma    *tusb_dma;
+
+       void __iomem            *dma_addr;
+
+       u32                     len;
+       u16                     packet_sz;
+       u16                     transfer_packet_sz;
+       u32                     transfer_len;
+       u32                     completed_len;
+};
+
+struct tusb_omap_dma {
+       struct dma_controller           controller;
+       struct musb                     *musb;
+       void __iomem                    *tbase;
+
+       int                             ch;
+       s8                              dmareq;
+       s8                              sync_dev;
+       unsigned                        multichannel:1;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+       struct tusb_omap_dma    *tusb_dma;
+
+       tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+       /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+       return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+       struct tusb_omap_dma    *tusb_dma;
+
+       tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+       /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+       return 0;
+}
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+       u32             reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+       if (reg != 0) {
+               DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+                       chdat->epnum, reg & 0xf);
+               return -EAGAIN;
+       }
+
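+       /* dmareq0 mapping: bits 3:0 hold the endpoint number, bit 4 marks tx */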
+       if (chdat->tx)
+               reg = (1 << 4) | chdat->epnum;
+       else
+               reg = chdat->epnum;
+
+       musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+       return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+       u32             reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+       if ((reg & 0xf) != chdat->epnum) {
+               printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+                       chdat->epnum, reg & 0xf);
+               return;
+       }
+       musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
+
+/*
+ * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+       struct dma_channel      *channel = (struct dma_channel *)data;
+       struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+       struct tusb_omap_dma    *tusb_dma = chdat->tusb_dma;
+       struct musb             *musb = chdat->musb;
+       struct musb_hw_ep       *hw_ep = chdat->hw_ep;
+       void __iomem            *ep_conf = hw_ep->conf;
+       void __iomem            *mbase = musb->mregs;
+       unsigned long           remaining, flags, pio;
+       int                     ch;
+
+       spin_lock_irqsave(&musb->lock, flags);
+
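+       /* With multichannel (TUSB rev >= 3.0) each channel has its own OMAP
+        * DMA channel; otherwise all endpoints share tusb_dma->ch.
+        */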
+       if (tusb_dma->multichannel)
+               ch = chdat->ch;
+       else
+               ch = tusb_dma->ch;
+
+       if (ch_status != OMAP_DMA_BLOCK_IRQ)
+               printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+       DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+               chdat->epnum, chdat->tx ? "tx" : "rx",
+               ch, ch_status);
+
+       if (chdat->tx)
+               remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+       else
+               remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+       remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+       /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
+       if (unlikely(remaining > chdat->transfer_len)) {
+               DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+                       chdat->tx ? "tx" : "rx", chdat->ch,
+                       remaining);
+               remaining = 0;
+       }
+
+       channel->actual_len = chdat->transfer_len - remaining;
+       pio = chdat->len - channel->actual_len;
+
+       DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+       /* DMA handled only 32-byte multiples; move the remaining
+        * 1 - 31 bytes by PIO.
+        */
+       if (pio > 0 && pio < 32) {
+               u8      *buf;
+
+               DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+               buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+               if (chdat->tx) {
+                       dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+                                       chdat->transfer_len, DMA_TO_DEVICE);
+                       musb_write_fifo(hw_ep, pio, buf);
+               } else {
+                       musb_read_fifo(hw_ep, pio, buf);
+                       dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+                                       chdat->transfer_len, DMA_FROM_DEVICE);
+               }
+               channel->actual_len += pio;
+       }
+
+       if (!tusb_dma->multichannel)
+               tusb_omap_free_shared_dmareq(chdat);
+
+       channel->status = MUSB_DMA_STATUS_FREE;
+
+       /* Handle only RX callbacks here. TX callbacks must be handled based
+        * on the TUSB DMA status interrupt.
+        * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+        * interrupt for RX and TX.
+        */
+       if (!chdat->tx)
+               musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+       /* We must terminate short tx transfers manually by setting TXPKTRDY.
+        * REVISIT: This same problem may occur with other MUSB dma as well.
+        * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+        */
+       if ((chdat->transfer_len < chdat->packet_sz)
+                       || (chdat->transfer_len % chdat->packet_sz != 0)) {
+               u16     csr;
+
+               if (chdat->tx) {
+                       DBG(3, "terminating short tx packet\n");
+                       musb_ep_select(mbase, chdat->epnum);
+                       csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+                       csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+                               | MUSB_TXCSR_P_WZC_BITS;
+                       musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+               }
+       }
+
+       spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+                               u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+       struct tusb_omap_dma_ch         *chdat = to_chdat(channel);
+       struct tusb_omap_dma            *tusb_dma = chdat->tusb_dma;
+       struct musb                     *musb = chdat->musb;
+       struct musb_hw_ep               *hw_ep = chdat->hw_ep;
+       void __iomem                    *mbase = musb->mregs;
+       void __iomem                    *ep_conf = hw_ep->conf;
+       dma_addr_t                      fifo = hw_ep->fifo_sync;
+       struct omap_dma_channel_params  dma_params;
+       u32                             dma_remaining;
+       int                             src_burst, dst_burst;
+       u16                             csr;
+       int                             ch;
+       s8                              dmareq;
+       s8                              sync_dev;
+
+       if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+               return false;
+
+       /*
+        * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+        * register, which will cause a missed DMA interrupt. We could try to
+        * use a timer for the callback, but that is unsafe: with XFR_SIZE
+        * corrupt we would not know whether the DMA actually worked.
+        */
+       if (dma_addr & 0x2)
+               return false;
+
+       /*
+        * Because of HW issue #10, it seems like mixing sync DMA and async
+        * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+        * using the channel for DMA.
+        */
+       if (chdat->tx)
+               dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+       else
+               dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+       dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+       if (dma_remaining) {
+               DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+                       chdat->tx ? "tx" : "rx", chdat->ch,
+                       dma_remaining);
+               return false;
+       }
+
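+       /* DMA only the 32-byte-aligned part of the buffer; the completion
+        * callback moves any 1 - 31 byte tail with PIO.
+        */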
+       chdat->transfer_len = len & ~0x1f;
+
+       if (len < packet_sz)
+               chdat->transfer_packet_sz = chdat->transfer_len;
+       else
+               chdat->transfer_packet_sz = packet_sz;
+
+       if (tusb_dma->multichannel) {
+               ch = chdat->ch;
+               dmareq = chdat->dmareq;
+               sync_dev = chdat->sync_dev;
+       } else {
+               if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+                       DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+                       return false;
+               }
+               if (tusb_dma->ch < 0) {
+                       /* REVISIT: This should get blocked earlier, happens
+                        * with MSC ErrorRecoveryTest
+                        */
+                       WARN_ON(1);
+                       return false;
+               }
+
+               ch = tusb_dma->ch;
+               dmareq = tusb_dma->dmareq;
+               sync_dev = tusb_dma->sync_dev;
+               omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+       }
+
+       chdat->packet_sz = packet_sz;
+       chdat->len = len;
+       channel->actual_len = 0;
+       chdat->dma_addr = (void __iomem *)dma_addr;
+       channel->status = MUSB_DMA_STATUS_BUSY;
+
+       /* Since we're recycling dma areas, we need to clean or invalidate */
+       if (chdat->tx)
+               dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+       else
+               dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+
+       /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+       if ((dma_addr & 0x3) == 0) {
+               dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+               dma_params.elem_count = 8;              /* Elements in frame */
+       } else {
+               dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+               dma_params.elem_count = 16;             /* Elements in frame */
+               fifo = hw_ep->fifo_async;
+       }
+
+       dma_params.frame_count  = chdat->transfer_len / 32; /* Burst sz frame */
+
+       DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+               chdat->epnum, chdat->tx ? "tx" : "rx",
+               ch, dma_addr, chdat->transfer_len, len,
+               chdat->transfer_packet_sz, packet_sz);
+
+       /*
+        * Prepare omap DMA for transfer
+        */
+       if (chdat->tx) {
+               dma_params.src_amode    = OMAP_DMA_AMODE_POST_INC;
+               dma_params.src_start    = (unsigned long)dma_addr;
+               dma_params.src_ei       = 0;
+               dma_params.src_fi       = 0;
+
+               dma_params.dst_amode    = OMAP_DMA_AMODE_DOUBLE_IDX;
+               dma_params.dst_start    = (unsigned long)fifo;
+               dma_params.dst_ei       = 1;
+               dma_params.dst_fi       = -31;  /* Loop 32 byte window */
+
+               dma_params.trigger      = sync_dev;
+               dma_params.sync_mode    = OMAP_DMA_SYNC_FRAME;
+               dma_params.src_or_dst_synch     = 0;    /* Dest sync */
+
+               src_burst = OMAP_DMA_DATA_BURST_16;     /* 16x32 read */
+               dst_burst = OMAP_DMA_DATA_BURST_8;      /* 8x32 write */
+       } else {
+               dma_params.src_amode    = OMAP_DMA_AMODE_DOUBLE_IDX;
+               dma_params.src_start    = (unsigned long)fifo;
+               dma_params.src_ei       = 1;
+               dma_params.src_fi       = -31;  /* Loop 32 byte window */
+
+               dma_params.dst_amode    = OMAP_DMA_AMODE_POST_INC;
+               dma_params.dst_start    = (unsigned long)dma_addr;
+               dma_params.dst_ei       = 0;
+               dma_params.dst_fi       = 0;
+
+               dma_params.trigger      = sync_dev;
+               dma_params.sync_mode    = OMAP_DMA_SYNC_FRAME;
+               dma_params.src_or_dst_synch     = 1;    /* Source sync */
+
+               src_burst = OMAP_DMA_DATA_BURST_8;      /* 8x32 read */
+               dst_burst = OMAP_DMA_DATA_BURST_16;     /* 16x32 write */
+       }
+
+       DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+               chdat->epnum, chdat->tx ? "tx" : "rx",
+               (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+               ((dma_addr & 0x3) == 0) ? "sync" : "async",
+               dma_params.src_start, dma_params.dst_start);
+
+       omap_set_dma_params(ch, &dma_params);
+       omap_set_dma_src_burst_mode(ch, src_burst);
+       omap_set_dma_dest_burst_mode(ch, dst_burst);
+       omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+       /*
+        * Prepare MUSB for DMA transfer
+        */
+       if (chdat->tx) {
+               musb_ep_select(mbase, chdat->epnum);
+               csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+               csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+                       | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+               csr &= ~MUSB_TXCSR_P_UNDERRUN;
+               musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+       } else {
+               musb_ep_select(mbase, chdat->epnum);
+               csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+               csr |= MUSB_RXCSR_DMAENAB;
+               csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+               musb_writew(hw_ep->regs, MUSB_RXCSR,
+                       csr | MUSB_RXCSR_P_WZC_BITS);
+       }
+
+       /*
+        * Start DMA transfer
+        */
+       omap_start_dma(ch);
+
+       if (chdat->tx) {
+               /* Send transfer_packet_sz packets at a time */
+               musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+                       chdat->transfer_packet_sz);
+
+               musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+                       TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+       } else {
+               /* Receive transfer_packet_sz packets at a time */
+               musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+                       chdat->transfer_packet_sz << 16);
+
+               musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+                       TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+       }
+
+       return true;
+}
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+       struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+       struct tusb_omap_dma    *tusb_dma = chdat->tusb_dma;
+
+       if (!tusb_dma->multichannel) {
+               if (tusb_dma->ch >= 0) {
+                       omap_stop_dma(tusb_dma->ch);
+                       omap_free_dma(tusb_dma->ch);
+                       tusb_dma->ch = -1;
+               }
+
+               tusb_dma->dmareq = -1;
+               tusb_dma->sync_dev = -1;
+       }
+
+       channel->status = MUSB_DMA_STATUS_FREE;
+
+       return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+       u32             reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+       int             i, dmareq_nr = -1;
+
+       const int sync_dev[6] = {
+               OMAP24XX_DMA_EXT_DMAREQ0,
+               OMAP24XX_DMA_EXT_DMAREQ1,
+               OMAP242X_DMA_EXT_DMAREQ2,
+               OMAP242X_DMA_EXT_DMAREQ3,
+               OMAP242X_DMA_EXT_DMAREQ4,
+               OMAP242X_DMA_EXT_DMAREQ5,
+       };
+
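+       /* TUSB_DMA_EP_MAP holds 5 bits per dmareq: bits 3:0 endpoint number,
+        * bit 4 tx direction. Scan for a free slot.
+        */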
+       for (i = 0; i < MAX_DMAREQ; i++) {
+               int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+               if (cur == 0) {
+                       dmareq_nr = i;
+                       break;
+               }
+       }
+
+       if (dmareq_nr == -1)
+               return -EAGAIN;
+
+       reg |= (chdat->epnum << (dmareq_nr * 5));
+       if (chdat->tx)
+               reg |= ((1 << 4) << (dmareq_nr * 5));
+       musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+       chdat->dmareq = dmareq_nr;
+       chdat->sync_dev = sync_dev[chdat->dmareq];
+
+       return 0;
+}
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+       u32 reg;
+
+       if (!chdat || chdat->dmareq < 0)
+               return;
+
+       reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+       reg &= ~(0x1f << (chdat->dmareq * 5));
+       musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+       chdat->dmareq = -1;
+       chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+               struct musb_hw_ep *hw_ep,
+               u8 tx)
+{
+       int ret, i;
+       const char              *dev_name;
+       struct tusb_omap_dma    *tusb_dma;
+       struct musb             *musb;
+       void __iomem            *tbase;
+       struct dma_channel      *channel = NULL;
+       struct tusb_omap_dma_ch *chdat = NULL;
+       u32                     reg;
+
+       tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+       musb = tusb_dma->musb;
+       tbase = musb->ctrl_base;
+
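+       /* Unmask the TUSB DMA interrupt for this endpoint
+        * (tx uses bit epnum, rx uses bit epnum + 15).
+        */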
+       reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+       if (tx)
+               reg &= ~(1 << hw_ep->epnum);
+       else
+               reg &= ~(1 << (hw_ep->epnum + 15));
+       musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+       /* REVISIT: Why does dmareq5 not work? */
+       if (hw_ep->epnum == 0) {
+               DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+               return NULL;
+       }
+
+       for (i = 0; i < MAX_DMAREQ; i++) {
+               struct dma_channel *ch = dma_channel_pool[i];
+               if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+                       ch->status = MUSB_DMA_STATUS_FREE;
+                       channel = ch;
+                       chdat = ch->private_data;
+                       break;
+               }
+       }
+
+       if (!channel)
+               return NULL;
+
+       if (tx) {
+               chdat->tx = 1;
+               dev_name = "TUSB transmit";
+       } else {
+               chdat->tx = 0;
+               dev_name = "TUSB receive";
+       }
+
+       chdat->musb = tusb_dma->musb;
+       chdat->tbase = tusb_dma->tbase;
+       chdat->hw_ep = hw_ep;
+       chdat->epnum = hw_ep->epnum;
+       chdat->dmareq = -1;
+       chdat->completed_len = 0;
+       chdat->tusb_dma = tusb_dma;
+
+       channel->max_len = 0x7fffffff;
+       channel->desired_mode = 0;
+       channel->actual_len = 0;
+
+       if (tusb_dma->multichannel) {
+               ret = tusb_omap_dma_allocate_dmareq(chdat);
+               if (ret != 0)
+                       goto free_dmareq;
+
+               ret = omap_request_dma(chdat->sync_dev, dev_name,
+                               tusb_omap_dma_cb, channel, &chdat->ch);
+               if (ret != 0)
+                       goto free_dmareq;
+       } else if (tusb_dma->ch == -1) {
+               tusb_dma->dmareq = 0;
+               tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+               /* Callback data gets set later in the shared dmareq case */
+               ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+                               tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+               if (ret != 0)
+                       goto free_dmareq;
+
+               chdat->dmareq = -1;
+               chdat->ch = -1;
+       }
+
+       DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+               chdat->epnum,
+               chdat->tx ? "tx" : "rx",
+               chdat->ch >= 0 ? "dedicated" : "shared",
+               chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+               chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+               chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+       return channel;
+
+free_dmareq:
+       tusb_omap_dma_free_dmareq(chdat);
+
+       DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+       channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+       return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+       struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+       struct musb             *musb = chdat->musb;
+       void __iomem            *tbase = musb->ctrl_base;
+       u32                     reg;
+
+       DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
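+       /* Mask this endpoint's TUSB DMA interrupt again and clear any
+        * pending status.
+        */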
+       reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+       if (chdat->tx)
+               reg |= (1 << chdat->epnum);
+       else
+               reg |= (1 << (chdat->epnum + 15));
+       musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+       reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
+       if (chdat->tx)
+               reg |= (1 << chdat->epnum);
+       else
+               reg |= (1 << (chdat->epnum + 15));
+       musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+
+       channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+       if (chdat->ch >= 0) {
+               omap_stop_dma(chdat->ch);
+               omap_free_dma(chdat->ch);
+               chdat->ch = -1;
+       }
+
+       if (chdat->dmareq >= 0)
+               tusb_omap_dma_free_dmareq(chdat);
+
+       channel = NULL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+       struct tusb_omap_dma    *tusb_dma;
+       int                     i;
+
+       tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+       for (i = 0; i < MAX_DMAREQ; i++) {
+               struct dma_channel *ch = dma_channel_pool[i];
+               if (ch) {
+                       kfree(ch->private_data);
+                       kfree(ch);
+               }
+       }
+
+       if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
+               omap_free_dma(tusb_dma->ch);
+
+       kfree(tusb_dma);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *base)
+{
+       void __iomem            *tbase = musb->ctrl_base;
+       struct tusb_omap_dma    *tusb_dma;
+       int                     i;
+
+       /* REVISIT: Get dmareq lines used from board-*.c */
+
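+       /* Mask all TUSB DMA interrupts and clear the endpoint-to-dmareq
+        * mapping before configuring the controller.
+        */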
+       musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+       musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+       musb_writel(tbase, TUSB_DMA_REQ_CONF,
+               TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+               | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+               | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+       tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+       if (!tusb_dma)
+               goto cleanup;
+
+       tusb_dma->musb = musb;
+       tusb_dma->tbase = musb->ctrl_base;
+
+       tusb_dma->ch = -1;
+       tusb_dma->dmareq = -1;
+       tusb_dma->sync_dev = -1;
+
+       tusb_dma->controller.start = tusb_omap_dma_start;
+       tusb_dma->controller.stop = tusb_omap_dma_stop;
+       tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+       tusb_dma->controller.channel_release = tusb_omap_dma_release;
+       tusb_dma->controller.channel_program = tusb_omap_dma_program;
+       tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+       if (tusb_get_revision(musb) >= TUSB_REV_30)
+               tusb_dma->multichannel = 1;
+
+       for (i = 0; i < MAX_DMAREQ; i++) {
+               struct dma_channel      *ch;
+               struct tusb_omap_dma_ch *chdat;
+
+               ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+               if (!ch)
+                       goto cleanup;
+
+               dma_channel_pool[i] = ch;
+
+               chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+               if (!chdat)
+                       goto cleanup;
+
+               ch->status = MUSB_DMA_STATUS_UNKNOWN;
+               ch->private_data = chdat;
+       }
+
+       return &tusb_dma->controller;
+
+cleanup:
+       dma_controller_destroy(&tusb_dma->controller);
+
+       return NULL;
+}
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
new file mode 100644 (file)
index 0000000..d325a0d
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * This is used for host and peripheral modes of the driver for
+ * Inventra (Multidrop) Highspeed Dual-Role Controllers:  (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musb_hdrc".  It encapsulates
+ * key configuration differences between boards.
+ */
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed.  (Developer boards sometimes won't.)
+ */
+enum musb_mode {
+       MUSB_UNDEFINED = 0,
+       MUSB_HOST,              /* A or Mini-A connector */
+       MUSB_PERIPHERAL,        /* B or Mini-B connector */
+       MUSB_OTG                /* Mini-AB connector */
+};
+
+struct clk;
+
+struct musb_hdrc_platform_data {
+       /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
+       u8              mode;
+
+       /* for clk_get() */
+       const char      *clock;
+
+       /* (HOST or OTG) switch VBUS on/off */
+       int             (*set_vbus)(struct device *dev, int is_on);
+
+       /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
+       u8              power;
+
+       /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */
+       u8              min_power;
+
+       /* (HOST or OTG) msec/2 after VBUS on till power good */
+       u8              potpgt;
+
+       /* TBD:  chip defaults should probably go someplace else,
+        * e.g. number of tx/rx endpoints, etc
+        */
+       unsigned        multipoint:1;
+
+       /* Power the device on or off */
+       int             (*set_power)(int state);
+
+       /* Turn device clock on or off */
+       int             (*set_clock)(struct clk *clock, int is_on);
+};
+
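+/* A minimal sketch of how a board file might fill this in (illustrative only;
+ * the variable name and the values below are assumptions, not part of this
+ * header):
+ *
+ *	static struct musb_hdrc_platform_data board_musb_data = {
+ *		.mode		= MUSB_OTG,
+ *		.power		= 250,	// 500 mA VBUS supplied, in units of 2 mA
+ *		.min_power	= 50,	// 100 mA max draw as peripheral, 2 mA units
+ *		.multipoint	= 1,
+ *	};
+ *
+ * The structure then goes into dev->platform_data of the "musb_hdrc"
+ * platform_device registered by the board code.
+ */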
+
+/* TUSB 6010 support */
+
+#define        TUSB6010_OSCCLK_60      16667   /* psec/clk @ 60.0 MHz */
+#define        TUSB6010_REFCLK_24      41667   /* psec/clk @ 24.0 MHz XI */
+#define        TUSB6010_REFCLK_19      52083   /* psec/clk @ 19.2 MHz CLKIN */
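+/* The values above are the clock periods in picoseconds, i.e. 10^12 / f:
+ * 60 MHz -> 16667 ps, 24 MHz -> 41667 ps, 19.2 MHz -> 52083 ps.
+ */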
+
+#ifdef CONFIG_ARCH_OMAP2
+
+extern int __init tusb6010_setup_interface(
+               struct musb_hdrc_platform_data *data,
+               unsigned ps_refclk, unsigned waitpin,
+               unsigned async_cs, unsigned sync_cs,
+               unsigned irq, unsigned dmachan);
+
+extern int tusb6010_platform_retime(unsigned is_refclk);
+
+#endif /* OMAP2 */