]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - drivers/usb/musb/musb_gadget.c
Merge omap-upstream
[linux-2.6-omap-h63xx.git] / drivers / usb / musb / musb_gadget.c
1 /******************************************************************
2  * Copyright 2005 Mentor Graphics Corporation
3  * Copyright (C) 2005-2006 by Texas Instruments
4  *
5  * This file is part of the Inventra Controller Driver for Linux.
6  *
7  * The Inventra Controller Driver for Linux is free software; you
8  * can redistribute it and/or modify it under the terms of the GNU
9  * General Public License version 2 as published by the Free Software
10  * Foundation.
11  *
12  * The Inventra Controller Driver for Linux is distributed in
13  * the hope that it will be useful, but WITHOUT ANY WARRANTY;
14  * without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
16  * License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with The Inventra Controller Driver for Linux ; if not,
20  * write to the Free Software Foundation, Inc., 59 Temple Place,
21  * Suite 330, Boston, MA  02111-1307  USA
22  *
23  * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
24  * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
25  * OF THOSE TERMS.THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
26  * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
27  * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
28  * OF MERCHANTABILITY; FITNESS FOR A PARTICULAR PURPOSE AND
29  * NON-INFRINGEMENT.  MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
30  * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
31  * GRAPHICS SUPPORT CUSTOMER.
32  ******************************************************************/
33
34 #include <linux/kernel.h>
35 #include <linux/list.h>
36 #include <linux/timer.h>
37 #include <linux/module.h>
38 #include <linux/smp.h>
39 #include <linux/spinlock.h>
40 #include <linux/delay.h>
41 #include <linux/moduleparam.h>
42 #include <linux/stat.h>
43 #include <linux/dma-mapping.h>
44
45 #include "musbdefs.h"
46
47
48 /* MUSB PERIPHERAL status 3-mar:
49  *
50  * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
51  *   Minor glitches:
52  *
53  *     + remote wakeup to Linux hosts work, but saw USBCV failures;
54  *       in one test run (operator error?)
55  *     + endpoint halt tests -- in both usbtest and usbcv -- seem
56  *       to break when dma is enabled ... is something wrongly
57  *       clearing SENDSTALL?
58  *
59  * - Mass storage behaved ok when last tested.  Network traffic patterns
60  *   (with lots of short transfers etc) need retesting; they turn up the
61  *   worst cases of the DMA, since short packets are typical but are not
62  *   required.
63  *
64  * - TX/IN
65  *     + both pio and dma behave correctly with network and g_zero tests
66  *     + no cppi throughput issues other than no-hw-queueing
67  *     + failed with FLAT_REG (DaVinci)
68  *     + seems to behave with double buffering, PIO -and- CPPI
69  *     + with gadgetfs + AIO, requests got lost?
70  *
71  * - RX/OUT
72  *     + both pio and dma behave correctly with network and g_zero tests
73  *     + dma is slow in typical case (short_not_ok is clear)
74  *     + double buffering ok with PIO
75  *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
76  *     + request lossage observed with gadgetfs
77  *
78  * - ISO not tested ... might work, but only weakly isochronous
79  *
80  * - Gadget driver disabling of softconnect during bind() is ignored; so
81  *   drivers can't hold off host requests until userspace is ready.
82  *   (Workaround:  they can turn it off later.)
83  *
84  * - PORTABILITY (assumes PIO works):
85  *     + DaVinci, basically works with cppi dma
86  *     + OMAP 2430, ditto with mentor dma
87  *     + TUSB 6010, platform-specific dma in the works
88  */
89
90 /**************************************************************************
91 Handling completion
92 **************************************************************************/
93
94 /*
95  * Immediately complete a request.
96  *
97  * @param pRequest the request to complete
98  * @param status the status to complete the request with
99  * Context: controller locked, IRQs blocked.
100  */
/*
 * Immediately complete (give back) a request to the gadget driver.
 *
 * Unlinks the request, finalizes its status, undoes any DMA mapping this
 * driver created for it, and invokes the gadget driver's completion
 * callback.  The controller lock is dropped across the unmap and the
 * callback (hence the __releases/__acquires annotations) because the
 * completion handler is allowed to re-enter the driver, e.g. to queue
 * another request.
 *
 * @ep       endpoint the request was queued on
 * @pRequest the request to complete
 * @status   status to report, unless the request already failed
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*pRequest,
	int status)
__releases(ep->musb->Lock)
__acquires(ep->musb->Lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;	/* preserve caller's busy state */

	req = to_musb_request(pRequest);

	list_del(&pRequest->list);
	/* don't overwrite a status some earlier fault already recorded */
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	/* mark busy so re-entrant queue/dequeue from the completion
	 * callback can tell the endpoint is mid-giveback
	 */
	ep->busy = 1;
	spin_unlock(&musb->Lock);
	if (is_dma_capable()) {
		if (req->mapped) {
			/* we mapped the buffer in ep_queue; unmap it here */
			dma_unmap_single(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
			req->request.dma = DMA_ADDR_INVALID;
			req->mapped = 0;
		} else if (req->request.dma != DMA_ADDR_INVALID)
			/* gadget driver mapped it; just sync for the CPU */
			dma_sync_single_for_cpu(musb->controller,
					req->request.dma,
					req->request.length,
					req->bTx
						? DMA_TO_DEVICE
						: DMA_FROM_DEVICE);
	}
	if (pRequest->status == 0)
		DBG(5, "%s done request %p,  %d/%d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length);
	else
		DBG(2, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, pRequest,
				req->request.actual, req->request.length,
				pRequest->status);
	/* completion runs unlocked; it may queue a new request */
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->Lock);
	ep->busy = busy;
}
152
153 /* ----------------------------------------------------------------------- */
154
155 /*
156  * Abort requests queued to an endpoint using the status. Synchronous.
157  * caller locked controller and blocked irqs, and selected this ep.
158  */
159 static void nuke(struct musb_ep *ep, const int status)
160 {
161         struct musb_request     *req = NULL;
162         void __iomem *epio = ep->pThis->aLocalEnd[ep->bEndNumber].regs;
163
164         ep->busy = 1;
165
166         if (is_dma_capable() && ep->dma) {
167                 struct dma_controller   *c = ep->pThis->pDmaController;
168                 int value;
169                 if (ep->is_in) {
170                         musb_writew(epio, MGC_O_HDRC_TXCSR,
171                                         0 | MGC_M_TXCSR_FLUSHFIFO);
172                         musb_writew(epio, MGC_O_HDRC_TXCSR,
173                                         0 | MGC_M_TXCSR_FLUSHFIFO);
174                 } else {
175                         musb_writew(epio, MGC_O_HDRC_RXCSR,
176                                         0 | MGC_M_RXCSR_FLUSHFIFO);
177                         musb_writew(epio, MGC_O_HDRC_RXCSR,
178                                         0 | MGC_M_RXCSR_FLUSHFIFO);
179                 }
180
181                 value = c->channel_abort(ep->dma);
182                 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
183                 c->channel_release(ep->dma);
184                 ep->dma = NULL;
185         }
186
187         while (!list_empty(&(ep->req_list))) {
188                 req = container_of(ep->req_list.next, struct musb_request,
189                                 request.list);
190                 musb_g_giveback(ep, &req->request, status);
191         }
192 }
193
194 /**************************************************************************
195  * TX/IN and RX/OUT Data transfers
196  **************************************************************************/
197
198 /*
199  * This assumes the separate CPPI engine is responding to DMA requests
200  * from the usb core ... sequenced a bit differently from mentor dma.
201  */
202
203 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
204 {
205         if (can_bulk_split(musb, ep->type))
206                 return ep->hw_ep->wMaxPacketSizeTx;
207         else
208                 return ep->wPacketSize;
209 }
210
211
212 #ifdef CONFIG_USB_INVENTRA_DMA
213
214 /* Peripheral tx (IN) using Mentor DMA works as follows:
215         Only mode 0 is used for transfers <= wPktSize,
216         mode 1 is used for larger transfers,
217
218         One of the following happens:
219         - Host sends IN token which causes an endpoint interrupt
220                 -> TxAvail
221                         -> if DMA is currently busy, exit.
222                         -> if queue is non-empty, txstate().
223
224         - Request is queued by the gadget driver.
225                 -> if queue was previously empty, txstate()
226
227         txstate()
228                 -> start
229                   /\    -> setup DMA
230                   |     (data is transferred to the FIFO, then sent out when
231  |     IN token(s) are received from Host.
232                   |             -> DMA interrupt on completion
233                   |                calls TxAvail.
234  |                   -> stop DMA, ~DmaEnab,
235                   |                   -> set TxPktRdy for last short pkt or zlp
236                   |                   -> Complete Request
237                   |                   -> Continue next request (call txstate)
238                   |___________________________________|
239
240  * Non-Mentor DMA engines can of course work differently, such as by
241  * upleveling from irq-per-packet to irq-per-buffer.
242  */
243
244 #endif
245
246 /*
247  * An endpoint is transmitting data. This can be called either from
248  * the IRQ routine or from ep.queue() to kickstart a request on an
249  * endpoint.
250  *
251  * Context: controller locked, IRQs blocked, endpoint selected
252  */
/*
 * Load the next chunk of a TX (IN) request into the endpoint, either by
 * programming a DMA channel or by PIO into the FIFO.  Bails out early if
 * DMA is still busy, a packet is already loaded, or the endpoint is
 * stalled.  Called from the IRQ path (musb_g_tx) and from ep.queue().
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			bEnd = req->bEnd;
	struct musb_ep		*pEnd;
	void __iomem		*epio = musb->aLocalEnd[bEnd].regs;
	struct usb_request	*pRequest;
	u16			wFifoCount = 0, wCsrVal;
	int			use_dma = 0;

	pEnd = req->ep;

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(pEnd->dma) == MGC_DMA_STATUS_BUSY) {
		DBG(4, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);

	/* PIO writes at most one max-size unit per call */
	pRequest = &req->request;
	wFifoCount = min(max_ep_writesize(musb, pEnd),
			(int)(pRequest->length - pRequest->actual));

	/* previous packet not yet drained by the host; irq will recall us */
	if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
		DBG(5, "%s old packet still ready , txcsr %03x\n",
				pEnd->end_point.name, wCsrVal);
		return;
	}

	if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
		DBG(5, "%s stalling, txcsr %03x\n",
				pEnd->end_point.name, wCsrVal);
		return;
	}

	DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			bEnd, pEnd->wPacketSize, wFifoCount,
			wCsrVal);

#ifndef CONFIG_USB_INVENTRA_FIFO
	if (is_dma_capable() && pEnd->dma) {
		struct dma_controller	*c = musb->pDmaController;

		/* only requests with a DMA-mapped buffer are eligible */
		use_dma = (pRequest->dma != DMA_ADDR_INVALID);

		/* MGC_M_TXCSR_P_ISO is still set correctly */

#ifdef CONFIG_USB_INVENTRA_DMA
		{
			size_t request_size;

			/* setup DMA, then program endpoint CSR */
			request_size = min(pRequest->length,
						pEnd->dma->dwMaxLength);
			/* mode 0 for single-packet transfers, mode 1 for
			 * multi-packet (see block comment above)
			 */
			if (request_size <= pEnd->wPacketSize)
				pEnd->dma->bDesiredMode = 0;
			else
				pEnd->dma->bDesiredMode = 1;

			use_dma = use_dma && c->channel_program(
					pEnd->dma, pEnd->wPacketSize,
					pEnd->dma->bDesiredMode,
					pRequest->dma, request_size);
			if (use_dma) {
				if (pEnd->dma->bDesiredMode == 0) {
					/* ASSERT: DMAENAB is clear */
					wCsrVal &= ~(MGC_M_TXCSR_AUTOSET |
							MGC_M_TXCSR_DMAMODE);
					wCsrVal |= (MGC_M_TXCSR_DMAENAB |
							MGC_M_TXCSR_MODE);
					// against programming guide
				}
				else
					wCsrVal |= (MGC_M_TXCSR_AUTOSET
							| MGC_M_TXCSR_DMAENAB
							| MGC_M_TXCSR_DMAMODE
							| MGC_M_TXCSR_MODE);

				wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
				musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		wCsrVal &= ~(MGC_M_TXCSR_AUTOSET
				| MGC_M_TXCSR_DMAMODE
				| MGC_M_TXCSR_P_UNDERRUN
				| MGC_M_TXCSR_TXPKTRDY);
		wCsrVal |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_DMAENAB;
		/* write-to-clear bits are masked so UNDERRUN isn't cleared
		 * accidentally here; it's cleared explicitly above
		 */
		musb_writew(epio, MGC_O_HDRC_TXCSR,
			(MGC_M_TXCSR_P_WZC_BITS & ~MGC_M_TXCSR_P_UNDERRUN)
				| wCsrVal);

		/* ensure writebuffer is empty */
		wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				pEnd->dma, pEnd->wPacketSize,
				0,
				pRequest->dma,
				pRequest->length);
		if (!use_dma) {
			/* DMA setup failed: fall back to PIO for good */
			c->channel_release(pEnd->dma);
			pEnd->dma = NULL;
			/* ASSERT: DMAENAB clear */
			wCsrVal &= ~(MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
			/* invariant: prequest->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		/* TUSB glue handles ZLP decisions via request->zero */
		use_dma = use_dma && c->channel_program(
				pEnd->dma, pEnd->wPacketSize,
				pRequest->zero,
				pRequest->dma,
				pRequest->length);
#endif
	}
#endif

	if (!use_dma) {
		/* PIO: copy one packet's worth and arm TXPKTRDY */
		musb_write_fifo(pEnd->hw_ep, wFifoCount,
				(u8 *) (pRequest->buf + pRequest->actual));
		pRequest->actual += wFifoCount;
		wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
		wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
		musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
	}

	/* host may already have the data when this message shows... */
	DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			pEnd->end_point.name, use_dma ? "dma" : "pio",
			pRequest->actual, pRequest->length,
			musb_readw(epio, MGC_O_HDRC_TXCSR),
			wFifoCount,
			musb_readw(epio, MGC_O_HDRC_TXMAXP));
}
400
401 /*
402  * FIFO state update (e.g. data ready).
403  * Called from IRQ,  with controller locked.
404  */
/*
 * TX endpoint interrupt handler: service a FIFO state change on ep_in.
 * Handles SENTSTALL and UNDERRUN conditions, winds down a completed DMA
 * transfer, sends a terminating ZLP when needed, gives back finished
 * requests, and kicks off the next queued transfer via txstate().
 *
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 bEnd)
{
	u16			wCsrVal;
	struct usb_request	*pRequest;
	u8 __iomem		*pBase = musb->pRegs;
	struct musb_ep		*pEnd = &musb->aLocalEnd[bEnd].ep_in;
	void __iomem		*epio = musb->aLocalEnd[bEnd].regs;
	struct dma_channel	*dma;

	MGC_SelectEnd(pBase, bEnd);
	pRequest = next_request(pEnd);

	wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
	DBG(4, "<== %s, txcsr %04x\n", pEnd->end_point.name, wCsrVal);

	dma = is_dma_capable() ? pEnd->dma : NULL;
	/* do/while(0): each condition below can "break" out early */
	do {
		/* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
		 * probably rates reporting as a host error
		 */
		if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
			/* host saw our STALL: ack it, stop any DMA, and
			 * fail the current request with -EPIPE
			 */
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
				dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
				musb->pDmaController->channel_abort(dma);
			}

			if (pRequest)
				musb_g_giveback(pEnd, pRequest, -EPIPE);

			break;
		}

		if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
			/* we NAKed, no big deal ... little reason to care */
			wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
			wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
					| MGC_M_TXCSR_TXPKTRDY);
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
			DBG(20, "underrun on ep%d, req %p\n", bEnd, pRequest);
		}

		if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
			/* SHOULD NOT HAPPEN ... has with cppi though, after
			 * changing SENDSTALL (and other cases); harmless?
			 */
			DBG(5, "%s dma still busy?\n", pEnd->end_point.name);
			break;
		}

		if (pRequest) {
			u8	is_dma = 0;

			if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
				/* DMA just finished: disable it and account
				 * for the bytes it moved
				 */
				is_dma = 1;
				wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
				wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
						| MGC_M_TXCSR_P_UNDERRUN
						| MGC_M_TXCSR_TXPKTRDY);
				musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
				/* ensure writebuffer is empty */
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				pRequest->actual += pEnd->dma->dwActualLength;
				DBG(4, "TXCSR%d %04x, dma off, "
						"len %Zd, req %p\n",
					bEnd, wCsrVal,
					pEnd->dma->dwActualLength,
					pRequest);
			}

			if (is_dma || pRequest->actual == pRequest->length) {

				/* First, maybe a terminating short packet.
				 * Some DMA engines might handle this by
				 * themselves.
				 */
				if ((pRequest->zero
						&& pRequest->length
						&& (pRequest->length
							% pEnd->wPacketSize)
							== 0)
#ifdef CONFIG_USB_INVENTRA_DMA
					|| (is_dma &&
						((!dma->bDesiredMode) ||
						    (pRequest->actual &
						    (pEnd->wPacketSize - 1))))
#endif
				) {
					/* on dma completion, fifo may not
					 * be available yet ...
					 */
					if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
						break;

					DBG(4, "sending zero pkt\n");
					musb_writew(epio, MGC_O_HDRC_TXCSR,
							MGC_M_TXCSR_MODE
							| MGC_M_TXCSR_TXPKTRDY);
					pRequest->zero = 0;
				}

				/* ... or if not, then complete it */
				musb_g_giveback(pEnd, pRequest, 0);

				/* kickstart next transfer if appropriate;
				 * the packet that just completed might not
				 * be transmitted for hours or days.
				 * REVISIT for double buffering...
				 * FIXME revisit for stalls too...
				 */
				/* re-select: giveback dropped the lock, so
				 * another endpoint may have been selected
				 */
				MGC_SelectEnd(pBase, bEnd);
				wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
				if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
					break;
				pRequest = pEnd->desc
						? next_request(pEnd)
						: NULL;
				if (!pRequest) {
					DBG(4, "%s idle now\n",
							pEnd->end_point.name);
					break;
				}
			}

			txstate(musb, to_musb_request(pRequest));
		}

	} while (0);
}
536
537 /* ------------------------------------------------------------ */
538
539 #ifdef CONFIG_USB_INVENTRA_DMA
540
541 /* Peripheral rx (OUT) using Mentor DMA works as follows:
542         - Only mode 0 is used.
543
544         - Request is queued by the gadget class driver.
545                 -> if queue was previously empty, rxstate()
546
547         - Host sends OUT token which causes an endpoint interrupt
548           /\      -> RxReady
549           |           -> if request queued, call rxstate
550           |             /\      -> setup DMA
551           |             |            -> DMA interrupt on completion
552           |             |               -> RxReady
553           |             |                     -> stop DMA
554           |             |                     -> ack the read
555           |             |                     -> if data recd = max expected
556           |             |                               by the request, or host
557           |             |                               sent a short packet,
558           |             |                               complete the request,
559           |             |                               and start the next one.
560           |             |_____________________________________|
561           |                                      else just wait for the host
562           |                                         to send the next OUT token.
563           |__________________________________________________|
564
565  * Non-Mentor DMA engines can of course work differently.
566  */
567
568 #endif
569
570 /*
571  * Context: controller locked, IRQs blocked, endpoint selected
572  */
573 static void rxstate(struct musb *musb, struct musb_request *req)
574 {
575         u16                     wCsrVal = 0;
576         const u8                bEnd = req->bEnd;
577         struct usb_request      *pRequest = &req->request;
578         struct musb_ep          *pEnd = &musb->aLocalEnd[bEnd].ep_out;
579         void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
580         u16                     wFifoCount = 0;
581         u16                     wCount = pEnd->wPacketSize;
582
583         wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
584
585         if (is_cppi_enabled() && pEnd->dma) {
586                 struct dma_controller   *c = musb->pDmaController;
587                 struct dma_channel      *channel = pEnd->dma;
588
589                 /* NOTE:  CPPI won't actually stop advancing the DMA
590                  * queue after short packet transfers, so this is almost
591                  * always going to run as IRQ-per-packet DMA so that
592                  * faults will be handled correctly.
593                  */
594                 if (c->channel_program(channel,
595                                 pEnd->wPacketSize,
596                                 !pRequest->short_not_ok,
597                                 pRequest->dma + pRequest->actual,
598                                 pRequest->length - pRequest->actual)) {
599
600                         /* make sure that if an rxpkt arrived after the irq,
601                          * the cppi engine will be ready to take it as soon
602                          * as DMA is enabled
603                          */
604                         wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
605                                         | MGC_M_RXCSR_DMAMODE);
606                         wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
607                         musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
608                         return;
609                 }
610         }
611
612         if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
613                 wCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
614                 if (pRequest->actual < pRequest->length) {
615 #ifdef CONFIG_USB_INVENTRA_DMA
616                         if (is_dma_capable() && pEnd->dma) {
617                                 struct dma_controller   *c;
618                                 struct dma_channel      *channel;
619                                 int                     use_dma = 0;
620
621                                 c = musb->pDmaController;
622                                 channel = pEnd->dma;
623
624         /* We use DMA Req mode 0 in RxCsr, and DMA controller operates in
625          * mode 0 only. So we do not get endpoint interrupts due to DMA
626          * completion. We only get interrupts from DMA controller.
627          *
628          * We could operate in DMA mode 1 if we knew the size of the tranfer
629          * in advance. For mass storage class, request->length = what the host
630          * sends, so that'd work.  But for pretty much everything else,
631          * request->length is routinely more than what the host sends. For
632          * most these gadgets, end of is signified either by a short packet,
633          * or filling the last byte of the buffer.  (Sending extra data in
634          * that last pckate should trigger an overflow fault.)  But in mode 1,
635          * we don't get DMA completion interrrupt for short packets.
636          *
637          * Theoretically, we could enable DMAReq interrupt (RxCsr_DMAMODE = 1),
638          * to get endpoint interrupt on every DMA req, but that didn't seem
639          * to work reliably.
640          *
641          * REVISIT an updated g_file_storage can set req->short_not_ok, which
642          * then becomes usable as a runtime "use mode 1" hint...
643          */
644
645                                 wCsrVal |= MGC_M_RXCSR_DMAENAB;
646 #ifdef USE_MODE1
647                                 wCsrVal |= MGC_M_RXCSR_AUTOCLEAR;
648 //                              wCsrVal |= MGC_M_RXCSR_DMAMODE;
649
650                                 /* this special sequence (enabling and then
651                                    disabling MGC_M_RXCSR_DMAMODE) is required
652                                    to get DMAReq to activate
653                                  */
654                                 musb_writew(epio, MGC_O_HDRC_RXCSR,
655                                         wCsrVal | MGC_M_RXCSR_DMAMODE);
656 #endif
657                                 musb_writew(epio, MGC_O_HDRC_RXCSR,
658                                                 wCsrVal);
659
660                                 if (pRequest->actual < pRequest->length) {
661                                         int transfer_size = 0;
662 #ifdef USE_MODE1
663                                         transfer_size = min(pRequest->length,
664                                                         channel->dwMaxLength);
665 #else
666                                         transfer_size = wCount;
667 #endif
668                                         if (transfer_size <= pEnd->wPacketSize)
669                                                 pEnd->dma->bDesiredMode = 0;
670                                         else
671                                                 pEnd->dma->bDesiredMode = 1;
672
673                                         use_dma = c->channel_program(
674                                                         channel,
675                                                         pEnd->wPacketSize,
676                                                         channel->bDesiredMode,
677                                                         pRequest->dma
678                                                         + pRequest->actual,
679                                                         transfer_size);
680                                 }
681
682                                 if (use_dma)
683                                         return;
684                         }
685 #endif  /* Mentor's USB */
686
687                         wFifoCount = pRequest->length - pRequest->actual;
688                         DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
689                                         pEnd->end_point.name,
690                                         wCount, wFifoCount,
691                                         pEnd->wPacketSize);
692
693                         wFifoCount = min(wCount, wFifoCount);
694
695 #ifdef  CONFIG_USB_TUSB_OMAP_DMA
696                         if (tusb_dma_omap() && pEnd->dma) {
697                                 struct dma_controller *c = musb->pDmaController;
698                                 struct dma_channel *channel = pEnd->dma;
699                                 u32 dma_addr = pRequest->dma + pRequest->actual;
700                                 int ret;
701
702                                 ret = c->channel_program(channel,
703                                                 pEnd->wPacketSize,
704                                                 channel->bDesiredMode,
705                                                 dma_addr,
706                                                 wFifoCount);
707                                 if (ret == TRUE)
708                                         return;
709                         }
710 #endif
711
712                         musb_read_fifo(pEnd->hw_ep, wFifoCount, (u8 *)
713                                         (pRequest->buf + pRequest->actual));
714                         pRequest->actual += wFifoCount;
715
716                         /* REVISIT if we left anything in the fifo, flush
717                          * it and report -EOVERFLOW
718                          */
719
720                         /* ack the read! */
721                         wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
722                         wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
723                         musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
724                 }
725         }
726
727         /* reach the end or short packet detected */
728         if (pRequest->actual == pRequest->length || wCount < pEnd->wPacketSize)
729                 musb_g_giveback(pEnd, pRequest, 0);
730 }
731
732 /*
733  * Data ready for a request; called from IRQ
734  */
735 void musb_g_rx(struct musb *musb, u8 bEnd)
736 {
737         u16                     wCsrVal;
738         struct usb_request      *pRequest;
739         void __iomem            *pBase = musb->pRegs;
740         struct musb_ep          *pEnd = &musb->aLocalEnd[bEnd].ep_out;
741         void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
742         struct dma_channel      *dma;
743
744         MGC_SelectEnd(pBase, bEnd);
745
746         pRequest = next_request(pEnd);
747
748         wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
749         dma = is_dma_capable() ? pEnd->dma : NULL;
750
751         DBG(4, "<== %s, rxcsr %04x%s %p\n", pEnd->end_point.name,
752                         wCsrVal, dma ? " (dma)" : "", pRequest);
753
754         if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
755                 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
756                         dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
757                         (void) musb->pDmaController->channel_abort(dma);
758                         pRequest->actual += pEnd->dma->dwActualLength;
759                 }
760
761                 wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
762                 wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
763                 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
764
765                 if (pRequest)
766                         musb_g_giveback(pEnd, pRequest, -EPIPE);
767                 goto done;
768         }
769
770         if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
771                 // wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
772                 wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
773                 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
774
775                 DBG(3, "%s iso overrun on %p\n", pEnd->name, pRequest);
776                 if (pRequest && pRequest->status == -EINPROGRESS)
777                         pRequest->status = -EOVERFLOW;
778         }
779         if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
780                 /* REVISIT not necessarily an error */
781                 DBG(4, "%s, incomprx\n", pEnd->end_point.name);
782         }
783
784         if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
785                 /* "should not happen"; likely RXPKTRDY pending for DMA */
786                 DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
787                         "%s busy, csr %04x\n",
788                         pEnd->end_point.name, wCsrVal);
789                 goto done;
790         }
791
792         if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
793                 wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
794                                 | MGC_M_RXCSR_DMAENAB
795                                 | MGC_M_RXCSR_DMAMODE);
796                 musb_writew(epio, MGC_O_HDRC_RXCSR,
797                         MGC_M_RXCSR_P_WZC_BITS | wCsrVal);
798
799                 pRequest->actual += pEnd->dma->dwActualLength;
800
801                 DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
802                         bEnd, wCsrVal,
803                         musb_readw(epio, MGC_O_HDRC_RXCSR),
804                         pEnd->dma->dwActualLength, pRequest);
805
806 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
807                 /* Autoclear doesn't clear RxPktRdy for short packets */
808                 if ((dma->bDesiredMode == 0)
809                                 || (dma->dwActualLength
810                                         & (pEnd->wPacketSize - 1))) {
811                         /* ack the read! */
812                         wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
813                         musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
814                 }
815
816                 /* incomplete, and not short? wait for next IN packet */
817                 if ((pRequest->actual < pRequest->length)
818                                 && (pEnd->dma->dwActualLength
819                                         == pEnd->wPacketSize))
820                         goto done;
821 #endif
822                 musb_g_giveback(pEnd, pRequest, 0);
823
824                 pRequest = next_request(pEnd);
825                 if (!pRequest)
826                         goto done;
827
828                 /* don't start more i/o till the stall clears */
829                 MGC_SelectEnd(pBase, bEnd);
830                 wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
831                 if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
832                         goto done;
833         }
834
835
836         /* analyze request if the ep is hot */
837         if (pRequest)
838                 rxstate(musb, to_musb_request(pRequest));
839         else
840                 DBG(3, "packet waiting for %s%s request\n",
841                                 pEnd->desc ? "" : "inactive ",
842                                 pEnd->end_point.name);
843
844 done:
845         return;
846 }
847
848 /* ------------------------------------------------------------ */
849
/*
 * Enable an endpoint for use per "desc": program max packet size and
 * transfer type into the core, unmask its interrupt, flush/clear the
 * FIFO, and try to grab a DMA channel for it.
 *
 * Returns 0 on success, -EINVAL on bad/unsupported descriptor,
 * -EBUSY if already enabled.  Context: process; takes musb->Lock.
 */
static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*pEnd;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*pBase;
	u8		bEnd;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	pEnd = to_musb_ep(ep);
	hw_ep = pEnd->hw_ep;
	regs = hw_ep->regs;
	musb = pEnd->pThis;
	pBase = musb->pRegs;
	bEnd = pEnd->bEndNumber;

	spin_lock_irqsave(&musb->Lock, flags);

	/* already configured? caller must disable first */
	if (pEnd->desc) {
		status = -EBUSY;
		goto fail;
	}
	pEnd->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	/* check direction and (later) maxpacket size against endpoint */
	if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != bEnd)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = le16_to_cpu(desc->wMaxPacketSize);
	if (tmp & ~0x07ff)
		goto fail;
	pEnd->wPacketSize = tmp;

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	MGC_SelectEnd(pBase, bEnd);
	if (desc->bEndpointAddress & USB_DIR_IN) {
		u16 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);

		/* a shared FIFO is claimed for whichever direction is
		 * enabled; otherwise direction is fixed per hw_ep
		 */
		if (hw_ep->bIsSharedFifo)
			pEnd->is_in = 1;
		if (!pEnd->is_in)
			goto fail;
		if (tmp > hw_ep->wMaxPacketSizeTx)
			goto fail;

		wIntrTxE |= (1 << bEnd);
		musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		musb_writew(regs, MGC_O_HDRC_TXMAXP, tmp);

		csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MGC_O_HDRC_TXCSR)
				& MGC_M_TXCSR_FIFONOTEMPTY)
			csr |= MGC_M_TXCSR_FLUSHFIFO;
		if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MGC_O_HDRC_TXCSR, csr);

	} else {
		u16 wIntrRxE = musb_readw(pBase, MGC_O_HDRC_INTRRXE);

		if (hw_ep->bIsSharedFifo)
			pEnd->is_in = 0;
		if (pEnd->is_in)
			goto fail;
		if (tmp > hw_ep->wMaxPacketSizeRx)
			goto fail;

		wIntrRxE |= (1 << bEnd);
		musb_writew(pBase, MGC_O_HDRC_INTRRXE, wIntrRxE);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		musb_writew(regs, MGC_O_HDRC_RXMAXP, tmp);

		/* force shared fifo to OUT-only mode */
		if (hw_ep->bIsSharedFifo) {
			csr = musb_readw(regs, MGC_O_HDRC_TXCSR);
			csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
			musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
		}

		csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
		if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MGC_M_RXCSR_P_ISO;
		else if (pEnd->type == USB_ENDPOINT_XFER_INT)
			csr |= MGC_M_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
		musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->pDmaController) {
		struct dma_controller	*c = musb->pDmaController;

		pEnd->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		pEnd->dma = NULL;

	pEnd->desc = desc;
	pEnd->busy = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, pEnd->end_point.name,
			({ char *s; switch (pEnd->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			pEnd->is_in ? "IN" : "OUT",
			pEnd->dma ? "dma, " : "",
			pEnd->wPacketSize);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->Lock, flags);
	return status;
}
994
995 /*
996  * Disable an endpoint flushing all requests queued.
997  */
998 static int musb_gadget_disable(struct usb_ep *ep)
999 {
1000         unsigned long   flags;
1001         struct musb     *musb;
1002         u8              bEnd;
1003         struct musb_ep  *pEnd;
1004         void __iomem    *epio;
1005         int             status = 0;
1006
1007         pEnd = to_musb_ep(ep);
1008         musb = pEnd->pThis;
1009         bEnd = pEnd->bEndNumber;
1010         epio = musb->aLocalEnd[bEnd].regs;
1011
1012         spin_lock_irqsave(&musb->Lock, flags);
1013         MGC_SelectEnd(musb->pRegs, bEnd);
1014
1015         /* zero the endpoint sizes */
1016         if (pEnd->is_in) {
1017                 u16 wIntrTxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRTXE);
1018                 wIntrTxE &= ~(1 << bEnd);
1019                 musb_writew(musb->pRegs, MGC_O_HDRC_INTRTXE, wIntrTxE);
1020                 musb_writew(epio, MGC_O_HDRC_TXMAXP, 0);
1021         } else {
1022                 u16 wIntrRxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRRXE);
1023                 wIntrRxE &= ~(1 << bEnd);
1024                 musb_writew(musb->pRegs, MGC_O_HDRC_INTRRXE, wIntrRxE);
1025                 musb_writew(epio, MGC_O_HDRC_RXMAXP, 0);
1026         }
1027
1028         pEnd->desc = NULL;
1029
1030         /* abort all pending DMA and requests */
1031         nuke(pEnd, -ESHUTDOWN);
1032
1033         schedule_work(&musb->irq_work);
1034
1035         spin_unlock_irqrestore(&(musb->Lock), flags);
1036
1037         DBG(2, "%s\n", pEnd->end_point.name);
1038
1039         return status;
1040 }
1041
1042 /*
1043  * Allocate a request for an endpoint.
1044  * Reused by ep0 code.
1045  */
1046 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1047 {
1048         struct musb_ep          *musb_ep = to_musb_ep(ep);
1049         struct musb_request     *pRequest = NULL;
1050
1051         pRequest = kzalloc(sizeof *pRequest, gfp_flags);
1052         if (pRequest) {
1053                 INIT_LIST_HEAD(&pRequest->request.list);
1054                 pRequest->request.dma = DMA_ADDR_INVALID;
1055                 pRequest->bEnd = musb_ep->bEndNumber;
1056                 pRequest->ep = musb_ep;
1057         }
1058
1059         return &pRequest->request;
1060 }
1061
/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	kfree(request);
}
1070
static LIST_HEAD(buffers);

/* bookkeeping record for a buffer pending release, queued on "buffers"
 * above.  NOTE(review): no users are visible in this part of the file.
 */
struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};
1079
/*
 * (Re)start I/O for the request at the head of an endpoint's queue,
 * dispatching to the direction-specific state machine.
 *
 * Context: controller locked, IRQs blocked.
 */
static void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	DBG(3, "<== %s request %p len %u on hw_ep%d\n",
		req->bTx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->bEnd);

	/* select the endpoint's register bank before touching CSRs */
	MGC_SelectEnd(musb->pRegs, req->bEnd);
	if (req->bTx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}
1095
1096 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1097                         gfp_t gfp_flags)
1098 {
1099         struct musb_ep          *pEnd;
1100         struct musb_request     *pRequest;
1101         struct musb             *musb;
1102         int                     status = 0;
1103         unsigned long           lockflags;
1104
1105         if (!ep || !req)
1106                 return -EINVAL;
1107         if (!req->buf)
1108                 return -ENODATA;
1109
1110         pEnd = to_musb_ep(ep);
1111         musb = pEnd->pThis;
1112
1113         pRequest = to_musb_request(req);
1114         pRequest->musb = musb;
1115
1116         if (pRequest->ep != pEnd)
1117                 return -EINVAL;
1118
1119         DBG(4, "<== to %s request=%p\n", ep->name, req);
1120
1121         /* request is mine now... */
1122         pRequest->request.actual = 0;
1123         pRequest->request.status = -EINPROGRESS;
1124         pRequest->bEnd = pEnd->bEndNumber;
1125         pRequest->bTx = pEnd->is_in;
1126
1127         if (is_dma_capable() && pEnd->dma) {
1128                 if (pRequest->request.dma == DMA_ADDR_INVALID) {
1129                         pRequest->request.dma = dma_map_single(
1130                                         musb->controller,
1131                                         pRequest->request.buf,
1132                                         pRequest->request.length,
1133                                         pRequest->bTx
1134                                                 ? DMA_TO_DEVICE
1135                                                 : DMA_FROM_DEVICE);
1136                         pRequest->mapped = 1;
1137                 } else {
1138                         dma_sync_single_for_device(musb->controller,
1139                                         pRequest->request.dma,
1140                                         pRequest->request.length,
1141                                         pRequest->bTx
1142                                                 ? DMA_TO_DEVICE
1143                                                 : DMA_FROM_DEVICE);
1144                         pRequest->mapped = 0;
1145                 }
1146         } else if (!req->buf) {
1147                 return -ENODATA;
1148         } else
1149                 pRequest->mapped = 0;
1150
1151         spin_lock_irqsave(&musb->Lock, lockflags);
1152
1153         /* don't queue if the ep is down */
1154         if (!pEnd->desc) {
1155                 DBG(4, "req %p queued to %s while ep %s\n",
1156                                 req, ep->name, "disabled");
1157                 status = -ESHUTDOWN;
1158                 goto cleanup;
1159         }
1160
1161         /* add pRequest to the list */
1162         list_add_tail(&(pRequest->request.list), &(pEnd->req_list));
1163
1164         /* it this is the head of the queue, start i/o ... */
1165         if (!pEnd->busy && &pRequest->request.list == pEnd->req_list.next)
1166                 musb_ep_restart(musb, pRequest);
1167
1168 cleanup:
1169         spin_unlock_irqrestore(&musb->Lock, lockflags);
1170         return status;
1171 }
1172
1173 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *pRequest)
1174 {
1175         struct musb_ep          *pEnd = to_musb_ep(ep);
1176         struct usb_request      *r;
1177         unsigned long           flags;
1178         int                     status = 0;
1179         struct musb             *musb = pEnd->pThis;
1180
1181         if (!ep || !pRequest || to_musb_request(pRequest)->ep != pEnd)
1182                 return -EINVAL;
1183
1184         spin_lock_irqsave(&musb->Lock, flags);
1185
1186         list_for_each_entry(r, &pEnd->req_list, list) {
1187                 if (r == pRequest)
1188                         break;
1189         }
1190         if (r != pRequest) {
1191                 DBG(3, "request %p not queued to %s\n", pRequest, ep->name);
1192                 status = -EINVAL;
1193                 goto done;
1194         }
1195
1196         /* if the hardware doesn't have the request, easy ... */
1197         if (pEnd->req_list.next != &pRequest->list || pEnd->busy)
1198                 musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1199
1200         /* ... else abort the dma transfer ... */
1201         else if (is_dma_capable() && pEnd->dma) {
1202                 struct dma_controller   *c = musb->pDmaController;
1203
1204                 MGC_SelectEnd(musb->pRegs, pEnd->bEndNumber);
1205                 if (c->channel_abort)
1206                         status = c->channel_abort(pEnd->dma);
1207                 else
1208                         status = -EBUSY;
1209                 if (status == 0)
1210                         musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1211         } else {
1212                 /* NOTE: by sticking to easily tested hardware/driver states,
1213                  * we leave counting of in-flight packets imprecise.
1214                  */
1215                 musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1216         }
1217
1218 done:
1219         spin_unlock_irqrestore(&musb->Lock, flags);
1220         return status;
1221 }
1222
1223 /*
1224  * Set or clear the halt bit of an endpoint. A halted enpoint won't tx/rx any
1225  * data but will queue requests.
1226  *
1227  * exported to ep0 code
1228  */
1229 int musb_gadget_set_halt(struct usb_ep *ep, int value)
1230 {
1231         struct musb_ep          *pEnd = to_musb_ep(ep);
1232         u8                      bEnd = pEnd->bEndNumber;
1233         struct musb             *musb = pEnd->pThis;
1234         void __iomem            *epio = musb->aLocalEnd[bEnd].regs;
1235         void __iomem            *pBase;
1236         unsigned long           flags;
1237         u16                     wCsr;
1238         struct musb_request     *pRequest = NULL;
1239         int                     status = 0;
1240
1241         if (!ep)
1242                 return -EINVAL;
1243         pBase = musb->pRegs;
1244
1245         spin_lock_irqsave(&musb->Lock, flags);
1246
1247         if ((USB_ENDPOINT_XFER_ISOC == pEnd->type)) {
1248                 status = -EINVAL;
1249                 goto done;
1250         }
1251
1252         MGC_SelectEnd(pBase, bEnd);
1253
1254         /* cannot portably stall with non-empty FIFO */
1255         pRequest = to_musb_request(next_request(pEnd));
1256         if (value && pEnd->is_in) {
1257                 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1258                 if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
1259                         DBG(3, "%s fifo busy, cannot halt\n", ep->name);
1260                         spin_unlock_irqrestore(&musb->Lock, flags);
1261                         return -EAGAIN;
1262                 }
1263
1264         }
1265
1266         /* set/clear the stall and toggle bits */
1267         DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1268         if (pEnd->is_in) {
1269                 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1270                 if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY)
1271                         wCsr |= MGC_M_TXCSR_FLUSHFIFO;
1272                 wCsr |= MGC_M_TXCSR_P_WZC_BITS
1273                         | MGC_M_TXCSR_CLRDATATOG;
1274                 if (value)
1275                         wCsr |= MGC_M_TXCSR_P_SENDSTALL;
1276                 else
1277                         wCsr &= ~(MGC_M_TXCSR_P_SENDSTALL
1278                                 | MGC_M_TXCSR_P_SENTSTALL);
1279                 wCsr &= ~MGC_M_TXCSR_TXPKTRDY;
1280                 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
1281         } else {
1282                 wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
1283                 wCsr |= MGC_M_RXCSR_P_WZC_BITS
1284                         | MGC_M_RXCSR_FLUSHFIFO
1285                         | MGC_M_RXCSR_CLRDATATOG;
1286                 if (value)
1287                         wCsr |= MGC_M_RXCSR_P_SENDSTALL;
1288                 else
1289                         wCsr &= ~(MGC_M_RXCSR_P_SENDSTALL
1290                                 | MGC_M_RXCSR_P_SENTSTALL);
1291                 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
1292         }
1293
1294 done:
1295
1296         /* maybe start the first request in the queue */
1297         if (!pEnd->busy && !value && pRequest) {
1298                 DBG(3, "restarting the request\n");
1299                 musb_ep_restart(musb, pRequest);
1300         }
1301
1302         spin_unlock_irqrestore(&musb->Lock, flags);
1303         return status;
1304 }
1305
1306 static int musb_gadget_fifo_status(struct usb_ep *ep)
1307 {
1308         struct musb_ep          *musb_ep = to_musb_ep(ep);
1309         void __iomem            *epio = musb_ep->hw_ep->regs;
1310         int                     retval = -EINVAL;
1311
1312         if (musb_ep->desc && !musb_ep->is_in) {
1313                 struct musb             *musb = musb_ep->pThis;
1314                 int                     bEnd = musb_ep->bEndNumber;
1315                 void __iomem            *mbase = musb->pRegs;
1316                 unsigned long           flags;
1317
1318                 spin_lock_irqsave(&musb->Lock, flags);
1319
1320                 MGC_SelectEnd(mbase, bEnd);
1321                 /* FIXME return zero unless RXPKTRDY is set */
1322                 retval = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
1323
1324                 spin_unlock_irqrestore(&musb->Lock, flags);
1325         }
1326         return retval;
1327 }
1328
/* Flush an endpoint's FIFO, with its TX interrupt masked so the flush
 * can't race a completion IRQ for this endpoint.
 */
static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->pThis;
	u8		nEnd = musb_ep->bEndNumber;
	void __iomem	*epio = musb->aLocalEnd[nEnd].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		wCsr, wIntrTxE;

	mbase = musb->pRegs;

	spin_lock_irqsave(&musb->Lock, flags);
	MGC_SelectEnd(mbase, (u8) nEnd);

	/* disable interrupts */
	wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);
	musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << nEnd));

	if (musb_ep->is_in) {
		/* only flush TX when it actually holds data */
		wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
		if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
			wCsr |= MGC_M_TXCSR_FLUSHFIFO | MGC_M_TXCSR_P_WZC_BITS;
			/* written twice in case of double buffering */
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
		}
	} else {
		wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
		wCsr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_P_WZC_BITS;
		/* written twice in case of double buffering */
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
		musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);
	spin_unlock_irqrestore(&musb->Lock, flags);
}
1367
/* gadget-side endpoint operations, registered for each musb_ep */
static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
1379
1380 /***********************************************************************/
1381
1382 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1383 {
1384         struct musb     *musb = gadget_to_musb(gadget);
1385
1386         return (int)musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);
1387 }
1388
/*
 * Attempt remote wakeup: resume signaling when a suspended B-peripheral
 * is permitted to wake the host, or SRP when the session is idle.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->pRegs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->Lock, flags);

	switch (musb->xceiv.state) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
		DBG(2, "Sending SRP: devctl: %02x\n", devctl);
		devctl |= MGC_M_DEVCTL_SESSION;
		musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl);
		devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
		/* bounded poll until the core latches SESSION ... */
		retries = 100;
		while (!(devctl & MGC_M_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
			if (retries-- < 1)
				break;
		}
		/* ... then bounded poll until it drops again */
		retries = 10000;
		while (devctl & MGC_M_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
			if (retries-- < 1)
				break;
		}

		/* Block idling for at least 1s.
		 * NOTE(review): msecs_to_jiffies(1 * HZ) is HZ
		 * *milliseconds*, which equals 1s only when HZ == 1000
		 * -- confirm the intended duration.
		 */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		goto done;
	}

	status = 0;

	/* drive resume signaling on the bus for ~2ms */
	power = musb_readb(mregs, MGC_O_HDRC_POWER);
	power |= MGC_M_POWER_RESUME;
	musb_writeb(mregs, MGC_O_HDRC_POWER, power);
	DBG(2, "issue wakeup\n");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MGC_O_HDRC_POWER);
	power &= ~MGC_M_POWER_RESUME;
	musb_writeb(mregs, MGC_O_HDRC_POWER, power);
done:
	spin_unlock_irqrestore(&musb->Lock, flags);
	return status;
}
1456
1457 static int
1458 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1459 {
1460         struct musb     *musb = gadget_to_musb(gadget);
1461
1462         musb->is_self_powered = !!is_selfpowered;
1463         return 0;
1464 }
1465
1466 static void musb_pullup(struct musb *musb, int is_on)
1467 {
1468         u8 power;
1469
1470         power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
1471         if (is_on)
1472                 power |= MGC_M_POWER_SOFTCONN;
1473         else
1474                 power &= ~MGC_M_POWER_SOFTCONN;
1475
1476         /* FIXME if on, HdrcStart; if off, HdrcStop */
1477
1478         DBG(3, "gadget %s D+ pullup %s\n",
1479                 musb->pGadgetDriver->function, is_on ? "on" : "off");
1480         musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
1481 }
1482
/* Compiled-out stubs kept for reference; the live musb_gadget_vbus_draw()
 * below supersedes the vbus_draw stub here.
 */
#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
        DBG(2, "<= %s =>\n", __FUNCTION__);

        // FIXME iff driver's softconnect flag is set (as it is during probe,
        // though that can clear it), just musb_pullup().

        return -EINVAL;
}

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
        /* FIXME -- delegate to otg_transciever logic */

        DBG(2, "<= vbus_draw %u =>\n", mA);
        return 0;
}
#endif
1502
1503 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1504 {
1505         struct musb     *musb = gadget_to_musb(gadget);
1506
1507         if (!musb->xceiv.set_power)
1508                 return -EOPNOTSUPP;
1509         return otg_set_power(&musb->xceiv, mA);
1510 }
1511
1512 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1513 {
1514         struct musb     *musb = gadget_to_musb(gadget);
1515         unsigned long   flags;
1516
1517         is_on = !!is_on;
1518
1519         /* NOTE: this assumes we are sensing vbus; we'd rather
1520          * not pullup unless the B-session is active.
1521          */
1522         spin_lock_irqsave(&musb->Lock, flags);
1523         if (is_on != musb->softconnect) {
1524                 musb->softconnect = is_on;
1525                 musb_pullup(musb, is_on);
1526         }
1527         spin_unlock_irqrestore(&musb->Lock, flags);
1528         return 0;
1529 }
1530
/* Peripheral-side operations handed to the gadget core; vbus_session is
 * still disabled (see the #if 0 stub above).
 */
static const struct usb_gadget_ops musb_gadget_operations = {
        .get_frame              = musb_gadget_get_frame,
        .wakeup                 = musb_gadget_wakeup,
        .set_selfpowered        = musb_gadget_set_self_powered,
        //.vbus_session         = musb_gadget_vbus_session,
        .vbus_draw              = musb_gadget_vbus_draw,
        .pullup                 = musb_gadget_pullup,
};
1539
1540 /****************************************************************
1541  * Registration operations
1542  ****************************************************************/
1543
/* Only this registration code "knows" the rule (from USB standards)
 * about there being only one external upstream port.  It assumes
 * all peripheral ports are external...
 */
/* the single peripheral instance; NULL until musb_gadget_setup() claims it */
static struct musb *the_gadget;
1549
/* Release callback for the virtual "gadget" device registered by
 * musb_gadget_setup(); currently only logs (no refcounted state yet).
 */
static void musb_gadget_release(struct device *dev)
{
        // kref_put(WHAT)
        dev_dbg(dev, "%s\n", __FUNCTION__);
}
1555
1556
/* Initialize one peripheral-side endpoint struct and hook it into the
 * gadget's endpoint list.
 *
 * @musb: controller state
 * @ep:   endpoint struct to set up; zeroed first
 * @bEnd: hardware endpoint number (0 is the control endpoint)
 * @is_in: nonzero for the IN (TX) half of a split FIFO
 */
static void __init
init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 bEnd, int is_in)
{
        struct musb_hw_ep       *hw_ep = musb->aLocalEnd + bEnd;

        memset(ep, 0, sizeof *ep);

        ep->bEndNumber = bEnd;
        ep->pThis = musb;
        ep->hw_ep = hw_ep;
        ep->is_in = is_in;

        INIT_LIST_HEAD(&ep->req_list);

        /* ep0 and shared-FIFO endpoints get no "in"/"out" name suffix */
        sprintf(ep->name, "ep%d%s", bEnd,
                        (!bEnd || hw_ep->bIsSharedFifo) ? "" : (
                                is_in ? "in" : "out"));
        ep->end_point.name = ep->name;
        INIT_LIST_HEAD(&ep->end_point.ep_list);
        if (!bEnd) {
                /* ep0: fixed 64-byte maxpacket, its own ops, and it is
                 * published via g.ep0 rather than the generic ep_list
                 */
                ep->end_point.maxpacket = 64;
                ep->end_point.ops = &musb_g_ep0_ops;
                musb->g.ep0 = &ep->end_point;
        } else {
                /* maxpacket comes from the FIFO sizing for this direction */
                if (is_in)
                        ep->end_point.maxpacket = hw_ep->wMaxPacketSizeTx;
                else
                        ep->end_point.maxpacket = hw_ep->wMaxPacketSizeRx;
                ep->end_point.ops = &musb_ep_ops;
                list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
        }
}
1589
1590 /*
1591  * Initialize the endpoints exposed to peripheral drivers, with backlinks
1592  * to the rest of the driver state.
1593  */
1594 static inline void __init musb_g_init_endpoints(struct musb *musb)
1595 {
1596         u8                      bEnd;
1597         struct musb_hw_ep       *hw_ep;
1598         unsigned                count = 0;
1599
1600         /* intialize endpoint list just once */
1601         INIT_LIST_HEAD(&(musb->g.ep_list));
1602
1603         for (bEnd = 0, hw_ep = musb->aLocalEnd;
1604                         bEnd < musb->bEndCount;
1605                         bEnd++, hw_ep++) {
1606                 if (hw_ep->bIsSharedFifo /* || !bEnd */) {
1607                         init_peripheral_ep(musb, &hw_ep->ep_in, bEnd, 0);
1608                         count++;
1609                 } else {
1610                         if (hw_ep->wMaxPacketSizeTx) {
1611                                 init_peripheral_ep(musb, &hw_ep->ep_in,
1612                                                         bEnd, 1);
1613                                 count++;
1614                         }
1615                         if (hw_ep->wMaxPacketSizeRx) {
1616                                 init_peripheral_ep(musb, &hw_ep->ep_out,
1617                                                         bEnd, 0);
1618                                 count++;
1619                         }
1620                 }
1621         }
1622 }
1623
1624 /* called once during driver setup to initialize and link into
1625  * the driver model; memory is zeroed.
1626  */
int __init musb_gadget_setup(struct musb *musb)
{
        int status;

        /* REVISIT minor race:  if (erroneously) setting up two
         * musb peripherals at the same time, only the bus lock
         * is probably held.
         */
        if (the_gadget)
                return -EBUSY;
        the_gadget = musb;

        musb->g.ops = &musb_gadget_operations;
        musb->g.is_dualspeed = 1;
        musb->g.speed = USB_SPEED_UNKNOWN;

        /* this "gadget" abstracts/virtualizes the controller */
        strcpy(musb->g.dev.bus_id, "gadget");
        musb->g.dev.parent = musb->controller;
        /* DMA through the controller's own mask */
        musb->g.dev.dma_mask = musb->controller->dma_mask;
        musb->g.dev.release = musb_gadget_release;
        musb->g.name = musb_driver_name;

        if (is_otg_enabled(musb))
                musb->g.is_otg = 1;

        musb_g_init_endpoints(musb);

        /* start idle until a gadget driver binds */
        musb->is_active = 0;
        musb_platform_try_idle(musb, 0);

        /* on registration failure, release the singleton claim */
        status = device_register(&musb->g.dev);
        if (status != 0)
                the_gadget = NULL;
        return status;
}
1663
1664 void musb_gadget_cleanup(struct musb *musb)
1665 {
1666         if (musb != the_gadget)
1667                 return;
1668
1669         device_unregister(&musb->g.dev);
1670         the_gadget = NULL;
1671 }
1672
1673 /*
1674  * Register the gadget driver. Used by gadget drivers when
1675  * registering themselves with the controller.
1676  *
1677  * -EINVAL something went wrong (not driver)
1678  * -EBUSY another gadget is already using the controller
 * -ENOMEM no memory to perform the operation
1680  *
1681  * @param driver the gadget driver
1682  * @return <0 if error, 0 if everything is fine
1683  */
1684 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1685 {
1686         int retval;
1687         unsigned long flags;
1688         struct musb *musb = the_gadget;
1689
1690         if (!driver
1691                         || driver->speed != USB_SPEED_HIGH
1692                         || !driver->bind
1693                         || !driver->setup)
1694                 return -EINVAL;
1695
1696         /* driver must be initialized to support peripheral mode */
1697         if (!musb || !(musb->board_mode == MUSB_OTG
1698                                 || musb->board_mode != MUSB_OTG)) {
1699                 DBG(1,"%s, no dev??\n", __FUNCTION__);
1700                 return -ENODEV;
1701         }
1702
1703         DBG(3, "registering driver %s\n", driver->function);
1704         spin_lock_irqsave(&musb->Lock, flags);
1705
1706         if (musb->pGadgetDriver) {
1707                 DBG(1, "%s is already bound to %s\n",
1708                                 musb_driver_name,
1709                                 musb->pGadgetDriver->driver.name);
1710                 retval = -EBUSY;
1711         } else {
1712                 musb->pGadgetDriver = driver;
1713                 musb->g.dev.driver = &driver->driver;
1714                 driver->driver.bus = NULL;
1715                 musb->softconnect = 1;
1716                 retval = 0;
1717         }
1718
1719         spin_unlock_irqrestore(&musb->Lock, flags);
1720
1721         if (retval == 0)
1722                 retval = driver->bind(&musb->g);
1723         if (retval != 0) {
1724                 DBG(3, "bind to driver %s failed --> %d\n",
1725                         driver->driver.name, retval);
1726                 musb->pGadgetDriver = NULL;
1727                 musb->g.dev.driver = NULL;
1728         }
1729
1730         /* start peripheral and/or OTG engines */
1731         if (retval == 0) {
1732                 spin_lock_irqsave(&musb->Lock, flags);
1733
1734                 /* REVISIT always use otg_set_peripheral(), handling
1735                  * issues including the root hub one below ...
1736                  */
1737                 musb->xceiv.gadget = &musb->g;
1738                 musb->xceiv.state = OTG_STATE_B_IDLE;
1739                 musb->is_active = 1;
1740
1741                 /* FIXME this ignores the softconnect flag.  Drivers are
1742                  * allowed hold the peripheral inactive until for example
1743                  * userspace hooks up printer hardware or DSP codecs, so
1744                  * hosts only see fully functional devices.
1745                  */
1746
1747                 if (!is_otg_enabled(musb))
1748                         musb_start(musb);
1749
1750                 spin_unlock_irqrestore(&musb->Lock, flags);
1751
1752                 if (is_otg_enabled(musb)) {
1753                         DBG(3, "OTG startup...\n");
1754
1755                         /* REVISIT:  funcall to other code, which also
1756                          * handles power budgeting ... this way also
1757                          * ensures HdrcStart is indirectly called.
1758                          */
1759                         retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1760                         if (retval < 0) {
1761                                 DBG(1, "add_hcd failed, %d\n", retval);
1762                                 spin_lock_irqsave(&musb->Lock, flags);
1763                                 musb->xceiv.gadget = NULL;
1764                                 musb->xceiv.state = OTG_STATE_UNDEFINED;
1765                                 musb->pGadgetDriver = NULL;
1766                                 musb->g.dev.driver = NULL;
1767                                 spin_unlock_irqrestore(&musb->Lock, flags);
1768                         }
1769                 }
1770         }
1771
1772         return retval;
1773 }
1774 EXPORT_SYMBOL(usb_gadget_register_driver);
1775
/* Quiesce the controller and, if connected, tell @driver about the
 * disconnect.  Caller holds musb->Lock; it is dropped and re-taken
 * around the driver->disconnect() callback.
 */
static void
stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
{
        int                     i;
        struct musb_hw_ep       *hw_ep;

        /* don't disconnect if it's not connected */
        if (musb->g.speed == USB_SPEED_UNKNOWN)
                driver = NULL;
        else
                musb->g.speed = USB_SPEED_UNKNOWN;

        /* deactivate the hardware */
        if (musb->softconnect) {
                musb->softconnect = 0;
                musb_pullup(musb, 0);
        }
        musb_stop(musb);

        /* killing any outstanding requests will quiesce the driver;
         * then report disconnect
         */
        if (driver) {
                /* abort every queued request on every endpoint */
                for (i = 0, hw_ep = musb->aLocalEnd;
                                i < musb->bEndCount;
                                i++, hw_ep++) {
                        MGC_SelectEnd(musb->pRegs, i);
                        if (hw_ep->bIsSharedFifo /* || !bEnd */) {
                                nuke(&hw_ep->ep_in, -ESHUTDOWN);
                        } else {
                                if (hw_ep->wMaxPacketSizeTx)
                                        nuke(&hw_ep->ep_in, -ESHUTDOWN);
                                if (hw_ep->wMaxPacketSizeRx)
                                        nuke(&hw_ep->ep_out, -ESHUTDOWN);
                        }
                }

                /* callback runs unlocked */
                spin_unlock(&musb->Lock);
                driver->disconnect (&musb->g);
                spin_lock(&musb->Lock);
        }
}
1818
1819 /*
1820  * Unregister the gadget driver. Used by gadget drivers when
1821  * unregistering themselves from the controller.
1822  *
1823  * @param driver the gadget driver to unregister
1824  */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
        unsigned long   flags;
        int             retval = 0;
        struct musb     *musb = the_gadget;

        if (!driver || !driver->unbind || !musb)
                return -EINVAL;

        /* REVISIT always use otg_set_peripheral() here too;
         * this needs to shut down the OTG engine.
         */

        spin_lock_irqsave(&musb->Lock, flags);

#ifdef  CONFIG_USB_MUSB_OTG
        /* NOTE(review): HNP is stopped even before checking that
         * @driver is the one actually bound -- verify intent.
         */
        musb_hnp_stop(musb);
#endif

        if (musb->pGadgetDriver == driver) {
                musb->xceiv.state = OTG_STATE_UNDEFINED;
                stop_activity(musb, driver);

                /* unbind runs unlocked, like the bind path */
                DBG(3, "unregistering driver %s\n", driver->function);
                spin_unlock_irqrestore(&musb->Lock, flags);
                driver->unbind(&musb->g);
                spin_lock_irqsave(&musb->Lock, flags);

                musb->pGadgetDriver = NULL;
                musb->g.dev.driver = NULL;

                musb->is_active = 0;
                musb_platform_try_idle(musb, 0);
        } else
                retval = -EINVAL;
        spin_unlock_irqrestore(&musb->Lock, flags);

        if (is_otg_enabled(musb) && retval == 0) {
                usb_remove_hcd(musb_to_hcd(musb));
                /* FIXME we need to be able to register another
                 * gadget driver here and have everything work;
                 * that currently misbehaves.
                 */
        }

        return retval;
}
1872 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1873
1874
1875 /***********************************************************************/
1876
1877 /* lifecycle operations called through plat_uds.c */
1878
/* Bus resume seen while in peripheral mode; forwards to the gadget
 * driver's resume() callback where one is registered.  Called with
 * musb->Lock held (it is dropped around the callback).
 */
void musb_g_resume(struct musb *musb)
{
        musb->is_suspended = 0;
        switch (musb->xceiv.state) {
        case OTG_STATE_B_IDLE:
                /* nothing to report before a session starts */
                break;
        case OTG_STATE_B_WAIT_ACON:
        case OTG_STATE_B_PERIPHERAL:
                musb->is_active = 1;
                if (musb->pGadgetDriver && musb->pGadgetDriver->resume) {
                        spin_unlock(&musb->Lock);
                        musb->pGadgetDriver->resume(&musb->g);
                        spin_lock(&musb->Lock);
                }
                break;
        default:
                WARN("unhandled RESUME transition (%s)\n",
                                otg_state_string(musb));
        }
}
1899
/* called when SOF packets stop for 3+ msec */
void musb_g_suspend(struct musb *musb)
{
        u8      devctl;

        devctl = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
        DBG(3, "devctl %02x\n", devctl);

        switch (musb->xceiv.state) {
        case OTG_STATE_B_IDLE:
                /* VBUS above session threshold: promote to peripheral */
                if ((devctl & MGC_M_DEVCTL_VBUS) == MGC_M_DEVCTL_VBUS)
                        musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
                break;
        case OTG_STATE_B_PERIPHERAL:
                musb->is_suspended = 1;
                /* callback runs with musb->Lock dropped */
                if (musb->pGadgetDriver && musb->pGadgetDriver->suspend) {
                        spin_unlock(&musb->Lock);
                        musb->pGadgetDriver->suspend(&musb->g);
                        spin_lock(&musb->Lock);
                }
                break;
        default:
                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
                 * A_PERIPHERAL may need care too
                 */
                WARN("unhandled SUSPEND transition (%s)\n",
                                otg_state_string(musb));
        }
}
1929
/* Called during SRP. Caller must hold lock */
void musb_g_wakeup(struct musb *musb)
{
        /* delegate to the gadget-op wakeup path */
        musb_gadget_wakeup(&musb->g);
}
1935
/* called when VBUS drops below session threshold, and in other cases */
void musb_g_disconnect(struct musb *musb)
{
        void __iomem    *mregs = musb->pRegs;
        u8      devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);

        DBG(3, "devctl %02x\n", devctl);

        /* clear HR */
        musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl & MGC_M_DEVCTL_SESSION);

        /* don't draw vbus until new b-default session */
        (void) musb_gadget_vbus_draw(&musb->g, 0);

        musb->g.speed = USB_SPEED_UNKNOWN;
        /* callback runs with musb->Lock dropped */
        if (musb->pGadgetDriver && musb->pGadgetDriver->disconnect) {
                spin_unlock(&musb->Lock);
                musb->pGadgetDriver->disconnect(&musb->g);
                spin_lock(&musb->Lock);
        }

        /* NOTE: with CONFIG_USB_MUSB_OTG off, the default label falls
         * through into the B_PERIPHERAL case below.
         */
        switch (musb->xceiv.state) {
        default:
#ifdef  CONFIG_USB_MUSB_OTG
                musb->xceiv.state = OTG_STATE_A_IDLE;
                break;
        case OTG_STATE_A_PERIPHERAL:
                musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
                break;
        case OTG_STATE_B_WAIT_ACON:
        case OTG_STATE_B_HOST:
#endif
        case OTG_STATE_B_PERIPHERAL:
                musb->xceiv.state = OTG_STATE_B_IDLE;
                break;
        case OTG_STATE_B_SRP_INIT:
                break;
        }

        musb->is_active = 0;
}
1977
/* Bus reset seen in peripheral mode: flush state, latch the negotiated
 * speed, and enter USB_STATE_DEFAULT.  The __releases/__acquires
 * annotations cover the lock drop inside musb_g_disconnect().
 */
void musb_g_reset(struct musb *musb)
__releases(musb->Lock)
__acquires(musb->Lock)
{
        void __iomem    *pBase = musb->pRegs;
        u8              devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
        u8              power;

        DBG(3, "<== %s addr=%x driver '%s'\n",
                        (devctl & MGC_M_DEVCTL_BDEVICE)
                                ? "B-Device" : "A-Device",
                        musb_readb(pBase, MGC_O_HDRC_FADDR),
                        musb->pGadgetDriver
                                ? musb->pGadgetDriver->driver.name
                                : NULL
                        );

        /* report disconnect, if we didn't already (flushing EP state) */
        if (musb->g.speed != USB_SPEED_UNKNOWN)
                musb_g_disconnect(musb);

        /* clear HR */
        else if (devctl & MGC_M_DEVCTL_HR)
                musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);


        /* what speed did we negotiate? */
        power = musb_readb(pBase, MGC_O_HDRC_POWER);
        musb->g.speed = (power & MGC_M_POWER_HSMODE)
                        ? USB_SPEED_HIGH : USB_SPEED_FULL;

        /* start in USB_STATE_DEFAULT */
        musb->is_active = 1;
        musb->is_suspended = 0;
        MUSB_DEV_MODE(musb);
        musb->bAddress = 0;
        musb->ep0_state = MGC_END0_STAGE_SETUP;

        /* reset clears remote-wakeup grant and HNP flags */
        musb->may_wakeup = 0;
        musb->g.b_hnp_enable = 0;
        musb->g.a_alt_hnp_support = 0;
        musb->g.a_hnp_support = 0;

        /* Normal reset, as B-Device;
         * or else after HNP, as A-Device
         */
        if (devctl & MGC_M_DEVCTL_BDEVICE) {
                musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
                musb->g.is_a_peripheral = 0;
        } else if (is_otg_enabled(musb)) {
                musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
                musb->g.is_a_peripheral = 1;
        } else
                WARN_ON(1);

        /* start with default limits on VBUS power draw */
        (void) musb_gadget_vbus_draw(&musb->g,
                        is_otg_enabled(musb) ? 8 : 100);
}