1 /******************************************************************
2 * Copyright 2005 Mentor Graphics Corporation
3 * Copyright (C) 2005-2006 by Texas Instruments
5 * This file is part of the Inventra Controller Driver for Linux.
7 * The Inventra Controller Driver for Linux is free software; you
8 * can redistribute it and/or modify it under the terms of the GNU
9 * General Public License version 2 as published by the Free Software
12 * The Inventra Controller Driver for Linux is distributed in
13 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
14 * without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
16 * License for more details.
18 * You should have received a copy of the GNU General Public License
20 * along with The Inventra Controller Driver for Linux; if not,
20 * write to the Free Software Foundation, Inc., 59 Temple Place,
21 * Suite 330, Boston, MA 02111-1307 USA
23 * ANY DOWNLOAD, USE, REPRODUCTION, MODIFICATION OR DISTRIBUTION
24 * OF THIS DRIVER INDICATES YOUR COMPLETE AND UNCONDITIONAL ACCEPTANCE
25 * OF THOSE TERMS. THIS DRIVER IS PROVIDED "AS IS" AND MENTOR GRAPHICS
26 * MAKES NO WARRANTIES, EXPRESS OR IMPLIED, RELATED TO THIS DRIVER.
27 * MENTOR GRAPHICS SPECIFICALLY DISCLAIMS ALL IMPLIED WARRANTIES
28 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NON-INFRINGEMENT. MENTOR GRAPHICS DOES NOT PROVIDE SUPPORT
30 * SERVICES OR UPDATES FOR THIS DRIVER, EVEN IF YOU ARE A MENTOR
31 * GRAPHICS SUPPORT CUSTOMER.
32 ******************************************************************/
34 #include <linux/kernel.h>
35 #include <linux/list.h>
36 #include <linux/timer.h>
37 #include <linux/module.h>
38 #include <linux/smp.h>
39 #include <linux/spinlock.h>
40 #include <linux/delay.h>
41 #include <linux/moduleparam.h>
42 #include <linux/stat.h>
43 #include <linux/dma-mapping.h>
48 /* MUSB PERIPHERAL status 3-mar:
50 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
53 * + remote wakeup to Linux hosts works, but we saw USBCV failures
54 * in one test run (operator error?)
55 * + endpoint halt tests -- in both usbtest and usbcv -- seem
56 * to break when dma is enabled ... is something wrongly
59 * - Mass storage behaved ok when last tested. Network traffic patterns
60 * (with lots of short transfers etc) need retesting; they turn up the
61 * worst cases of the DMA, since short packets are typical but are not
65 * + both pio and dma behave with network and g_zero tests
66 * + no cppi throughput issues other than no-hw-queueing
67 * + failed with FLAT_REG (DaVinci)
68 * + seems to behave with double buffering, PIO -and- CPPI
69 * + with gadgetfs + AIO, requests got lost?
72 * + both pio and dma behave with network and g_zero tests
73 * + dma is slow in typical case (short_not_ok is clear)
74 * + double buffering ok with PIO
75 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
76 * + request lossage observed with gadgetfs
78 * - ISO not tested ... might work, but only weakly isochronous
80 * - Gadget driver disabling of softconnect during bind() is ignored; so
81 * drivers can't hold off host requests until userspace is ready.
82 * (Workaround: they can turn it off later.)
84 * - PORTABILITY (assumes PIO works):
85 * + DaVinci, basically works with cppi dma
86 * + OMAP 2430, ditto with mentor dma
87 * + TUSB 6010, platform-specific dma in the works
90 /**************************************************************************
92 **************************************************************************/
95 * Immediately complete a request.
97 * @param pRequest the request to complete
98 * @param status the status to complete the request with
99 * Context: controller locked, IRQs blocked.
101 void musb_g_giveback(
103 struct usb_request *pRequest,
105 __releases(ep->musb->Lock)
106 __acquires(ep->musb->Lock)
108 struct musb_request *req;
112 req = to_musb_request(pRequest);
114 list_del(&pRequest->list);
115 if (req->request.status == -EINPROGRESS)
116 req->request.status = status;
120 spin_unlock(&musb->Lock);
121 if (is_dma_capable()) {
123 dma_unmap_single(musb->controller,
129 req->request.dma = DMA_ADDR_INVALID;
131 } else if (req->request.dma != DMA_ADDR_INVALID)
132 dma_sync_single_for_cpu(musb->controller,
139 if (pRequest->status == 0)
140 DBG(5, "%s done request %p, %d/%d\n",
141 ep->end_point.name, pRequest,
142 req->request.actual, req->request.length);
144 DBG(2, "%s request %p, %d/%d fault %d\n",
145 ep->end_point.name, pRequest,
146 req->request.actual, req->request.length,
148 req->request.complete(&req->ep->end_point, &req->request);
149 spin_lock(&musb->Lock);
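
/* For illustration only, not part of this driver: because the lock is
 * dropped around the completion call above, a gadget driver's complete()
 * callback may legally resubmit the request from that context.  A minimal
 * sketch, with a hypothetical callback name:
 *
 *	static void sketch_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		if (req->status == 0)
 *			(void) usb_ep_queue(ep, req, GFP_ATOMIC);
 *	}
 */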
153 /* ----------------------------------------------------------------------- */
156 * Abort requests queued to an endpoint, using the given status. Synchronous.
157 * The caller has locked the controller, blocked IRQs, and selected this ep.
159 static void nuke(struct musb_ep *ep, const int status)
161 struct musb_request *req = NULL;
162 void __iomem *epio = ep->pThis->aLocalEnd[ep->bEndNumber].regs;
166 if (is_dma_capable() && ep->dma) {
167 struct dma_controller *c = ep->pThis->pDmaController;
170 musb_writew(epio, MGC_O_HDRC_TXCSR,
171 0 | MGC_M_TXCSR_FLUSHFIFO);
172 musb_writew(epio, MGC_O_HDRC_TXCSR,
173 0 | MGC_M_TXCSR_FLUSHFIFO);
175 musb_writew(epio, MGC_O_HDRC_RXCSR,
176 0 | MGC_M_RXCSR_FLUSHFIFO);
177 musb_writew(epio, MGC_O_HDRC_RXCSR,
178 0 | MGC_M_RXCSR_FLUSHFIFO);
181 value = c->channel_abort(ep->dma);
182 DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
183 c->channel_release(ep->dma);
187 while (!list_empty(&(ep->req_list))) {
188 req = container_of(ep->req_list.next, struct musb_request,
190 musb_g_giveback(ep, &req->request, status);
194 /**************************************************************************
195 * TX/IN and RX/OUT Data transfers
196 **************************************************************************/
199 * This assumes the separate CPPI engine is responding to DMA requests
200 * from the usb core ... sequenced a bit differently from mentor dma.
203 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
205 if (can_bulk_split(musb, ep->type))
206 return ep->hw_ep->wMaxPacketSizeTx;
208 return ep->wPacketSize;
212 #ifdef CONFIG_USB_INVENTRA_DMA
214 /* Peripheral tx (IN) using Mentor DMA works as follows:
215 Only mode 0 is used for transfers <= wPktSize,
216 mode 1 is used for larger transfers,
218 One of the following happens:
219 - Host sends IN token which causes an endpoint interrupt
221 -> if DMA is currently busy, exit.
222 -> if queue is non-empty, txstate().
224 - Request is queued by the gadget driver.
225 -> if queue was previously empty, txstate()
230 | (data is transferred to the FIFO, then sent out when
231 | IN token(s) are received from the host).
232 | -> DMA interrupt on completion
234 | -> stop DMA, ~DmaEnab,
235 | -> set TxPktRdy for last short pkt or zlp
236 | -> Complete Request
237 | -> Continue next request (call txstate)
238 |___________________________________|
240 * Non-Mentor DMA engines can of course work differently, such as by
241 * upleveling from irq-per-packet to irq-per-buffer.
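
/* Sketch for illustration (hypothetical helper, not driver code): the
 * mode rule described above reduces to a simple predicate, with the
 * trailing short packet or ZLP still handled by setting TxPktRdy by hand:
 *
 *	static inline u8 mentor_tx_dma_mode(size_t len, u16 packet_size)
 *	{
 *		return (len <= packet_size) ? 0 : 1;
 *	}
 */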
247 * An endpoint is transmitting data. This can be called either from
248 * the IRQ routine or from ep.queue() to kickstart a request on an
251 * Context: controller locked, IRQs blocked, endpoint selected
253 static void txstate(struct musb *musb, struct musb_request *req)
256 struct musb_ep *pEnd;
257 void __iomem *epio = musb->aLocalEnd[bEnd].regs;
258 struct usb_request *pRequest;
259 u16 wFifoCount = 0, wCsrVal;
264 /* we shouldn't get here while DMA is active ... but we do ... */
265 if (dma_channel_status(pEnd->dma) == MGC_DMA_STATUS_BUSY) {
266 DBG(4, "dma pending...\n");
270 /* read TXCSR before */
271 wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
273 pRequest = &req->request;
274 wFifoCount = min(max_ep_writesize(musb, pEnd),
275 (int)(pRequest->length - pRequest->actual));
277 if (wCsrVal & MGC_M_TXCSR_TXPKTRDY) {
278 DBG(5, "%s old packet still ready, txcsr %03x\n",
279 pEnd->end_point.name, wCsrVal);
283 if (wCsrVal & MGC_M_TXCSR_P_SENDSTALL) {
284 DBG(5, "%s stalling, txcsr %03x\n",
285 pEnd->end_point.name, wCsrVal);
289 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
290 bEnd, pEnd->wPacketSize, wFifoCount,
293 #ifndef CONFIG_USB_INVENTRA_FIFO
294 if (is_dma_capable() && pEnd->dma) {
295 struct dma_controller *c = musb->pDmaController;
297 use_dma = (pRequest->dma != DMA_ADDR_INVALID);
299 /* MGC_M_TXCSR_P_ISO is still set correctly */
301 #ifdef CONFIG_USB_INVENTRA_DMA
305 /* setup DMA, then program endpoint CSR */
306 request_size = min(pRequest->length,
307 pEnd->dma->dwMaxLength);
308 if (request_size <= pEnd->wPacketSize)
309 pEnd->dma->bDesiredMode = 0;
311 pEnd->dma->bDesiredMode = 1;
313 use_dma = use_dma && c->channel_program(
314 pEnd->dma, pEnd->wPacketSize,
315 pEnd->dma->bDesiredMode,
316 pRequest->dma, request_size);
318 if (pEnd->dma->bDesiredMode == 0) {
319 /* ASSERT: DMAENAB is clear */
320 wCsrVal &= ~(MGC_M_TXCSR_AUTOSET |
321 MGC_M_TXCSR_DMAMODE);
322 wCsrVal |= (MGC_M_TXCSR_DMAENAB |
324 // against programming guide
327 wCsrVal |= (MGC_M_TXCSR_AUTOSET
328 | MGC_M_TXCSR_DMAENAB
329 | MGC_M_TXCSR_DMAMODE
332 wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
333 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
337 #elif defined(CONFIG_USB_TI_CPPI_DMA)
338 /* program endpoint CSR first, then setup DMA */
339 wCsrVal &= ~(MGC_M_TXCSR_AUTOSET
340 | MGC_M_TXCSR_DMAMODE
341 | MGC_M_TXCSR_P_UNDERRUN
342 | MGC_M_TXCSR_TXPKTRDY);
343 wCsrVal |= MGC_M_TXCSR_MODE | MGC_M_TXCSR_DMAENAB;
344 musb_writew(epio, MGC_O_HDRC_TXCSR,
345 (MGC_M_TXCSR_P_WZC_BITS & ~MGC_M_TXCSR_P_UNDERRUN)
348 /* ensure writebuffer is empty */
349 wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
351 /* NOTE host side sets DMAENAB later than this; both are
352 * OK since the transfer dma glue (between CPPI and Mentor
353 * fifos) just tells CPPI it could start. Data only moves
354 * to the USB TX fifo when both fifos are ready.
357 /* "mode" is irrelevant here; handle terminating ZLPs like
358 * PIO does, since the hardware RNDIS mode seems unreliable
359 * except for the last-packet-is-already-short case.
361 use_dma = use_dma && c->channel_program(
362 pEnd->dma, pEnd->wPacketSize,
367 c->channel_release(pEnd->dma);
369 /* ASSERT: DMAENAB clear */
370 wCsrVal &= ~(MGC_M_TXCSR_DMAMODE | MGC_M_TXCSR_MODE);
371 /* invariant: pRequest->buf is non-null */
373 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
374 use_dma = use_dma && c->channel_program(
375 pEnd->dma, pEnd->wPacketSize,
384 musb_write_fifo(pEnd->hw_ep, wFifoCount,
385 (u8 *) (pRequest->buf + pRequest->actual));
386 pRequest->actual += wFifoCount;
387 wCsrVal |= MGC_M_TXCSR_TXPKTRDY;
388 wCsrVal &= ~MGC_M_TXCSR_P_UNDERRUN;
389 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
392 /* host may already have the data when this message shows... */
393 DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
394 pEnd->end_point.name, use_dma ? "dma" : "pio",
395 pRequest->actual, pRequest->length,
396 musb_readw(epio, MGC_O_HDRC_TXCSR),
398 musb_readw(epio, MGC_O_HDRC_TXMAXP));
402 * FIFO state update (e.g. data ready).
403 * Called from IRQ, with controller locked.
405 void musb_g_tx(struct musb *musb, u8 bEnd)
408 struct usb_request *pRequest;
409 u8 __iomem *pBase = musb->pRegs;
410 struct musb_ep *pEnd = &musb->aLocalEnd[bEnd].ep_in;
411 void __iomem *epio = musb->aLocalEnd[bEnd].regs;
412 struct dma_channel *dma;
414 MGC_SelectEnd(pBase, bEnd);
415 pRequest = next_request(pEnd);
417 wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
418 DBG(4, "<== %s, txcsr %04x\n", pEnd->end_point.name, wCsrVal);
420 dma = is_dma_capable() ? pEnd->dma : NULL;
422 /* REVISIT for high bandwidth, MGC_M_TXCSR_P_INCOMPTX
423 * probably rates reporting as a host error
425 if (wCsrVal & MGC_M_TXCSR_P_SENTSTALL) {
426 wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
427 wCsrVal &= ~MGC_M_TXCSR_P_SENTSTALL;
428 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
429 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
430 dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
431 musb->pDmaController->channel_abort(dma);
435 musb_g_giveback(pEnd, pRequest, -EPIPE);
440 if (wCsrVal & MGC_M_TXCSR_P_UNDERRUN) {
441 /* we NAKed, no big deal ... little reason to care */
442 wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
443 wCsrVal &= ~(MGC_M_TXCSR_P_UNDERRUN
444 | MGC_M_TXCSR_TXPKTRDY);
445 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
446 DBG(20, "underrun on ep%d, req %p\n", bEnd, pRequest);
449 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
450 /* SHOULD NOT HAPPEN ... has with cppi though, after
451 * changing SENDSTALL (and other cases); harmless?
453 DBG(5, "%s dma still busy?\n", pEnd->end_point.name);
460 if (dma && (wCsrVal & MGC_M_TXCSR_DMAENAB)) {
462 wCsrVal |= MGC_M_TXCSR_P_WZC_BITS;
463 wCsrVal &= ~(MGC_M_TXCSR_DMAENAB
464 | MGC_M_TXCSR_P_UNDERRUN
465 | MGC_M_TXCSR_TXPKTRDY);
466 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsrVal);
467 /* ensure writebuffer is empty */
468 wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
469 pRequest->actual += pEnd->dma->dwActualLength;
470 DBG(4, "TXCSR%d %04x, dma off, "
473 pEnd->dma->dwActualLength,
477 if (is_dma || pRequest->actual == pRequest->length) {
479 /* First, maybe a terminating short packet.
480 * Some DMA engines might handle this by
488 #ifdef CONFIG_USB_INVENTRA_DMA
490 ((!dma->bDesiredMode) ||
492 (pEnd->wPacketSize - 1))))
495 /* on dma completion, fifo may not
496 * be available yet ...
498 if (wCsrVal & MGC_M_TXCSR_TXPKTRDY)
501 DBG(4, "sending zero pkt\n");
502 musb_writew(epio, MGC_O_HDRC_TXCSR,
504 | MGC_M_TXCSR_TXPKTRDY);
508 /* ... or if not, then complete it */
509 musb_g_giveback(pEnd, pRequest, 0);
511 /* kickstart next transfer if appropriate;
512 * the packet that just completed might not
513 * be transmitted for hours or days.
514 * REVISIT for double buffering...
515 * FIXME revisit for stalls too...
517 MGC_SelectEnd(pBase, bEnd);
518 wCsrVal = musb_readw(epio, MGC_O_HDRC_TXCSR);
519 if (wCsrVal & MGC_M_TXCSR_FIFONOTEMPTY)
521 pRequest = pEnd->desc
525 DBG(4, "%s idle now\n",
526 pEnd->end_point.name);
531 txstate(musb, to_musb_request(pRequest));
537 /* ------------------------------------------------------------ */
539 #ifdef CONFIG_USB_INVENTRA_DMA
541 /* Peripheral rx (OUT) using Mentor DMA works as follows:
542 - Only mode 0 is used.
544 - Request is queued by the gadget class driver.
545 -> if queue was previously empty, rxstate()
547 - Host sends OUT token which causes an endpoint interrupt
549 | -> if request queued, call rxstate
551 | | -> DMA interrupt on completion
555 | | -> if data recd = max expected
556 | | by the request, or host
557 | | sent a short packet,
558 | | complete the request,
559 | | and start the next one.
560 | |_____________________________________|
561 | else just wait for the host
562 | to send the next OUT token.
563 |__________________________________________________|
565 * Non-Mentor DMA engines can of course work differently.
571 * Context: controller locked, IRQs blocked, endpoint selected
573 static void rxstate(struct musb *musb, struct musb_request *req)
576 const u8 bEnd = req->bEnd;
577 struct usb_request *pRequest = &req->request;
578 struct musb_ep *pEnd = &musb->aLocalEnd[bEnd].ep_out;
579 void __iomem *epio = musb->aLocalEnd[bEnd].regs;
581 u16 wCount = pEnd->wPacketSize;
583 wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
585 if (is_cppi_enabled() && pEnd->dma) {
586 struct dma_controller *c = musb->pDmaController;
587 struct dma_channel *channel = pEnd->dma;
589 /* NOTE: CPPI won't actually stop advancing the DMA
590 * queue after short packet transfers, so this is almost
591 * always going to run as IRQ-per-packet DMA so that
592 * faults will be handled correctly.
594 if (c->channel_program(channel,
596 !pRequest->short_not_ok,
597 pRequest->dma + pRequest->actual,
598 pRequest->length - pRequest->actual)) {
600 /* make sure that if an rxpkt arrived after the irq,
601 * the cppi engine will be ready to take it as soon
604 wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
605 | MGC_M_RXCSR_DMAMODE);
606 wCsrVal |= MGC_M_RXCSR_DMAENAB | MGC_M_RXCSR_P_WZC_BITS;
607 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
612 if (wCsrVal & MGC_M_RXCSR_RXPKTRDY) {
613 wCount = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
614 if (pRequest->actual < pRequest->length) {
615 #ifdef CONFIG_USB_INVENTRA_DMA
616 if (is_dma_capable() && pEnd->dma) {
617 struct dma_controller *c;
618 struct dma_channel *channel;
621 c = musb->pDmaController;
624 /* We use DMA Req mode 0 in RxCsr, and DMA controller operates in
625 * mode 0 only. So we do not get endpoint interrupts due to DMA
626 * completion. We only get interrupts from the DMA controller.
628 * We could operate in DMA mode 1 if we knew the size of the transfer
629 * in advance. For mass storage class, request->length = what the host
630 * sends, so that'd work. But for pretty much everything else,
631 * request->length is routinely more than what the host sends. For
632 * most of these gadgets, the end is signified either by a short packet,
633 * or filling the last byte of the buffer. (Sending extra data in
634 * that last packet should trigger an overflow fault.) But in mode 1,
635 * we don't get a DMA completion interrupt for short packets.
637 * Theoretically, we could enable DMAReq interrupt (RxCsr_DMAMODE = 1),
638 * to get endpoint interrupt on every DMA req, but that didn't seem
641 * REVISIT an updated g_file_storage can set req->short_not_ok, which
642 * then becomes usable as a runtime "use mode 1" hint...
645 wCsrVal |= MGC_M_RXCSR_DMAENAB;
647 wCsrVal |= MGC_M_RXCSR_AUTOCLEAR;
648 // wCsrVal |= MGC_M_RXCSR_DMAMODE;
650 /* this special sequence (enabling and then
651 disabling MGC_M_RXCSR_DMAMODE) is required
652 to get DMAReq to activate
654 musb_writew(epio, MGC_O_HDRC_RXCSR,
655 wCsrVal | MGC_M_RXCSR_DMAMODE);
657 musb_writew(epio, MGC_O_HDRC_RXCSR,
660 if (pRequest->actual < pRequest->length) {
661 int transfer_size = 0;
663 transfer_size = min(pRequest->length,
664 channel->dwMaxLength);
666 transfer_size = wCount;
668 if (transfer_size <= pEnd->wPacketSize)
669 pEnd->dma->bDesiredMode = 0;
671 pEnd->dma->bDesiredMode = 1;
673 use_dma = c->channel_program(
676 channel->bDesiredMode,
685 #endif /* Mentor's USB */
687 wFifoCount = pRequest->length - pRequest->actual;
688 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
689 pEnd->end_point.name,
693 wFifoCount = min(wCount, wFifoCount);
695 #ifdef CONFIG_USB_TUSB_OMAP_DMA
696 if (tusb_dma_omap() && pEnd->dma) {
697 struct dma_controller *c = musb->pDmaController;
698 struct dma_channel *channel = pEnd->dma;
699 u32 dma_addr = pRequest->dma + pRequest->actual;
702 ret = c->channel_program(channel,
704 channel->bDesiredMode,
712 musb_read_fifo(pEnd->hw_ep, wFifoCount, (u8 *)
713 (pRequest->buf + pRequest->actual));
714 pRequest->actual += wFifoCount;
716 /* REVISIT if we left anything in the fifo, flush
717 * it and report -EOVERFLOW
721 wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
722 wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
723 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
727 /* reached the end, or a short packet was detected */
728 if (pRequest->actual == pRequest->length || wCount < pEnd->wPacketSize)
729 musb_g_giveback(pEnd, pRequest, 0);
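
/* For illustration (hypothetical helper, not driver code): the completion
 * test just above amounts to:
 *
 *	static bool rx_request_done(const struct usb_request *req,
 *			u16 len, u16 packet_size)
 *	{
 *		return req->actual == req->length || len < packet_size;
 *	}
 *
 * so an OUT request may complete with actual < length when the host ends
 * the transfer with a short packet.
 */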
733 * Data ready for a request; called from IRQ
735 void musb_g_rx(struct musb *musb, u8 bEnd)
738 struct usb_request *pRequest;
739 void __iomem *pBase = musb->pRegs;
740 struct musb_ep *pEnd = &musb->aLocalEnd[bEnd].ep_out;
741 void __iomem *epio = musb->aLocalEnd[bEnd].regs;
742 struct dma_channel *dma;
744 MGC_SelectEnd(pBase, bEnd);
746 pRequest = next_request(pEnd);
748 wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
749 dma = is_dma_capable() ? pEnd->dma : NULL;
751 DBG(4, "<== %s, rxcsr %04x%s %p\n", pEnd->end_point.name,
752 wCsrVal, dma ? " (dma)" : "", pRequest);
754 if (wCsrVal & MGC_M_RXCSR_P_SENTSTALL) {
755 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
756 dma->bStatus = MGC_DMA_STATUS_CORE_ABORT;
757 (void) musb->pDmaController->channel_abort(dma);
758 pRequest->actual += pEnd->dma->dwActualLength;
761 wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
762 wCsrVal &= ~MGC_M_RXCSR_P_SENTSTALL;
763 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
766 musb_g_giveback(pEnd, pRequest, -EPIPE);
770 if (wCsrVal & MGC_M_RXCSR_P_OVERRUN) {
771 // wCsrVal |= MGC_M_RXCSR_P_WZC_BITS;
772 wCsrVal &= ~MGC_M_RXCSR_P_OVERRUN;
773 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
775 DBG(3, "%s iso overrun on %p\n", pEnd->name, pRequest);
776 if (pRequest && pRequest->status == -EINPROGRESS)
777 pRequest->status = -EOVERFLOW;
779 if (wCsrVal & MGC_M_RXCSR_INCOMPRX) {
780 /* REVISIT not necessarily an error */
781 DBG(4, "%s, incomprx\n", pEnd->end_point.name);
784 if (dma_channel_status(dma) == MGC_DMA_STATUS_BUSY) {
785 /* "should not happen"; likely RXPKTRDY pending for DMA */
786 DBG((wCsrVal & MGC_M_RXCSR_DMAENAB) ? 4 : 1,
787 "%s busy, csr %04x\n",
788 pEnd->end_point.name, wCsrVal);
792 if (dma && (wCsrVal & MGC_M_RXCSR_DMAENAB)) {
793 wCsrVal &= ~(MGC_M_RXCSR_AUTOCLEAR
794 | MGC_M_RXCSR_DMAENAB
795 | MGC_M_RXCSR_DMAMODE);
796 musb_writew(epio, MGC_O_HDRC_RXCSR,
797 MGC_M_RXCSR_P_WZC_BITS | wCsrVal);
799 pRequest->actual += pEnd->dma->dwActualLength;
801 DBG(4, "RXCSR%d %04x, dma off, %04x, len %Zd, req %p\n",
803 musb_readw(epio, MGC_O_HDRC_RXCSR),
804 pEnd->dma->dwActualLength, pRequest);
806 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
807 /* Autoclear doesn't clear RxPktRdy for short packets */
808 if ((dma->bDesiredMode == 0)
809 || (dma->dwActualLength
810 & (pEnd->wPacketSize - 1))) {
812 wCsrVal &= ~MGC_M_RXCSR_RXPKTRDY;
813 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsrVal);
816 /* incomplete, and not short? wait for next OUT packet */
817 if ((pRequest->actual < pRequest->length)
818 && (pEnd->dma->dwActualLength
819 == pEnd->wPacketSize))
822 musb_g_giveback(pEnd, pRequest, 0);
824 pRequest = next_request(pEnd);
828 /* don't start more i/o till the stall clears */
829 MGC_SelectEnd(pBase, bEnd);
830 wCsrVal = musb_readw(epio, MGC_O_HDRC_RXCSR);
831 if (wCsrVal & MGC_M_RXCSR_P_SENDSTALL)
836 /* analyze request if the ep is hot */
838 rxstate(musb, to_musb_request(pRequest));
840 DBG(3, "packet waiting for %s%s request\n",
841 pEnd->desc ? "" : "inactive ",
842 pEnd->end_point.name);
848 /* ------------------------------------------------------------ */
850 static int musb_gadget_enable(struct usb_ep *ep,
851 const struct usb_endpoint_descriptor *desc)
854 struct musb_ep *pEnd;
855 struct musb_hw_ep *hw_ep;
862 int status = -EINVAL;
867 pEnd = to_musb_ep(ep);
872 bEnd = pEnd->bEndNumber;
874 spin_lock_irqsave(&musb->Lock, flags);
880 pEnd->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
882 /* check direction and (later) maxpacket size against endpoint */
883 if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != bEnd)
886 /* REVISIT this rules out high bandwidth periodic transfers */
887 tmp = le16_to_cpu(desc->wMaxPacketSize);
890 pEnd->wPacketSize = tmp;
892 /* enable the interrupts for the endpoint, set the endpoint
893 * packet size (or fail), set the mode, clear the fifo
895 MGC_SelectEnd(pBase, bEnd);
896 if (desc->bEndpointAddress & USB_DIR_IN) {
897 u16 wIntrTxE = musb_readw(pBase, MGC_O_HDRC_INTRTXE);
899 if (hw_ep->bIsSharedFifo)
903 if (tmp > hw_ep->wMaxPacketSizeTx)
906 wIntrTxE |= (1 << bEnd);
907 musb_writew(pBase, MGC_O_HDRC_INTRTXE, wIntrTxE);
909 /* REVISIT if can_bulk_split(), use by updating "tmp";
910 * likewise high bandwidth periodic tx
912 musb_writew(regs, MGC_O_HDRC_TXMAXP, tmp);
914 csr = MGC_M_TXCSR_MODE | MGC_M_TXCSR_CLRDATATOG;
915 if (musb_readw(regs, MGC_O_HDRC_TXCSR)
916 & MGC_M_TXCSR_FIFONOTEMPTY)
917 csr |= MGC_M_TXCSR_FLUSHFIFO;
918 if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
919 csr |= MGC_M_TXCSR_P_ISO;
921 /* set twice in case of double buffering */
922 musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
923 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
924 musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
927 u16 wIntrRxE = musb_readw(pBase, MGC_O_HDRC_INTRRXE);
929 if (hw_ep->bIsSharedFifo)
933 if (tmp > hw_ep->wMaxPacketSizeRx)
936 wIntrRxE |= (1 << bEnd);
937 musb_writew(pBase, MGC_O_HDRC_INTRRXE, wIntrRxE);
939 /* REVISIT if can_bulk_combine() use by updating "tmp"
940 * likewise high bandwidth periodic rx
942 musb_writew(regs, MGC_O_HDRC_RXMAXP, tmp);
944 /* force shared fifo to OUT-only mode */
945 if (hw_ep->bIsSharedFifo) {
946 csr = musb_readw(regs, MGC_O_HDRC_TXCSR);
947 csr &= ~(MGC_M_TXCSR_MODE | MGC_M_TXCSR_TXPKTRDY);
948 musb_writew(regs, MGC_O_HDRC_TXCSR, csr);
951 csr = MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_CLRDATATOG;
952 if (pEnd->type == USB_ENDPOINT_XFER_ISOC)
953 csr |= MGC_M_RXCSR_P_ISO;
954 else if (pEnd->type == USB_ENDPOINT_XFER_INT)
955 csr |= MGC_M_RXCSR_DISNYET;
957 /* set twice in case of double buffering */
958 musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
959 musb_writew(regs, MGC_O_HDRC_RXCSR, csr);
962 /* NOTE: all the I/O code _should_ work fine without DMA, in case
963 * for some reason you run out of channels here.
965 if (is_dma_capable() && musb->pDmaController) {
966 struct dma_controller *c = musb->pDmaController;
968 pEnd->dma = c->channel_alloc(c, hw_ep,
969 (desc->bEndpointAddress & USB_DIR_IN));
977 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
978 musb_driver_name, pEnd->end_point.name,
979 ({ char *s; switch (pEnd->type) {
980 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
981 case USB_ENDPOINT_XFER_INT: s = "int"; break;
982 default: s = "iso"; break;
984 pEnd->is_in ? "IN" : "OUT",
985 pEnd->dma ? "dma, " : "",
988 schedule_work(&musb->irq_work);
991 spin_unlock_irqrestore(&musb->Lock, flags);
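
/* For illustration, not part of this driver: gadget drivers reach the
 * enable hook above through usb_ep_enable(), passing the descriptor they
 * chose for the current connection speed.  Sketch, with a hypothetical
 * bulk-IN descriptor:
 *
 *	static struct usb_endpoint_descriptor sketch_bulk_in_desc = {
 *		.bLength = USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType = USB_DT_ENDPOINT,
 *		.bEndpointAddress = USB_DIR_IN,
 *		.bmAttributes = USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize = __constant_cpu_to_le16(512),
 *	};
 *
 *	status = usb_ep_enable(ep, &sketch_bulk_in_desc);
 */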
996 * Disable an endpoint, flushing all queued requests.
998 static int musb_gadget_disable(struct usb_ep *ep)
1000 unsigned long flags;
1003 struct musb_ep *pEnd;
1007 pEnd = to_musb_ep(ep);
1009 bEnd = pEnd->bEndNumber;
1010 epio = musb->aLocalEnd[bEnd].regs;
1012 spin_lock_irqsave(&musb->Lock, flags);
1013 MGC_SelectEnd(musb->pRegs, bEnd);
1015 /* zero the endpoint sizes */
1017 u16 wIntrTxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRTXE);
1018 wIntrTxE &= ~(1 << bEnd);
1019 musb_writew(musb->pRegs, MGC_O_HDRC_INTRTXE, wIntrTxE);
1020 musb_writew(epio, MGC_O_HDRC_TXMAXP, 0);
1022 u16 wIntrRxE = musb_readw(musb->pRegs, MGC_O_HDRC_INTRRXE);
1023 wIntrRxE &= ~(1 << bEnd);
1024 musb_writew(musb->pRegs, MGC_O_HDRC_INTRRXE, wIntrRxE);
1025 musb_writew(epio, MGC_O_HDRC_RXMAXP, 0);
1030 /* abort all pending DMA and requests */
1031 nuke(pEnd, -ESHUTDOWN);
1033 schedule_work(&musb->irq_work);
1035 spin_unlock_irqrestore(&(musb->Lock), flags);
1037 DBG(2, "%s\n", pEnd->end_point.name);
1043 * Allocate a request for an endpoint.
1044 * Reused by ep0 code.
1046 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1048 struct musb_ep *musb_ep = to_musb_ep(ep);
1049 struct musb_request *pRequest = NULL;
1051 pRequest = kzalloc(sizeof *pRequest, gfp_flags);
1053 INIT_LIST_HEAD(&pRequest->request.list);
1054 pRequest->request.dma = DMA_ADDR_INVALID;
1055 pRequest->bEnd = musb_ep->bEndNumber;
1056 pRequest->ep = musb_ep;
1059 return &pRequest->request;
1064 * Reused by ep0 code.
1066 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1068 kfree(to_musb_request(req));
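
/* For illustration, not part of this driver: the two hooks above are
 * reached through the usb_ep_* wrappers; DMA_ADDR_INVALID in a fresh
 * request tells the queue path below to map the buffer itself.  Sketch:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	if (req) {
 *		... fill req->buf, req->length, req->complete ...
 *		usb_ep_free_request(ep, req);
 *	}
 */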
1072 * dma-coherent memory allocation (for dma-capable endpoints)
1074 * NOTE: the dma_*_coherent() API calls suck; most implementations are
1075 * (a) page-oriented, so small buffers lose big, and (b) asymmetric with
1076 * respect to calls with irqs disabled: alloc is safe, free is not.
1078 static void *musb_gadget_alloc_buffer(struct usb_ep *ep, unsigned bytes,
1079 dma_addr_t * dma, gfp_t gfp_flags)
1081 struct musb_ep *musb_ep = to_musb_ep(ep);
1083 return dma_alloc_coherent(musb_ep->pThis->controller,
1084 bytes, dma, gfp_flags);
1087 static DEFINE_SPINLOCK(buflock);
1088 static LIST_HEAD(buffers);
1090 struct free_record {
1091 struct list_head list;
1097 static void do_free(unsigned long ignored)
1099 spin_lock_irq(&buflock);
1100 while (!list_empty(&buffers)) {
1101 struct free_record *buf;
1103 buf = list_entry(buffers.next, struct free_record, list);
1104 list_del(&buf->list);
1105 spin_unlock_irq(&buflock);
1107 dma_free_coherent(buf->dev, buf->bytes, buf, buf->dma);
1109 spin_lock_irq(&buflock);
1111 spin_unlock_irq(&buflock);
1114 static DECLARE_TASKLET(deferred_free, do_free, 0);
1116 static void musb_gadget_free_buffer(struct usb_ep *ep,
1117 void *address, dma_addr_t dma, unsigned bytes)
1119 struct musb_ep *musb_ep = to_musb_ep(ep);
1120 struct free_record *buf = address;
1121 unsigned long flags;
1123 buf->dev = musb_ep->pThis->controller;
1127 spin_lock_irqsave(&buflock, flags);
1128 list_add_tail(&buf->list, &buffers);
1129 tasklet_schedule(&deferred_free);
1130 spin_unlock_irqrestore(&buflock, flags);
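
/* For illustration, not part of this driver: the tasklet above exists
 * because dma_free_coherent() may not be called with IRQs disabled,
 * while the free_buffer hook may be.  From a gadget driver the pair
 * looks like this (sketch, size arbitrary):
 *
 *	dma_addr_t dma;
 *	void *buf = usb_ep_alloc_buffer(ep, 4096, &dma, GFP_KERNEL);
 *
 *	if (buf)
 *		usb_ep_free_buffer(ep, buf, dma, 4096);
 */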
1134 * Context: controller locked, IRQs blocked.
1136 static void musb_ep_restart(struct musb *musb, struct musb_request *req)
1138 DBG(3, "<== %s request %p len %u on hw_ep%d\n",
1139 req->bTx ? "TX/IN" : "RX/OUT",
1140 &req->request, req->request.length, req->bEnd);
1142 MGC_SelectEnd(musb->pRegs, req->bEnd);
1149 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1152 struct musb_ep *pEnd;
1153 struct musb_request *pRequest;
1156 unsigned long lockflags;
1163 pEnd = to_musb_ep(ep);
1166 pRequest = to_musb_request(req);
1167 pRequest->musb = musb;
1169 if (pRequest->ep != pEnd)
1172 DBG(4, "<== to %s request=%p\n", ep->name, req);
1174 /* request is mine now... */
1175 pRequest->request.actual = 0;
1176 pRequest->request.status = -EINPROGRESS;
1177 pRequest->bEnd = pEnd->bEndNumber;
1178 pRequest->bTx = pEnd->is_in;
1180 if (is_dma_capable() && pEnd->dma) {
1181 if (pRequest->request.dma == DMA_ADDR_INVALID) {
1182 pRequest->request.dma = dma_map_single(
1184 pRequest->request.buf,
1185 pRequest->request.length,
1189 pRequest->mapped = 1;
1191 dma_sync_single_for_device(musb->controller,
1192 pRequest->request.dma,
1193 pRequest->request.length,
1197 pRequest->mapped = 0;
1199 } else if (!req->buf) {
1202 pRequest->mapped = 0;
1204 spin_lock_irqsave(&musb->Lock, lockflags);
1206 /* don't queue if the ep is down */
1208 DBG(4, "req %p queued to %s while ep %s\n",
1209 req, ep->name, "disabled");
1210 status = -ESHUTDOWN;
1214 /* add pRequest to the list */
1215 list_add_tail(&(pRequest->request.list), &(pEnd->req_list));
1217 /* if this is the head of the queue, start i/o ... */
1218 if (!pEnd->busy && &pRequest->request.list == pEnd->req_list.next)
1219 musb_ep_restart(musb, pRequest);
1222 spin_unlock_irqrestore(&musb->Lock, lockflags);
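
/* For illustration, not part of this driver: a typical submit path into
 * the queue method above.  Leaving req->dma as DMA_ADDR_INVALID makes
 * this driver do the dma_map_single() itself, as seen above.  Sketch,
 * with hypothetical buffer and callback names:
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->dma = DMA_ADDR_INVALID;
 *	req->complete = sketch_complete;
 *
 *	status = usb_ep_queue(ep, req, GFP_ATOMIC);
 */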
1226 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *pRequest)
1228 struct musb_ep *pEnd = to_musb_ep(ep);
1229 struct usb_request *r;
1230 unsigned long flags;
1232 struct musb *musb = pEnd->pThis;
1234 if (!ep || !pRequest || to_musb_request(pRequest)->ep != pEnd)
1237 spin_lock_irqsave(&musb->Lock, flags);
1239 list_for_each_entry(r, &pEnd->req_list, list) {
1243 if (r != pRequest) {
1244 DBG(3, "request %p not queued to %s\n", pRequest, ep->name);
1249 /* if the hardware doesn't have the request, easy ... */
1250 if (pEnd->req_list.next != &pRequest->list || pEnd->busy)
1251 musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1253 /* ... else abort the dma transfer ... */
1254 else if (is_dma_capable() && pEnd->dma) {
1255 struct dma_controller *c = musb->pDmaController;
1257 MGC_SelectEnd(musb->pRegs, pEnd->bEndNumber);
1258 if (c->channel_abort)
1259 status = c->channel_abort(pEnd->dma);
1263 musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1265 /* NOTE: by sticking to easily tested hardware/driver states,
1266 * we leave counting of in-flight packets imprecise.
1268 musb_g_giveback(pEnd, pRequest, -ECONNRESET);
1272 spin_unlock_irqrestore(&musb->Lock, flags);
1277 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1278 * data but will queue requests.
1280 * exported to ep0 code
1282 int musb_gadget_set_halt(struct usb_ep *ep, int value)
1284 struct musb_ep *pEnd = to_musb_ep(ep);
1285 u8 bEnd = pEnd->bEndNumber;
1286 struct musb *musb = pEnd->pThis;
1287 void __iomem *epio = musb->aLocalEnd[bEnd].regs;
1288 void __iomem *pBase;
1289 unsigned long flags;
1291 struct musb_request *pRequest = NULL;
1296 pBase = musb->pRegs;
1298 spin_lock_irqsave(&musb->Lock, flags);
1300 if (USB_ENDPOINT_XFER_ISOC == pEnd->type) {
1305 MGC_SelectEnd(pBase, bEnd);
1307 /* cannot portably stall with non-empty FIFO */
1308 pRequest = to_musb_request(next_request(pEnd));
1309 if (value && pEnd->is_in) {
1310 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1311 if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
1312 DBG(3, "%s fifo busy, cannot halt\n", ep->name);
1313 spin_unlock_irqrestore(&musb->Lock, flags);
1319 /* set/clear the stall and toggle bits */
1320 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1322 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1323 if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY)
1324 wCsr |= MGC_M_TXCSR_FLUSHFIFO;
1325 wCsr |= MGC_M_TXCSR_P_WZC_BITS
1326 | MGC_M_TXCSR_CLRDATATOG;
1328 wCsr |= MGC_M_TXCSR_P_SENDSTALL;
1330 wCsr &= ~(MGC_M_TXCSR_P_SENDSTALL
1331 | MGC_M_TXCSR_P_SENTSTALL);
1332 wCsr &= ~MGC_M_TXCSR_TXPKTRDY;
1333 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
1335 wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
1336 wCsr |= MGC_M_RXCSR_P_WZC_BITS
1337 | MGC_M_RXCSR_FLUSHFIFO
1338 | MGC_M_RXCSR_CLRDATATOG;
1340 wCsr |= MGC_M_RXCSR_P_SENDSTALL;
1342 wCsr &= ~(MGC_M_RXCSR_P_SENDSTALL
1343 | MGC_M_RXCSR_P_SENTSTALL);
1344 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
1349 /* maybe start the first request in the queue */
1350 if (!pEnd->busy && !value && pRequest) {
1351 DBG(3, "restarting the request\n");
1352 musb_ep_restart(musb, pRequest);
1355 spin_unlock_irqrestore(&musb->Lock, flags);
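
/* For illustration, not part of this driver: gadget drivers reach the
 * halt method above via usb_ep_set_halt()/usb_ep_clear_halt().  As noted
 * above, halting an IN endpoint whose FIFO is not yet empty fails, so a
 * driver retries later.  Sketch:
 *
 *	if (usb_ep_set_halt(ep) < 0)
 *		... FIFO still draining, try again later ...
 *	...
 *	usb_ep_clear_halt(ep);
 */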
1359 static int musb_gadget_fifo_status(struct usb_ep *ep)
1361 struct musb_ep *musb_ep = to_musb_ep(ep);
1362 void __iomem *epio = musb_ep->hw_ep->regs;
1363 int retval = -EINVAL;
1365 if (musb_ep->desc && !musb_ep->is_in) {
1366 struct musb *musb = musb_ep->pThis;
1367 int bEnd = musb_ep->bEndNumber;
1368 void __iomem *mbase = musb->pRegs;
1369 unsigned long flags;
1371 spin_lock_irqsave(&musb->Lock, flags);
1373 MGC_SelectEnd(mbase, bEnd);
1374 /* FIXME return zero unless RXPKTRDY is set */
1375 retval = musb_readw(epio, MGC_O_HDRC_RXCOUNT);
1377 spin_unlock_irqrestore(&musb->Lock, flags);
1382 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1384 struct musb_ep *musb_ep = to_musb_ep(ep);
1385 struct musb *musb = musb_ep->pThis;
1386 u8 nEnd = musb_ep->bEndNumber;
1387 void __iomem *epio = musb->aLocalEnd[nEnd].regs;
1388 void __iomem *mbase;
1389 unsigned long flags;
1392 mbase = musb->pRegs;
1394 spin_lock_irqsave(&musb->Lock, flags);
1395 MGC_SelectEnd(mbase, (u8) nEnd);
1397 /* disable interrupts */
1398 wIntrTxE = musb_readw(mbase, MGC_O_HDRC_INTRTXE);
1399 musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE & ~(1 << nEnd));
1401 if (musb_ep->is_in) {
1402 wCsr = musb_readw(epio, MGC_O_HDRC_TXCSR);
1403 if (wCsr & MGC_M_TXCSR_FIFONOTEMPTY) {
1404 wCsr |= MGC_M_TXCSR_FLUSHFIFO | MGC_M_TXCSR_P_WZC_BITS;
1405 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
1406 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1407 musb_writew(epio, MGC_O_HDRC_TXCSR, wCsr);
1410 wCsr = musb_readw(epio, MGC_O_HDRC_RXCSR);
1411 wCsr |= MGC_M_RXCSR_FLUSHFIFO | MGC_M_RXCSR_P_WZC_BITS;
1412 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
1413 musb_writew(epio, MGC_O_HDRC_RXCSR, wCsr);
1416 /* re-enable interrupt */
1417 musb_writew(mbase, MGC_O_HDRC_INTRTXE, wIntrTxE);
1418 spin_unlock_irqrestore(&musb->Lock, flags);
1421 static const struct usb_ep_ops musb_ep_ops = {
1422 .enable = musb_gadget_enable,
1423 .disable = musb_gadget_disable,
1424 .alloc_request = musb_alloc_request,
1425 .free_request = musb_free_request,
1426 .alloc_buffer = musb_gadget_alloc_buffer,
1427 .free_buffer = musb_gadget_free_buffer,
1428 .queue = musb_gadget_queue,
1429 .dequeue = musb_gadget_dequeue,
1430 .set_halt = musb_gadget_set_halt,
1431 .fifo_status = musb_gadget_fifo_status,
1432 .fifo_flush = musb_gadget_fifo_flush
1435 /***********************************************************************/
1437 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1439 struct musb *musb = gadget_to_musb(gadget);
1441 return (int)musb_readw(musb->pRegs, MGC_O_HDRC_FRAME);
1444 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1446 struct musb *musb = gadget_to_musb(gadget);
1447 void __iomem *mregs = musb->pRegs;
1448 unsigned long flags;
1449 int status = -EINVAL;
1452 spin_lock_irqsave(&musb->Lock, flags);
1454 switch (musb->xceiv.state) {
1455 case OTG_STATE_B_PERIPHERAL:
1456 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1457 * that's part of the standard usb 1.1 state machine, and
1458 * doesn't affect OTG transitions.
1460 if (musb->may_wakeup && musb->is_suspended)
1463 case OTG_STATE_B_IDLE:
1464 /* Start SRP ... OTG not required. */
1465 devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
1466 devctl |= MGC_M_DEVCTL_SESSION;
1467 musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl);
1477 power = musb_readb(mregs, MGC_O_HDRC_POWER);
1478 power |= MGC_M_POWER_RESUME;
1479 musb_writeb(mregs, MGC_O_HDRC_POWER, power);
1480 DBG(2, "issue wakeup\n");
1482 /* FIXME do this next chunk in a timer callback, no udelay */
1485 power = musb_readb(mregs, MGC_O_HDRC_POWER);
1486 power &= ~MGC_M_POWER_RESUME;
1487 musb_writeb(mregs, MGC_O_HDRC_POWER, power);
1489 spin_unlock_irqrestore(&musb->Lock, flags);
1494 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1496 struct musb *musb = gadget_to_musb(gadget);
1498 musb->is_self_powered = !!is_selfpowered;
1502 static void musb_pullup(struct musb *musb, int is_on)
1506 power = musb_readb(musb->pRegs, MGC_O_HDRC_POWER);
1508 power |= MGC_M_POWER_SOFTCONN;
1510 power &= ~MGC_M_POWER_SOFTCONN;
1512 /* FIXME if on, HdrcStart; if off, HdrcStop */
1514 DBG(3, "gadget %s D+ pullup %s\n",
1515 musb->pGadgetDriver->function, is_on ? "on" : "off");
1516 musb_writeb(musb->pRegs, MGC_O_HDRC_POWER, power);
1520 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1522 DBG(2, "<= %s =>\n", __FUNCTION__);
1524 // FIXME iff driver's softconnect flag is set (as it is during probe,
1525 // though that can clear it), just musb_pullup().
1530 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1532 /* FIXME -- delegate to otg_transceiver logic */
1534 DBG(2, "<= vbus_draw %u =>\n", mA);
1539 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1541 struct musb *musb = gadget_to_musb(gadget);
1543 if (!musb->xceiv.set_power)
1545 return otg_set_power(&musb->xceiv, mA);
1548 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1550 struct musb *musb = gadget_to_musb(gadget);
1551 unsigned long flags;
1555 /* NOTE: this assumes we are sensing vbus; we'd rather
1556 * not pullup unless the B-session is active.
1558 spin_lock_irqsave(&musb->Lock, flags);
1559 if (is_on != musb->softconnect) {
1560 musb->softconnect = is_on;
1561 musb_pullup(musb, is_on);
1563 spin_unlock_irqrestore(&musb->Lock, flags);
1567 static const struct usb_gadget_ops musb_gadget_operations = {
1568 .get_frame = musb_gadget_get_frame,
1569 .wakeup = musb_gadget_wakeup,
1570 .set_selfpowered = musb_gadget_set_self_powered,
1571 //.vbus_session = musb_gadget_vbus_session,
1572 .vbus_draw = musb_gadget_vbus_draw,
1573 .pullup = musb_gadget_pullup,
1576 /****************************************************************
1577 * Registration operations
1578 ****************************************************************/
1580 /* Only this registration code "knows" the rule (from USB standards)
1581 * about there being only one external upstream port. It assumes
1582 * all peripheral ports are external...
1584 static struct musb *the_gadget;
1586 static void musb_gadget_release(struct device *dev)
1589 dev_dbg(dev, "%s\n", __FUNCTION__);
1594 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 bEnd, int is_in)
1596 struct musb_hw_ep *hw_ep = musb->aLocalEnd + bEnd;
1598 memset(ep, 0, sizeof *ep);
1600 ep->bEndNumber = bEnd;
1605 INIT_LIST_HEAD(&ep->req_list);
1607 sprintf(ep->name, "ep%d%s", bEnd,
1608 (!bEnd || hw_ep->bIsSharedFifo) ? "" : (
1609 is_in ? "in" : "out"));
1610 ep->end_point.name = ep->name;
1611 INIT_LIST_HEAD(&ep->end_point.ep_list);
1613 ep->end_point.maxpacket = 64;
1614 ep->end_point.ops = &musb_g_ep0_ops;
1615 musb->g.ep0 = &ep->end_point;
1618 ep->end_point.maxpacket = hw_ep->wMaxPacketSizeTx;
1620 ep->end_point.maxpacket = hw_ep->wMaxPacketSizeRx;
1621 ep->end_point.ops = &musb_ep_ops;
1622 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1627 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1628 * to the rest of the driver state.
1630 static inline void __init musb_g_init_endpoints(struct musb *musb)
1633 struct musb_hw_ep *hw_ep;
1636 /* initialize endpoint list just once */
1637 INIT_LIST_HEAD(&(musb->g.ep_list));
1639 for (bEnd = 0, hw_ep = musb->aLocalEnd;
1640 bEnd < musb->bEndCount;
1642 if (hw_ep->bIsSharedFifo /* || !bEnd */) {
1643 init_peripheral_ep(musb, &hw_ep->ep_in, bEnd, 0);
1646 if (hw_ep->wMaxPacketSizeTx) {
1647 init_peripheral_ep(musb, &hw_ep->ep_in,
1651 if (hw_ep->wMaxPacketSizeRx) {
1652 init_peripheral_ep(musb, &hw_ep->ep_out,
1660 /* called once during driver setup to initialize and link into
1661 * the driver model; memory is zeroed.
1663 int __init musb_gadget_setup(struct musb *musb)
1667 /* REVISIT minor race: if (erroneously) setting up two
1668 * musb peripherals at the same time, only the bus lock
1675 musb->g.ops = &musb_gadget_operations;
1676 musb->g.is_dualspeed = 1;
1677 musb->g.speed = USB_SPEED_UNKNOWN;
1679 /* this "gadget" abstracts/virtualizes the controller */
1680 strcpy(musb->g.dev.bus_id, "gadget");
1681 musb->g.dev.parent = musb->controller;
1682 musb->g.dev.dma_mask = musb->controller->dma_mask;
1683 musb->g.dev.release = musb_gadget_release;
1684 musb->g.name = musb_driver_name;
1686 if (is_otg_enabled(musb))
1689 musb_g_init_endpoints(musb);
1691 musb->is_active = 0;
1692 musb_platform_try_idle(musb);
1694 status = device_register(&musb->g.dev);
1700 void musb_gadget_cleanup(struct musb *musb)
1702 if (musb != the_gadget)
1705 device_unregister(&musb->g.dev);
1710 * Register the gadget driver. Used by gadget drivers when
1711 * registering themselves with the controller.
1713 * -EINVAL something went wrong (not driver)
1714 * -EBUSY another gadget is already using the controller
1715 * -ENOMEM no memory to perform the operation
1717 * @param driver the gadget driver
1718 * @return <0 if error, 0 if everything is fine
1720 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1723 unsigned long flags;
1724 struct musb *musb = the_gadget;
1727 || driver->speed != USB_SPEED_HIGH
1732 /* driver must be initialized to support peripheral mode */
1733 if (!musb || !(musb->board_mode == MUSB_OTG
1734 || musb->board_mode == MUSB_PERIPHERAL)) {
1735 DBG(1, "%s, no dev??\n", __FUNCTION__);
1739 DBG(3, "registering driver %s\n", driver->function);
1740 spin_lock_irqsave(&musb->Lock, flags);
1742 if (musb->pGadgetDriver) {
1743 DBG(1, "%s is already bound to %s\n",
1745 musb->pGadgetDriver->driver.name);
1748 musb->pGadgetDriver = driver;
1749 musb->g.dev.driver = &driver->driver;
1750 driver->driver.bus = NULL;
1751 musb->softconnect = 1;
1755 spin_unlock_irqrestore(&musb->Lock, flags);
1758 retval = driver->bind(&musb->g);
1760 DBG(3, "bind to driver %s failed --> %d\n",
1761 driver->driver.name, retval);
1762 musb->pGadgetDriver = NULL;
1763 musb->g.dev.driver = NULL;
1766 /* start peripheral and/or OTG engines */
1768 spin_lock_irqsave(&musb->Lock, flags);
1770 /* REVISIT always use otg_set_peripheral(), handling
1771 * issues including the root hub one below ...
1773 musb->xceiv.gadget = &musb->g;
1774 musb->xceiv.state = OTG_STATE_B_IDLE;
1775 musb->is_active = 1;
1777 /* FIXME this ignores the softconnect flag. Drivers are
1778 * allowed to hold the peripheral inactive until, for example,
1779 * userspace hooks up printer hardware or DSP codecs, so
1780 * hosts only see fully functional devices.
1783 if (!is_otg_enabled(musb))
1786 spin_unlock_irqrestore(&musb->Lock, flags);
1788 if (is_otg_enabled(musb)) {
1789 DBG(3, "OTG startup...\n");
1791 /* REVISIT: funcall to other code, which also
1792 * handles power budgeting ... this way also
1793 * ensures HdrcStart is indirectly called.
1795 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
1797 DBG(1, "add_hcd failed, %d\n", retval);
1798 spin_lock_irqsave(&musb->Lock, flags);
1799 musb->xceiv.gadget = NULL;
1800 musb->xceiv.state = OTG_STATE_UNDEFINED;
1801 musb->pGadgetDriver = NULL;
1802 musb->g.dev.driver = NULL;
1803 spin_unlock_irqrestore(&musb->Lock, flags);
1810 EXPORT_SYMBOL(usb_gadget_register_driver);
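
/* For illustration, not part of this driver: a gadget driver registers
 * itself from module init, matching the contract documented above
 * (all names hypothetical):
 *
 *	static struct usb_gadget_driver sketch_driver = {
 *		.function	= "sketch",
 *		.speed		= USB_SPEED_HIGH,
 *		.bind		= sketch_bind,
 *		.unbind		= sketch_unbind,
 *		.setup		= sketch_setup,
 *		.disconnect	= sketch_disconnect,
 *		.driver		= { .name = "sketch" },
 *	};
 *
 *	static int __init sketch_init(void)
 *	{
 *		return usb_gadget_register_driver(&sketch_driver);
 *	}
 */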
1813 stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1816 struct musb_hw_ep *hw_ep;
1818 /* don't disconnect if it's not connected */
1819 if (musb->g.speed == USB_SPEED_UNKNOWN)
1822 musb->g.speed = USB_SPEED_UNKNOWN;
1824 /* deactivate the hardware */
1825 if (musb->softconnect) {
1826 musb->softconnect = 0;
1827 musb_pullup(musb, 0);
1831 /* killing any outstanding requests will quiesce the driver;
1832 * then report disconnect
1835 for (i = 0, hw_ep = musb->aLocalEnd;
1836 i < musb->bEndCount;
1838 MGC_SelectEnd(musb->pRegs, i);
1839 if (hw_ep->bIsSharedFifo /* || !bEnd */) {
1840 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1842 if (hw_ep->wMaxPacketSizeTx)
1843 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1844 if (hw_ep->wMaxPacketSizeRx)
1845 nuke(&hw_ep->ep_out, -ESHUTDOWN);
1849 spin_unlock(&musb->Lock);
1850 driver->disconnect(&musb->g);
1851 spin_lock(&musb->Lock);
1856 * Unregister the gadget driver. Used by gadget drivers when
1857 * unregistering themselves from the controller.
1859 * @param driver the gadget driver to unregister
1861 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1863 unsigned long flags;
1865 struct musb *musb = the_gadget;
1867 if (!driver || !driver->unbind || !musb)
1870 /* REVISIT always use otg_set_peripheral() here too;
1871 * this needs to shut down the OTG engine.
1874 spin_lock_irqsave(&musb->Lock, flags);
1875 if (musb->pGadgetDriver == driver) {
1876 musb->xceiv.state = OTG_STATE_UNDEFINED;
1877 stop_activity(musb, driver);
1879 DBG(3, "unregistering driver %s\n", driver->function);
1880 spin_unlock_irqrestore(&musb->Lock, flags);
1881 driver->unbind(&musb->g);
1882 spin_lock_irqsave(&musb->Lock, flags);
1884 musb->pGadgetDriver = NULL;
1885 musb->g.dev.driver = NULL;
1887 musb->is_active = 0;
1888 musb_platform_try_idle(musb);
1891 spin_unlock_irqrestore(&musb->Lock, flags);
1893 if (is_otg_enabled(musb) && retval == 0) {
1894 usb_remove_hcd(musb_to_hcd(musb));
1895 /* FIXME we need to be able to register another
1896 * gadget driver here and have everything work;
1897 * that currently misbehaves.
1903 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1906 /***********************************************************************/
1908 /* lifecycle operations called through plat_uds.c */
1910 void musb_g_resume(struct musb *musb)
1912 musb->is_suspended = 0;
1913 switch (musb->xceiv.state) {
1914 case OTG_STATE_B_IDLE:
1916 case OTG_STATE_B_WAIT_ACON:
1917 case OTG_STATE_B_PERIPHERAL:
1918 musb->is_active = 1;
1919 if (musb->pGadgetDriver && musb->pGadgetDriver->resume) {
1920 spin_unlock(&musb->Lock);
1921 musb->pGadgetDriver->resume(&musb->g);
1922 spin_lock(&musb->Lock);
1926 WARN("unhandled RESUME transition (%s)\n",
1927 otg_state_string(musb));
1931 /* called when SOF packets stop for 3+ msec */
1932 void musb_g_suspend(struct musb *musb)
1936 devctl = musb_readb(musb->pRegs, MGC_O_HDRC_DEVCTL);
1937 DBG(3, "devctl %02x\n", devctl);
1939 switch (musb->xceiv.state) {
1940 case OTG_STATE_B_IDLE:
1941 if ((devctl & MGC_M_DEVCTL_VBUS) == MGC_M_DEVCTL_VBUS)
1942 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
1944 case OTG_STATE_B_PERIPHERAL:
1945 musb->is_suspended = 1;
1946 if (musb->pGadgetDriver && musb->pGadgetDriver->suspend) {
1947 spin_unlock(&musb->Lock);
1948 musb->pGadgetDriver->suspend(&musb->g);
1949 spin_lock(&musb->Lock);
1953 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1954 * A_PERIPHERAL may need care too
1956 WARN("unhandled SUSPEND transition (%s)\n",
1957 otg_state_string(musb));
1961 /* called when VBUS drops below session threshold, and in other cases */
1962 void musb_g_disconnect(struct musb *musb)
1964 void __iomem *mregs = musb->pRegs;
1965 u8 devctl = musb_readb(mregs, MGC_O_HDRC_DEVCTL);
1967 DBG(3, "devctl %02x\n", devctl);
1970 musb_writeb(mregs, MGC_O_HDRC_DEVCTL, devctl & MGC_M_DEVCTL_SESSION);
1972 /* don't draw vbus until new b-default session */
1973 (void) musb_gadget_vbus_draw(&musb->g, 0);
1975 musb->g.speed = USB_SPEED_UNKNOWN;
1976 if (musb->pGadgetDriver && musb->pGadgetDriver->disconnect) {
1977 spin_unlock(&musb->Lock);
1978 musb->pGadgetDriver->disconnect(&musb->g);
1979 spin_lock(&musb->Lock);
1982 switch (musb->xceiv.state) {
1984 #ifdef CONFIG_USB_MUSB_OTG
1985 musb->xceiv.state = OTG_STATE_A_IDLE;
1987 case OTG_STATE_B_WAIT_ACON:
1988 case OTG_STATE_B_HOST:
1990 case OTG_STATE_B_PERIPHERAL:
1991 musb->xceiv.state = OTG_STATE_B_IDLE;
1993 case OTG_STATE_B_SRP_INIT:
1997 musb->is_active = 0;
2000 void musb_g_reset(struct musb *musb)
2001 __releases(musb->Lock)
2002 __acquires(musb->Lock)
2004 void __iomem *pBase = musb->pRegs;
2005 u8 devctl = musb_readb(pBase, MGC_O_HDRC_DEVCTL);
2008 DBG(3, "<== %s addr=%x driver '%s'\n",
2009 (devctl & MGC_M_DEVCTL_BDEVICE)
2010 ? "B-Device" : "A-Device",
2011 musb_readb(pBase, MGC_O_HDRC_FADDR),
2013 ? musb->pGadgetDriver->driver.name
2017 /* report disconnect, if we didn't already (flushing EP state) */
2018 if (musb->g.speed != USB_SPEED_UNKNOWN)
2019 musb_g_disconnect(musb);
2022 else if (devctl & MGC_M_DEVCTL_HR)
2023 musb_writeb(pBase, MGC_O_HDRC_DEVCTL, MGC_M_DEVCTL_SESSION);
2026 /* what speed did we negotiate? */
2027 power = musb_readb(pBase, MGC_O_HDRC_POWER);
2028 musb->g.speed = (power & MGC_M_POWER_HSMODE)
2029 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2031 /* start in USB_STATE_DEFAULT */
2032 musb->is_active = 1;
2033 musb->is_suspended = 0;
2034 MUSB_DEV_MODE(musb);
2036 musb->ep0_state = MGC_END0_STAGE_SETUP;
2038 musb->may_wakeup = 0;
2039 musb->g.b_hnp_enable = 0;
2040 musb->g.a_alt_hnp_support = 0;
2041 musb->g.a_hnp_support = 0;
2043 /* Normal reset, as B-Device;
2044 * or else after HNP, as A-Device
2046 if (devctl & MGC_M_DEVCTL_BDEVICE) {
2047 musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
2048 musb->g.is_a_peripheral = 0;
2049 } else if (is_otg_enabled(musb)) {
2050 musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
2051 musb->g.is_a_peripheral = 1;
2055 /* start with default limits on VBUS power draw */
2056 (void) musb_gadget_vbus_draw(&musb->g,
2057 is_otg_enabled(musb) ? 8 : 100);