// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 */

#ifndef __UBOOT__
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#else
#include <common.h>
#include <linux/usb/ch9.h>
#include "linux-compat.h"
#endif

#include "musb_core.h"


/* MUSB PERIPHERAL status 3-mar-2006:
 *
 * - EP0 seems solid.  It passes both USBCV and usbtest control cases.
 *   Minor glitches:
 *
 *     + remote wakeup to Linux hosts works, but USBCV failures were
 *       seen in one test run (operator error?)
 *     + endpoint halt tests -- in both usbtest and usbcv -- seem
 *       to break when dma is enabled ... is something wrongly
 *       clearing SENDSTALL?
 *
 * - Mass storage behaved ok when last tested.  Network traffic patterns
 *   (with lots of short transfers etc) need retesting; they turn up the
 *   worst cases of the DMA, since short packets are typical but are not
 *   required.
 *
 * - TX/IN
 *     + both pio and dma behave well with network and g_zero tests
 *     + no cppi throughput issues other than no-hw-queueing
 *     + failed with FLAT_REG (DaVinci)
 *     + seems to behave with double buffering, PIO -and- CPPI
 *     + with gadgetfs + AIO, requests got lost?
 *
 * - RX/OUT
 *     + both pio and dma behave well with network and g_zero tests
 *     + dma is slow in the typical case (short_not_ok is clear)
 *     + double buffering ok with PIO
 *     + double buffering *FAILS* with CPPI, wrong data bytes sometimes
 *     + request lossage observed with gadgetfs
 *
 * - ISO not tested ... might work, but only weakly isochronous
 *
 * - Gadget driver disabling of softconnect during bind() is ignored; so
 *   drivers can't hold off host requests until userspace is ready.
 *   (Workaround: they can turn it off later.)
 *
 * - PORTABILITY (assumes PIO works):
 *     + DaVinci, basically works with cppi dma
 *     + OMAP 2430, ditto with mentor dma
 *     + TUSB 6010, platform-specific dma in the works
 */

/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
					(req->map_state != UN_MAPPED))

#ifndef CONFIG_USB_MUSB_PIO_ONLY
/* Map the buffer for DMA */
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		request->request.dma = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from DMA and map it back to the CPU */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	if (!is_buffer_mapped(request))
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else { /* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
#else
static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
}

static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
}
#endif
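
/*
 * Illustrative sketch (not compiled into the driver): the mapping contract
 * above, seen from the caller's side.  A submitter that pre-maps its buffer
 * sets request.dma itself, so map_dma_buffer() only syncs it (PRE_MAPPED);
 * one that leaves request.dma as DMA_ADDR_INVALID gets a fresh mapping
 * (MUSB_MAPPED) that unmap_dma_buffer() tears down at giveback time.
 * The function below is hypothetical; only the struct fields are real.
 */
#if 0
static void example_submit(struct musb *musb, struct musb_ep *musb_ep,
			   struct musb_request *req, dma_addr_t premapped)
{
	if (premapped)
		req->request.dma = premapped;	/* caller owns the mapping */
	else
		req->request.dma = DMA_ADDR_INVALID; /* let this driver map */

	map_dma_buffer(req, musb, musb_ep);
	/* ... queue the request; unmap_dma_buffer() runs at giveback ... */
}
#endif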

/*
 * Immediately complete a request.
 *
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);
	unmap_dma_buffer(req, musb);
	if (request->status == 0)
		dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length);
	else
		dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
				ep->end_point.name, request,
				req->request.actual, req->request.length,
				request->status);
	req->request.complete(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
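
/*
 * Illustrative sketch (not compiled into the driver): a minimal gadget-side
 * completion handler matching the giveback contract above -- it runs with
 * the controller lock dropped, and must check status before trusting
 * actual/length.  The handler name and its policy are hypothetical.
 */
#if 0
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status == -ESHUTDOWN || req->status == -ECONNRESET)
		return;		/* endpoint disabled, or request dequeued */
	if (req->status == 0)
		pr_debug("%s: %u of %u bytes done\n", ep->name,
			 req->actual, req->length);
	/* requeue or recycle req here */
}
#endif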

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint, using the given status.
 * Synchronous.  Caller has locked the controller, blocked IRQs,
 * and selected this endpoint.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
				ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral tx (IN) using Mentor DMA works as follows:
	Only mode 0 is used for transfers <= wPktSize;
	mode 1 is used for larger transfers.

	One of the following happens:
	- Host sends IN token which causes an endpoint interrupt
		-> TxAvail
			-> if DMA is currently busy, exit.
			-> if queue is non-empty, txstate().

	- Request is queued by the gadget driver.
		-> if queue was previously empty, txstate()

	txstate()
		-> start
		  /\	-> setup DMA
		  |     (data is transferred to the FIFO, then sent out when
		  |	IN token(s) are received from the Host.)
		  |		-> DMA interrupt on completion
		  |		   calls TxAvail.
		  |		      -> stop DMA, ~DMAENAB,
		  |		      -> set TxPktRdy for last short pkt or zlp
		  |		      -> Complete Request
		  |		      -> Continue next request (call txstate)
		  |___________________________________|

 * Non-Mentor DMA engines can of course work differently, such as by
 * upleveling from irq-per-packet to irq-per-buffer.
 */

#endif
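
/*
 * Illustrative sketch (not compiled into the driver): the mode selection
 * rule the diagram above describes, as txstate() applies it for Mentor DMA.
 * Hypothetical helper; the real logic lives inline in txstate() below.
 */
#if 0
static int example_tx_dma_mode(size_t request_size, u16 packet_sz)
{
	/* mode 0: one packet per DMA request, irq per packet */
	if (request_size < packet_sz)
		return 0;
	/* mode 1: whole buffer, hardware splits it into packets */
	return 1;
}
#endif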

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "dma pending...\n");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		dev_dbg(musb->controller, "%s old packet still ready, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
				musb_ep->end_point.name, csr);
		return;
	}

	dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_USB_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID);

		/* MUSB_TXCSR_P_ISO is still set correctly */

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
		{
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					if (!musb_ep->hb_mult)
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

#elif defined(CONFIG_USB_TI_CPPI_DMA)
		/* program endpoint CSR first, then setup DMA */
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
		       MUSB_TXCSR_MODE;
		musb_writew(epio, MUSB_TXCSR,
			(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
				| csr);

		/* ensure writebuffer is empty */
		csr = musb_readw(epio, MUSB_TXCSR);

		/* NOTE host side sets DMAENAB later than this; both are
		 * OK since the transfer dma glue (between CPPI and Mentor
		 * fifos) just tells CPPI it could start.  Data only moves
		 * to the USB TX fifo when both fifos are ready.
		 */

		/* "mode" is irrelevant here; handle terminating ZLPs like
		 * PIO does, since the hardware RNDIS mode seems unreliable
		 * except for the last-packet-is-already-short case.
		 */
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				0,
				request->dma + request->actual,
				request_size);
		if (!use_dma) {
			c->channel_release(musb_ep->dma);
			musb_ep->dma = NULL;
			csr &= ~MUSB_TXCSR_DMAENAB;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* invariant: request->buf is non-null */
		}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
		use_dma = use_dma && c->channel_program(
				musb_ep->dma, musb_ep->packet_sz,
				request->zero,
				request->dma + request->actual,
				request_size);
#endif
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	 MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
		return;
	}

	if (request) {
		u8	is_dma = 0;

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			is_dma = 1;
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
			|| (is_dma && (!dma->desired_mode ||
				(request->actual &
					(musb_ep->packet_sz - 1))))
#endif
		) {
			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			dev_dbg(musb->controller, "sending zero pkt\n");
			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and reacquired after some time. During
			 * this period the INDEX register could get
			 * changed by the gadget_queue function, especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers.
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				dev_dbg(musb->controller, "%s idle now\n",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

#ifdef CONFIG_USB_INVENTRA_DMA

/* Peripheral rx (OUT) using Mentor DMA works as follows:
	- Only mode 0 is used.

	- Request is queued by the gadget class driver.
		-> if queue was previously empty, rxstate()

	- Host sends OUT token which causes an endpoint interrupt
	  /\      -> RxReady
	  |	      -> if request queued, call rxstate
	  |		/\	-> setup DMA
	  |		|	     -> DMA interrupt on completion
	  |		|		-> RxReady
	  |		|		      -> stop DMA
	  |		|		      -> ack the read
	  |		|		      -> if data recd = max expected
	  |		|				by the request, or host
	  |		|				sent a short packet,
	  |		|				complete the request,
	  |		|				and start the next one.
	  |		|_____________________________________|
	  |					 else just wait for the host
	  |					    to send the next OUT token.
	  |__________________________________________________|

 * Non-Mentor DMA engines can of course work differently.
 */

#endif
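
/*
 * Illustrative sketch (not compiled into the driver): the completion test
 * the diagram above describes -- an RX request finishes when the expected
 * length arrives or the host sends a short packet.  Hypothetical helper;
 * rxstate() below implements this inline at its tail.
 */
#if 0
static bool example_rx_done(const struct usb_request *req, u16 last_len,
			    u16 packet_sz)
{
	return req->actual == req->length || last_len < packet_sz;
}
#endif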

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		fifo_count = 0;
	u16			len;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	len = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		dev_dbg(musb->controller, "DMA pending...\n");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled() && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE:  CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		len = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when the short_not_ok
		 * flag is set. Currently short_not_ok is set only by the
		 * file_storage and f_mass_storage drivers.
		 */

		if (request->short_not_ok && len == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
			if (is_buffer_mapped(req)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and the DMA controller operates
	 * in mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from the DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of transfer is signified either by
	 * a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get a DMA completion interrupt
	 * for short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get an endpoint interrupt on every DMA req, but that didn't
	 * seem to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (request->actual < request->length) {
					int transfer_size = 0;
					if (use_mode_1) {
						transfer_size = min(request->length - request->actual,
								channel->max_len);
						musb_ep->dma->desired_mode = 1;
					} else {
						transfer_size = min(request->length - request->actual,
								(unsigned)len);
						musb_ep->dma->desired_mode = 0;
					}

					use_dma = c->channel_program(
							channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size);
				}

				if (use_dma)
					return;
			}
#elif defined(CONFIG_USB_UX500_DMA)
			if ((is_buffer_mapped(req)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (len < musb_ep->packet_sz)
					transfer_size = len;
				else if (request->short_not_ok)
					transfer_size = min(request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min(request->length -
							request->actual,
							(unsigned)len);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}
#endif	/* Mentor's DMA */

			fifo_count = request->length - request->actual;
			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
					musb_ep->end_point.name,
					len, fifo_count,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

#ifdef	CONFIG_USB_TUSB_OMAP_DMA
			if (tusb_dma_omap() && is_buffer_mapped(req)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}
#endif
			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			if (is_buffer_mapped(req)) {
				unmap_dma_buffer(req, musb);

				/*
				 * Clear DMAENAB and AUTOCLEAR for the
				 * PIO mode transfer
				 */
				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
				musb_writew(epio, MUSB_RXCSR, csr);
			}

			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reached the end, or short packet detected */
	if (request->actual == request->length || len < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		dev_dbg(musb->controller, "%s busy, csr %04x\n",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

		dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
			epnum, csr,
			musb_readw(epio, MUSB_RXCSR),
			musb_ep->dma->actual_len, request);

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In the double-buffered case, continue to unload
			 * the fifo if there is an Rx packet in the FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and reacquired after some time. During
		 * this period the INDEX register could get
		 * changed by the gadget_queue function, especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers.
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem	*mbase;
	u8		epnum;
	u16		csr;
	unsigned	tmp;
	int		status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp(desc);
	if (tmp & ~0x07ff) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
			goto fail;
		}
		musb_ep->hb_mult = (tmp >> 11) & 3;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = tmp & 0x7ff;
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {
		u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_txe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {
		u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
			goto fail;
		}

		int_rxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, int_rxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (musb->double_buffer_not_ok)
			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
		else
			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
					| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE:  all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			({ char *s; switch (musb_ep->type) {
			case USB_ENDPOINT_XFER_BULK:	s = "bulk"; break;
			case USB_ENDPOINT_XFER_INT:	s = "int"; break;
			default:			s = "iso"; break;
			}; s; }),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_work(&musb->irq_work);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Disable an endpoint, flushing all queued requests.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;
	int		status = 0;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
		int_txe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
		int_rxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	musb_ep->desc = NULL;
#ifndef __UBOOT__
	musb_ep->end_point.desc = NULL;
#endif

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	schedule_work(&musb->irq_work);

	spin_unlock_irqrestore(&(musb->lock), flags);

	dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);

	return status;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb		*musb = musb_ep->musb;
	struct musb_request	*request = NULL;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request) {
		dev_dbg(musb->controller, "not enough memory\n");
		return NULL;
	}

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	return &request->request;
}

/*
 * Free a request.
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	kfree(to_musb_request(req));
}

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
		req->tx ? "TX/IN" : "RX/OUT",
		&req->request, req->request.length, req->epnum);

	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status = 0;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		goto cleanup;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
		musb_ep_restart(musb, request);

cleanup:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	return status;
}
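
/*
 * Illustrative sketch (not compiled into the driver): how a gadget driver
 * feeds this queue through the standard usb_ep API -- usb_ep_queue() ends
 * up in musb_gadget_queue() above.  The function name, buffer, and the
 * example_complete handler (see the sketch near musb_g_giveback) are
 * hypothetical.
 */
#if 0
static int example_queue_out(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
#endif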

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || to_musb_request(request)->ep != musb_ep)
		return -EINVAL;

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx
 * any data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (USB_ENDPOINT_XFER_ISOC == musb_ep->type) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
			    ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		dev_dbg(musb->controller, "restarting the request\n");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
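
/*
 * Illustrative sketch (not compiled into the driver): halting an endpoint
 * from a gadget driver.  usb_ep_set_halt() reaches musb_gadget_set_halt()
 * above and can fail with -EAGAIN while a request or FIFO data is pending,
 * so callers may need to retry.  The function name is hypothetical.
 */
#if 0
static int example_stall(struct usb_ep *ep)
{
	int ret = usb_ep_set_halt(ep);	/* value=1: set the stall */

	if (ret == -EAGAIN)
		pr_debug("%s: busy, stall deferred\n", ep->name);
	return ret;
}
#endif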

#ifndef __UBOOT__
/*
 * Set the halt feature, with subsequent clear-halt requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
#endif

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr, int_txe;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	int_txe = musb_readw(mbase, MUSB_INTRTXE);
	musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading
			 * without flushing the already loaded packets,
			 * so keep TXPKTRDY clear here.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, int_txe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
#ifndef __UBOOT__
	.set_wedge	= musb_gadget_set_wedge,
#endif
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};
   1608 
   1609 /* ----------------------------------------------------------------------- */
   1610 
   1611 static int musb_gadget_get_frame(struct usb_gadget *gadget)
   1612 {
   1613 	struct musb	*musb = gadget_to_musb(gadget);
   1614 
   1615 	return (int)musb_readw(musb->mregs, MUSB_FRAME);
   1616 }
   1617 
   1618 static int musb_gadget_wakeup(struct usb_gadget *gadget)
   1619 {
   1620 #ifndef __UBOOT__
   1621 	struct musb	*musb = gadget_to_musb(gadget);
   1622 	void __iomem	*mregs = musb->mregs;
   1623 	unsigned long	flags;
   1624 	int		status = -EINVAL;
   1625 	u8		power, devctl;
   1626 	int		retries;
   1627 
   1628 	spin_lock_irqsave(&musb->lock, flags);
   1629 
   1630 	switch (musb->xceiv->state) {
   1631 	case OTG_STATE_B_PERIPHERAL:
		/* NOTE:  OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard USB 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
   1636 		if (musb->may_wakeup && musb->is_suspended)
   1637 			break;
   1638 		goto done;
   1639 	case OTG_STATE_B_IDLE:
   1640 		/* Start SRP ... OTG not required. */
   1641 		devctl = musb_readb(mregs, MUSB_DEVCTL);
   1642 		dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
   1643 		devctl |= MUSB_DEVCTL_SESSION;
   1644 		musb_writeb(mregs, MUSB_DEVCTL, devctl);
   1645 		devctl = musb_readb(mregs, MUSB_DEVCTL);
   1646 		retries = 100;
   1647 		while (!(devctl & MUSB_DEVCTL_SESSION)) {
   1648 			devctl = musb_readb(mregs, MUSB_DEVCTL);
   1649 			if (retries-- < 1)
   1650 				break;
   1651 		}
   1652 		retries = 10000;
   1653 		while (devctl & MUSB_DEVCTL_SESSION) {
   1654 			devctl = musb_readb(mregs, MUSB_DEVCTL);
   1655 			if (retries-- < 1)
   1656 				break;
   1657 		}
   1658 
   1659 		spin_unlock_irqrestore(&musb->lock, flags);
   1660 		otg_start_srp(musb->xceiv->otg);
   1661 		spin_lock_irqsave(&musb->lock, flags);
   1662 
		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1000));
   1666 
   1667 		status = 0;
   1668 		goto done;
   1669 	default:
   1670 		dev_dbg(musb->controller, "Unhandled wake: %s\n",
   1671 			otg_state_string(musb->xceiv->state));
   1672 		goto done;
   1673 	}
   1674 
   1675 	status = 0;
   1676 
   1677 	power = musb_readb(mregs, MUSB_POWER);
   1678 	power |= MUSB_POWER_RESUME;
   1679 	musb_writeb(mregs, MUSB_POWER, power);
   1680 	dev_dbg(musb->controller, "issue wakeup\n");
   1681 
   1682 	/* FIXME do this next chunk in a timer callback, no udelay */
   1683 	mdelay(2);
   1684 
   1685 	power = musb_readb(mregs, MUSB_POWER);
   1686 	power &= ~MUSB_POWER_RESUME;
   1687 	musb_writeb(mregs, MUSB_POWER, power);
   1688 done:
   1689 	spin_unlock_irqrestore(&musb->lock, flags);
   1690 	return status;
   1691 #else
   1692 	return 0;
   1693 #endif
   1694 }
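/*
 * Illustrative sketch (not built): a function driver requests remote
 * wakeup through the generic usb_gadget_wakeup() wrapper, which lands
 * in musb_gadget_wakeup() above and only succeeds while suspended
 * with may_wakeup set.  The helper name is made up for this example.
 */
#if 0
static int example_remote_wakeup(struct usb_gadget *gadget)
{
	return usb_gadget_wakeup(gadget);	/* -EINVAL unless allowed */
}
#endif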
   1695 
   1696 static int
   1697 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
   1698 {
   1699 	struct musb	*musb = gadget_to_musb(gadget);
   1700 
   1701 	musb->is_self_powered = !!is_selfpowered;
   1702 	return 0;
   1703 }
   1704 
   1705 static void musb_pullup(struct musb *musb, int is_on)
   1706 {
   1707 	u8 power;
   1708 
   1709 	power = musb_readb(musb->mregs, MUSB_POWER);
   1710 	if (is_on)
   1711 		power |= MUSB_POWER_SOFTCONN;
   1712 	else
   1713 		power &= ~MUSB_POWER_SOFTCONN;
   1714 
   1715 	/* FIXME if on, HdrcStart; if off, HdrcStop */
   1716 
   1717 	dev_dbg(musb->controller, "gadget D+ pullup %s\n",
   1718 		is_on ? "on" : "off");
   1719 	musb_writeb(musb->mregs, MUSB_POWER, power);
   1720 }
   1721 
   1722 #if 0
   1723 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct musb	*musb = gadget_to_musb(gadget);

	dev_dbg(musb->controller, "<= %s =>\n", __func__);

   1727 	/*
   1728 	 * FIXME iff driver's softconnect flag is set (as it is during probe,
   1729 	 * though that can clear it), just musb_pullup().
   1730 	 */
   1731 
   1732 	return -EINVAL;
   1733 }
   1734 #endif
   1735 
   1736 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
   1737 {
   1738 #ifndef __UBOOT__
   1739 	struct musb	*musb = gadget_to_musb(gadget);
   1740 
   1741 	if (!musb->xceiv->set_power)
   1742 		return -EOPNOTSUPP;
   1743 	return usb_phy_set_power(musb->xceiv, mA);
   1744 #else
   1745 	return 0;
   1746 #endif
   1747 }
   1748 
   1749 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
   1750 {
   1751 	struct musb	*musb = gadget_to_musb(gadget);
   1752 	unsigned long	flags;
   1753 
   1754 	is_on = !!is_on;
   1755 
   1756 	pm_runtime_get_sync(musb->controller);
   1757 
	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pull D+ up unless the B-session is active.
	 */
   1761 	spin_lock_irqsave(&musb->lock, flags);
   1762 	if (is_on != musb->softconnect) {
   1763 		musb->softconnect = is_on;
   1764 		musb_pullup(musb, is_on);
   1765 	}
   1766 	spin_unlock_irqrestore(&musb->lock, flags);
   1767 
   1768 	pm_runtime_put(musb->controller);
   1769 
   1770 	return 0;
   1771 }
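/*
 * Illustrative sketch (not built): the usual way for a gadget driver
 * to toggle SOFTCONN is usb_gadget_connect()/usb_gadget_disconnect(),
 * both of which call musb_gadget_pullup() above.  The helper is made
 * up for this example.
 */
#if 0
static void example_set_visible(struct usb_gadget *gadget, int ready)
{
	if (ready)
		usb_gadget_connect(gadget);	/* D+ pullup on */
	else
		usb_gadget_disconnect(gadget);	/* D+ pullup off */
}
#endif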
   1772 
   1773 #ifndef __UBOOT__
   1774 static int musb_gadget_start(struct usb_gadget *g,
   1775 		struct usb_gadget_driver *driver);
   1776 static int musb_gadget_stop(struct usb_gadget *g,
   1777 		struct usb_gadget_driver *driver);
   1778 #endif
   1779 
   1780 static const struct usb_gadget_ops musb_gadget_operations = {
   1781 	.get_frame		= musb_gadget_get_frame,
   1782 	.wakeup			= musb_gadget_wakeup,
   1783 	.set_selfpowered	= musb_gadget_set_self_powered,
   1784 	/* .vbus_session		= musb_gadget_vbus_session, */
   1785 	.vbus_draw		= musb_gadget_vbus_draw,
   1786 	.pullup			= musb_gadget_pullup,
   1787 #ifndef __UBOOT__
   1788 	.udc_start		= musb_gadget_start,
   1789 	.udc_stop		= musb_gadget_stop,
   1790 #endif
   1791 };
   1792 
   1793 /* ----------------------------------------------------------------------- */
   1794 
   1795 /* Registration */
   1796 
   1797 /* Only this registration code "knows" the rule (from USB standards)
   1798  * about there being only one external upstream port.  It assumes
   1799  * all peripheral ports are external...
   1800  */
   1801 
   1802 #ifndef __UBOOT__
   1803 static void musb_gadget_release(struct device *dev)
   1804 {
   1805 	/* kref_put(WHAT) */
   1806 	dev_dbg(dev, "%s\n", __func__);
   1807 }
   1808 #endif
   1809 
   1810 
   1811 static void __devinit
   1812 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
   1813 {
   1814 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
   1815 
   1816 	memset(ep, 0, sizeof *ep);
   1817 
   1818 	ep->current_epnum = epnum;
   1819 	ep->musb = musb;
   1820 	ep->hw_ep = hw_ep;
   1821 	ep->is_in = is_in;
   1822 
   1823 	INIT_LIST_HEAD(&ep->req_list);
   1824 
   1825 	sprintf(ep->name, "ep%d%s", epnum,
   1826 			(!epnum || hw_ep->is_shared_fifo) ? "" : (
   1827 				is_in ? "in" : "out"));
   1828 	ep->end_point.name = ep->name;
   1829 	INIT_LIST_HEAD(&ep->end_point.ep_list);
   1830 	if (!epnum) {
   1831 		ep->end_point.maxpacket = 64;
   1832 		ep->end_point.ops = &musb_g_ep0_ops;
   1833 		musb->g.ep0 = &ep->end_point;
   1834 	} else {
   1835 		if (is_in)
   1836 			ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
   1837 		else
   1838 			ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
   1839 		ep->end_point.ops = &musb_ep_ops;
   1840 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
   1841 	}
   1842 }
   1843 
   1844 /*
   1845  * Initialize the endpoints exposed to peripheral drivers, with backlinks
   1846  * to the rest of the driver state.
   1847  */
   1848 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
   1849 {
	u8			epnum;
	struct musb_hw_ep	*hw_ep;

	/* initialize endpoint list just once */
	INIT_LIST_HEAD(&(musb->g.ep_list));

	for (epnum = 0, hw_ep = musb->endpoints;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		if (hw_ep->is_shared_fifo /* || !epnum */) {
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
		} else {
			if (hw_ep->max_packet_sz_tx)
				init_peripheral_ep(musb, &hw_ep->ep_in,
							epnum, 1);
			if (hw_ep->max_packet_sz_rx)
				init_peripheral_ep(musb, &hw_ep->ep_out,
							epnum, 0);
		}
	}
   1876 }
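/*
 * Illustrative sketch (not built): after musb_g_init_endpoints(), a
 * core with split-FIFO EP1/EP2 and a shared-FIFO EP3 would expose
 * "ep1in", "ep1out", "ep2in", "ep2out" and "ep3" on musb->g.ep_list;
 * that hardware layout is an assumption for the example.  A function
 * driver typically walks the list to claim an endpoint, e.g.:
 */
#if 0
static struct usb_ep *example_find_ep(struct usb_gadget *gadget,
		const char *name)
{
	struct usb_ep *ep;

	list_for_each_entry(ep, &gadget->ep_list, ep_list)
		if (!strcmp(ep->name, name))
			return ep;
	return NULL;
}
#endif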
   1877 
   1878 /* called once during driver setup to initialize and link into
   1879  * the driver model; memory is zeroed.
   1880  */
   1881 int __devinit musb_gadget_setup(struct musb *musb)
   1882 {
   1883 	int status;
   1884 
   1885 	/* REVISIT minor race:  if (erroneously) setting up two
   1886 	 * musb peripherals at the same time, only the bus lock
   1887 	 * is probably held.
   1888 	 */
   1889 
   1890 	musb->g.ops = &musb_gadget_operations;
   1891 #ifndef __UBOOT__
   1892 	musb->g.max_speed = USB_SPEED_HIGH;
   1893 #endif
   1894 	musb->g.speed = USB_SPEED_UNKNOWN;
   1895 
   1896 #ifndef __UBOOT__
   1897 	/* this "gadget" abstracts/virtualizes the controller */
   1898 	dev_set_name(&musb->g.dev, "gadget");
   1899 	musb->g.dev.parent = musb->controller;
   1900 	musb->g.dev.dma_mask = musb->controller->dma_mask;
   1901 	musb->g.dev.release = musb_gadget_release;
   1902 #endif
   1903 	musb->g.name = musb_driver_name;
   1904 
   1905 #ifndef __UBOOT__
   1906 	if (is_otg_enabled(musb))
   1907 		musb->g.is_otg = 1;
   1908 #endif
   1909 
   1910 	musb_g_init_endpoints(musb);
   1911 
   1912 	musb->is_active = 0;
   1913 	musb_platform_try_idle(musb, 0);
   1914 
   1915 #ifndef __UBOOT__
   1916 	status = device_register(&musb->g.dev);
   1917 	if (status != 0) {
   1918 		put_device(&musb->g.dev);
   1919 		return status;
   1920 	}
   1921 	status = usb_add_gadget_udc(musb->controller, &musb->g);
   1922 	if (status)
   1923 		goto err;
   1924 #endif
   1925 
   1926 	return 0;
   1927 #ifndef __UBOOT__
   1928 err:
   1929 	musb->g.dev.parent = NULL;
   1930 	device_unregister(&musb->g.dev);
   1931 	return status;
   1932 #endif
   1933 }
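/*
 * Illustrative sketch (not built): in the U-Boot build the expected
 * bring-up order is roughly core init, then musb_gadget_setup(), then
 * musb_gadget_start() with the gadget driver; the helper name and
 * error handling are assumptions made up for this example.
 */
#if 0
static int example_bringup(struct musb *musb,
		struct usb_gadget_driver *driver)
{
	int ret = musb_gadget_setup(musb);	/* registers eps on musb->g */

	if (ret)
		return ret;
	return musb_gadget_start(&musb->g, driver);	/* see below */
}
#endif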
   1934 
   1935 void musb_gadget_cleanup(struct musb *musb)
   1936 {
   1937 #ifndef __UBOOT__
   1938 	usb_del_gadget_udc(&musb->g);
   1939 	if (musb->g.dev.parent)
   1940 		device_unregister(&musb->g.dev);
   1941 #endif
   1942 }
   1943 
   1944 /*
   1945  * Register the gadget driver. Used by gadget drivers when
   1946  * registering themselves with the controller.
   1947  *
 * -EINVAL something went wrong (a parameter problem, not the driver itself)
   1949  * -EBUSY another gadget is already using the controller
   1950  * -ENOMEM no memory to perform the operation
   1951  *
   1952  * @param driver the gadget driver
   1953  * @return <0 if error, 0 if everything is fine
   1954  */
   1955 #ifndef __UBOOT__
   1956 static int musb_gadget_start(struct usb_gadget *g,
   1957 		struct usb_gadget_driver *driver)
   1958 #else
   1959 int musb_gadget_start(struct usb_gadget *g,
   1960 		struct usb_gadget_driver *driver)
   1961 #endif
   1962 {
   1963 	struct musb		*musb = gadget_to_musb(g);
   1964 #ifndef __UBOOT__
   1965 	struct usb_otg		*otg = musb->xceiv->otg;
   1966 #endif
   1967 	unsigned long		flags;
   1968 	int			retval = -EINVAL;
   1969 
   1970 #ifndef __UBOOT__
   1971 	if (driver->max_speed < USB_SPEED_HIGH)
   1972 		goto err0;
   1973 #endif
   1974 
   1975 	pm_runtime_get_sync(musb->controller);
   1976 
   1977 #ifndef __UBOOT__
   1978 	dev_dbg(musb->controller, "registering driver %s\n", driver->function);
   1979 #endif
   1980 
   1981 	musb->softconnect = 0;
   1982 	musb->gadget_driver = driver;
   1983 
   1984 	spin_lock_irqsave(&musb->lock, flags);
   1985 	musb->is_active = 1;
   1986 
   1987 #ifndef __UBOOT__
   1988 	otg_set_peripheral(otg, &musb->g);
   1989 	musb->xceiv->state = OTG_STATE_B_IDLE;
   1990 
	/*
	 * FIXME this ignores the softconnect flag.  Drivers are
	 * allowed to hold the peripheral inactive until, for example,
	 * userspace hooks up printer hardware or DSP codecs, so
	 * hosts only see fully functional devices.
	 */
   1997 
   1998 	if (!is_otg_enabled(musb))
   1999 #endif
   2000 		musb_start(musb);
   2001 
   2002 	spin_unlock_irqrestore(&musb->lock, flags);
   2003 
   2004 #ifndef __UBOOT__
   2005 	if (is_otg_enabled(musb)) {
   2006 		struct usb_hcd	*hcd = musb_to_hcd(musb);
   2007 
   2008 		dev_dbg(musb->controller, "OTG startup...\n");
   2009 
   2010 		/* REVISIT:  funcall to other code, which also
   2011 		 * handles power budgeting ... this way also
   2012 		 * ensures HdrcStart is indirectly called.
   2013 		 */
   2014 		retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
   2015 		if (retval < 0) {
   2016 			dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
   2017 			goto err2;
   2018 		}
   2019 
   2020 		if ((musb->xceiv->last_event == USB_EVENT_ID)
   2021 					&& otg->set_vbus)
   2022 			otg_set_vbus(otg, 1);
   2023 
   2024 		hcd->self.uses_pio_for_control = 1;
   2025 	}
   2026 	if (musb->xceiv->last_event == USB_EVENT_NONE)
   2027 		pm_runtime_put(musb->controller);
   2028 #endif
   2029 
   2030 	return 0;
   2031 
   2032 #ifndef __UBOOT__
   2033 err2:
   2034 	if (!is_otg_enabled(musb))
   2035 		musb_stop(musb);
   2036 err0:
   2037 	return retval;
   2038 #endif
   2039 }
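/*
 * Illustrative sketch (not built): the minimal shape of a gadget
 * driver as consumed above; max_speed is checked against
 * USB_SPEED_HIGH and function is used for logging.  The callback
 * bodies are placeholders, not a working function driver.
 */
#if 0
static int example_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* placeholder: handle ch9 requests here */
}

static void example_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
};
#endif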
   2040 
   2041 #ifndef __UBOOT__
   2042 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
   2043 {
   2044 	int			i;
   2045 	struct musb_hw_ep	*hw_ep;
   2046 
   2047 	/* don't disconnect if it's not connected */
   2048 	if (musb->g.speed == USB_SPEED_UNKNOWN)
   2049 		driver = NULL;
   2050 	else
   2051 		musb->g.speed = USB_SPEED_UNKNOWN;
   2052 
   2053 	/* deactivate the hardware */
   2054 	if (musb->softconnect) {
   2055 		musb->softconnect = 0;
   2056 		musb_pullup(musb, 0);
   2057 	}
   2058 	musb_stop(musb);
   2059 
   2060 	/* killing any outstanding requests will quiesce the driver;
   2061 	 * then report disconnect
   2062 	 */
   2063 	if (driver) {
   2064 		for (i = 0, hw_ep = musb->endpoints;
   2065 				i < musb->nr_endpoints;
   2066 				i++, hw_ep++) {
   2067 			musb_ep_select(musb->mregs, i);
   2068 			if (hw_ep->is_shared_fifo /* || !epnum */) {
   2069 				nuke(&hw_ep->ep_in, -ESHUTDOWN);
   2070 			} else {
   2071 				if (hw_ep->max_packet_sz_tx)
   2072 					nuke(&hw_ep->ep_in, -ESHUTDOWN);
   2073 				if (hw_ep->max_packet_sz_rx)
   2074 					nuke(&hw_ep->ep_out, -ESHUTDOWN);
   2075 			}
   2076 		}
   2077 	}
   2078 }
   2079 
   2080 /*
   2081  * Unregister the gadget driver. Used by gadget drivers when
   2082  * unregistering themselves from the controller.
   2083  *
   2084  * @param driver the gadget driver to unregister
   2085  */
   2086 static int musb_gadget_stop(struct usb_gadget *g,
   2087 		struct usb_gadget_driver *driver)
   2088 {
   2089 	struct musb	*musb = gadget_to_musb(g);
   2090 	unsigned long	flags;
   2091 
   2092 	if (musb->xceiv->last_event == USB_EVENT_NONE)
   2093 		pm_runtime_get_sync(musb->controller);
   2094 
   2095 	/*
   2096 	 * REVISIT always use otg_set_peripheral() here too;
   2097 	 * this needs to shut down the OTG engine.
   2098 	 */
   2099 
   2100 	spin_lock_irqsave(&musb->lock, flags);
   2101 
   2102 	musb_hnp_stop(musb);
   2103 
   2104 	(void) musb_gadget_vbus_draw(&musb->g, 0);
   2105 
   2106 	musb->xceiv->state = OTG_STATE_UNDEFINED;
   2107 	stop_activity(musb, driver);
   2108 	otg_set_peripheral(musb->xceiv->otg, NULL);
   2109 
   2110 	dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
   2111 
   2112 	musb->is_active = 0;
   2113 	musb_platform_try_idle(musb, 0);
   2114 	spin_unlock_irqrestore(&musb->lock, flags);
   2115 
   2116 	if (is_otg_enabled(musb)) {
   2117 		usb_remove_hcd(musb_to_hcd(musb));
   2118 		/* FIXME we need to be able to register another
   2119 		 * gadget driver here and have everything work;
   2120 		 * that currently misbehaves.
   2121 		 */
   2122 	}
   2123 
   2124 	if (!is_otg_enabled(musb))
   2125 		musb_stop(musb);
   2126 
   2127 	pm_runtime_put(musb->controller);
   2128 
   2129 	return 0;
   2130 }
   2131 #endif
   2132 
   2133 /* ----------------------------------------------------------------------- */
   2134 
   2135 /* lifecycle operations called through plat_uds.c */
   2136 
   2137 void musb_g_resume(struct musb *musb)
   2138 {
   2139 #ifndef __UBOOT__
   2140 	musb->is_suspended = 0;
   2141 	switch (musb->xceiv->state) {
   2142 	case OTG_STATE_B_IDLE:
   2143 		break;
   2144 	case OTG_STATE_B_WAIT_ACON:
   2145 	case OTG_STATE_B_PERIPHERAL:
   2146 		musb->is_active = 1;
   2147 		if (musb->gadget_driver && musb->gadget_driver->resume) {
   2148 			spin_unlock(&musb->lock);
   2149 			musb->gadget_driver->resume(&musb->g);
   2150 			spin_lock(&musb->lock);
   2151 		}
   2152 		break;
   2153 	default:
   2154 		WARNING("unhandled RESUME transition (%s)\n",
   2155 				otg_state_string(musb->xceiv->state));
   2156 	}
   2157 #endif
   2158 }
   2159 
   2160 /* called when SOF packets stop for 3+ msec */
   2161 void musb_g_suspend(struct musb *musb)
   2162 {
   2163 #ifndef __UBOOT__
   2164 	u8	devctl;
   2165 
   2166 	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
   2167 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
   2168 
   2169 	switch (musb->xceiv->state) {
   2170 	case OTG_STATE_B_IDLE:
   2171 		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
   2172 			musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
   2173 		break;
   2174 	case OTG_STATE_B_PERIPHERAL:
   2175 		musb->is_suspended = 1;
   2176 		if (musb->gadget_driver && musb->gadget_driver->suspend) {
   2177 			spin_unlock(&musb->lock);
   2178 			musb->gadget_driver->suspend(&musb->g);
   2179 			spin_lock(&musb->lock);
   2180 		}
   2181 		break;
   2182 	default:
   2183 		/* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
   2184 		 * A_PERIPHERAL may need care too
   2185 		 */
   2186 		WARNING("unhandled SUSPEND transition (%s)\n",
   2187 				otg_state_string(musb->xceiv->state));
   2188 	}
   2189 #endif
   2190 }
   2191 
   2192 /* Called during SRP */
   2193 void musb_g_wakeup(struct musb *musb)
   2194 {
   2195 	musb_gadget_wakeup(&musb->g);
   2196 }
   2197 
   2198 /* called when VBUS drops below session threshold, and in other cases */
   2199 void musb_g_disconnect(struct musb *musb)
   2200 {
   2201 	void __iomem	*mregs = musb->mregs;
   2202 	u8	devctl = musb_readb(mregs, MUSB_DEVCTL);
   2203 
   2204 	dev_dbg(musb->controller, "devctl %02x\n", devctl);
   2205 
   2206 	/* clear HR */
   2207 	musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
   2208 
   2209 	/* don't draw vbus until new b-default session */
   2210 	(void) musb_gadget_vbus_draw(&musb->g, 0);
   2211 
   2212 	musb->g.speed = USB_SPEED_UNKNOWN;
   2213 	if (musb->gadget_driver && musb->gadget_driver->disconnect) {
   2214 		spin_unlock(&musb->lock);
   2215 		musb->gadget_driver->disconnect(&musb->g);
   2216 		spin_lock(&musb->lock);
   2217 	}
   2218 
   2219 #ifndef __UBOOT__
   2220 	switch (musb->xceiv->state) {
   2221 	default:
   2222 		dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
   2223 			otg_state_string(musb->xceiv->state));
   2224 		musb->xceiv->state = OTG_STATE_A_IDLE;
   2225 		MUSB_HST_MODE(musb);
   2226 		break;
   2227 	case OTG_STATE_A_PERIPHERAL:
   2228 		musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
   2229 		MUSB_HST_MODE(musb);
   2230 		break;
   2231 	case OTG_STATE_B_WAIT_ACON:
   2232 	case OTG_STATE_B_HOST:
   2233 	case OTG_STATE_B_PERIPHERAL:
   2234 	case OTG_STATE_B_IDLE:
   2235 		musb->xceiv->state = OTG_STATE_B_IDLE;
   2236 		break;
   2237 	case OTG_STATE_B_SRP_INIT:
   2238 		break;
   2239 	}
   2240 #endif
   2241 
   2242 	musb->is_active = 0;
   2243 }
   2244 
   2245 void musb_g_reset(struct musb *musb)
   2246 __releases(musb->lock)
   2247 __acquires(musb->lock)
   2248 {
   2249 	void __iomem	*mbase = musb->mregs;
   2250 	u8		devctl = musb_readb(mbase, MUSB_DEVCTL);
   2251 	u8		power;
   2252 
   2253 #ifndef __UBOOT__
   2254 	dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
   2255 			(devctl & MUSB_DEVCTL_BDEVICE)
   2256 				? "B-Device" : "A-Device",
   2257 			musb_readb(mbase, MUSB_FADDR),
   2258 			musb->gadget_driver
   2259 				? musb->gadget_driver->driver.name
   2260 				: NULL
   2261 			);
   2262 #endif
   2263 
   2264 	/* report disconnect, if we didn't already (flushing EP state) */
   2265 	if (musb->g.speed != USB_SPEED_UNKNOWN)
   2266 		musb_g_disconnect(musb);
   2267 
   2268 	/* clear HR */
   2269 	else if (devctl & MUSB_DEVCTL_HR)
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);

   2273 	/* what speed did we negotiate? */
   2274 	power = musb_readb(mbase, MUSB_POWER);
   2275 	musb->g.speed = (power & MUSB_POWER_HSMODE)
   2276 			? USB_SPEED_HIGH : USB_SPEED_FULL;
   2277 
   2278 	/* start in USB_STATE_DEFAULT */
   2279 	musb->is_active = 1;
   2280 	musb->is_suspended = 0;
   2281 	MUSB_DEV_MODE(musb);
   2282 	musb->address = 0;
   2283 	musb->ep0_state = MUSB_EP0_STAGE_SETUP;
   2284 
   2285 	musb->may_wakeup = 0;
   2286 	musb->g.b_hnp_enable = 0;
   2287 	musb->g.a_alt_hnp_support = 0;
   2288 	musb->g.a_hnp_support = 0;
   2289 
   2290 #ifndef __UBOOT__
   2291 	/* Normal reset, as B-Device;
   2292 	 * or else after HNP, as A-Device
   2293 	 */
   2294 	if (devctl & MUSB_DEVCTL_BDEVICE) {
   2295 		musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
   2296 		musb->g.is_a_peripheral = 0;
   2297 	} else if (is_otg_enabled(musb)) {
   2298 		musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
   2299 		musb->g.is_a_peripheral = 1;
   2300 	} else
   2301 		WARN_ON(1);
   2302 
   2303 	/* start with default limits on VBUS power draw */
   2304 	(void) musb_gadget_vbus_draw(&musb->g,
   2305 			is_otg_enabled(musb) ? 8 : 100);
   2306 #endif
   2307 }
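/*
 * Illustrative sketch (not built): after the reset handling above,
 * musb->g.speed holds the negotiated speed, so a function driver can
 * size bulk endpoints accordingly (512 bytes at high speed, 64 at
 * full speed, the standard bulk maxima).  The helper is made up for
 * this example.
 */
#if 0
static unsigned example_bulk_maxpacket(struct usb_gadget *gadget)
{
	return (gadget->speed == USB_SPEED_HIGH) ? 512 : 64;
}
#endif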
   2308