/*
 * Copyright (c) 2000-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/*-------------------------------------------------------------------------*/

/*
 * EHCI hc_driver implementation ... experimental, incomplete.
 * Based on the final 1.0 register interface specification.
 *
 * USB 2.0 shows up in upcoming www.pcmcia.org technology.
 * First was PCMCIA, like ISA; then CardBus, which is PCI.
 * Next comes "CardBay", using USB 2.0 signals.
 *
 * Contains additional contributions by Brad Hards, Rory Bolt, and others.
 * Special thanks to Intel and VIA for providing host controllers to
 * test this driver on, and Cypress (including In-System Design) for
 * providing early devices for those host controllers to talk to!
 */

#define DRIVER_AUTHOR "David Brownell"
#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"

static const char	hcd_name [] = "ehci_hcd";


#undef VERBOSE_DEBUG
#undef EHCI_URB_TRACE

#ifdef DEBUG
#define EHCI_STATS
#endif

/* magic numbers that can affect system performance */
#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
#define	EHCI_TUNE_RL_TT		0
#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
#define	EHCI_TUNE_MULT_TT	1
/*
 * Some drivers think it's safe to schedule isochronous transfers more than
 * 256 ms into the future (partly as a result of an old bug in the scheduling
 * code).  In an attempt to avoid trouble, we will use a minimum scheduling
 * length of 512 frames instead of 256.
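 * (At one frame per millisecond, the 512-entry periodic schedule spans
 * 512 ms, versus 256 ms for the shorter option.)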
 */
#define EHCI_TUNE_FLS		1	/* (medium) 512-frame schedule */

#define EHCI_IAA_MSECS		10		/* arbitrary */
#define EHCI_IO_JIFFIES		(HZ/10)		/* io watchdog > irq_thresh */
#define EHCI_ASYNC_JIFFIES	(HZ/20)		/* async idle timeout */
#define EHCI_SHRINK_FRAMES	5		/* async qh unlink delay */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh = 0;		// 0 to 6
module_param (log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* initial park setting: slower than hw default */
static unsigned park = 0;
module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");

/* for flakey hardware, ignore overcurrent indicators */
static int ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");

/* for link power management (LPM) feature */
static unsigned int hird;
module_param(hird, int, S_IRUGO);
MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");

#define	INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)

/*-------------------------------------------------------------------------*/

#include "ehci.h"
#include "ehci-dbg.c"

/*-------------------------------------------------------------------------*/

static void
timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
{
	/* Don't override timeouts which shrink or (later) disable
	 * the async ring; just the I/O watchdog.  Note that if a
	 * SHRINK were pending, OFF would never be requested.
	 */
	if (timer_pending(&ehci->watchdog)
			&& ((BIT(TIMER_ASYNC_SHRINK) | BIT(TIMER_ASYNC_OFF))
				& ehci->actions))
		return;

	if (!test_and_set_bit(action, &ehci->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IO_WATCHDOG:
			if (!ehci->need_io_watchdog)
				return;
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		/* case TIMER_ASYNC_SHRINK: */
		default:
			/* add a jiffie since we synch against the
			 * 8 KHz uframe counter.
			 */
			t = DIV_ROUND_UP(EHCI_SHRINK_FRAMES * HZ, 1000) + 1;
			break;
		}
		mod_timer(&ehci->watchdog, t + jiffies);
	}
}

/*-------------------------------------------------------------------------*/

/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 *
 * That last failure should _only_ happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
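 *
 * A typical call, as ehci_halt() below uses it to wait for the controller
 * to stop:
 *
 *	handshake(ehci, &ehci->regs->status, STS_HALT, STS_HALT, 16 * 125);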
 */
static int handshake (struct ehci_hcd *ehci, void __iomem *ptr,
		      u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = ehci_readl(ehci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay (1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/* force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt (struct ehci_hcd *ehci)
{
	u32	temp = ehci_readl(ehci, &ehci->regs->status);

	/* disable any irqs left enabled by previous code */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = ehci_readl(ehci, &ehci->regs->command);
	temp &= ~CMD_RUN;
	ehci_writel(ehci, temp, &ehci->regs->command);
	return handshake (ehci, &ehci->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}

static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
				       u32 mask, u32 done, int usec)
{
	int error;

	error = handshake(ehci, ptr, mask, done, usec);
	if (error) {
		ehci_halt(ehci);
		ehci_to_hcd(ehci)->state = HC_STATE_HALT;
		ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n",
			ptr, mask, done, error);
	}

	return error;
}

/* put TDI/ARC silicon into EHCI mode */
static void tdi_reset (struct ehci_hcd *ehci)
{
	u32 __iomem	*reg_ptr;
	u32		tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)ehci->regs) + USBMODE);
	tmp = ehci_readl(ehci, reg_ptr);
	tmp |= USBMODE_CM_HC;
	/* The default byte access to MMR space is LE after
	 * controller reset. Set the required endian mode
	 * for transfer buffers to match the host microprocessor
	 */
	if (ehci_big_endian_mmio(ehci))
		tmp |= USBMODE_BE;
	ehci_writel(ehci, tmp, reg_ptr);
}

/* reset a non-running (STS_HALT == 1) controller */
static int ehci_reset (struct ehci_hcd *ehci)
{
	int	retval;
	u32	command = ehci_readl(ehci, &ehci->regs->command);

	/* If the EHCI debug controller is active, special care must be
	 * taken before and after a host controller reset */
	if (ehci->debug && !dbgp_reset_prep())
		ehci->debug = NULL;

	command |= CMD_RESET;
	dbg_cmd (ehci, "reset", command);
	ehci_writel(ehci, command, &ehci->regs->command);
	ehci_to_hcd(ehci)->state = HC_STATE_HALT;
	ehci->next_statechange = jiffies;
	retval = handshake (ehci, &ehci->regs->command,
			    CMD_RESET, 0, 250 * 1000);

	if (ehci->has_hostpc) {
		ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
			(u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
		ehci_writel(ehci, TXFIFO_DEFAULT,
			(u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
	}
	if (retval)
		return retval;

	if (ehci_is_TDI(ehci))
		tdi_reset (ehci);

	if (ehci->debug)
		dbgp_external_startup();

	return retval;
}

/* idle the controller (from running) */
static void ehci_quiesce (struct ehci_hcd *ehci)
{
	u32	temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state))
		BUG ();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = ehci_readl(ehci, &ehci->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake_on_error_set_halt(ehci, &ehci->regs->status,
					STS_ASS | STS_PSS, temp, 16 * 125))
		return;

	/* then disable anything that's still active */
	temp = ehci_readl(ehci, &ehci->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	ehci_writel(ehci, temp, &ehci->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	handshake_on_error_set_halt(ehci, &ehci->regs->status,
				    STS_ASS | STS_PSS, 0, 16 * 125);
}

/*-------------------------------------------------------------------------*/

static void end_unlink_async(struct ehci_hcd *ehci);
static void ehci_work(struct ehci_hcd *ehci);

#include "ehci-hub.c"
#include "ehci-lpm.c"
#include "ehci-mem.c"
#include "ehci-q.c"
#include "ehci-sched.c"

/*-------------------------------------------------------------------------*/

static void ehci_iaa_watchdog(unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave (&ehci->lock, flags);

	/* Lost IAA irqs wedge things badly; seen first with a vt8235.
	 * So we need this watchdog, but must protect it against both
	 * (a) SMP races against real IAA firing and retriggering, and
	 * (b) clean HC shutdown, when IAA watchdog was pending.
	 */
	if (ehci->reclaim
			&& !timer_pending(&ehci->iaa_watchdog)
			&& HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
		u32 cmd, status;

		/* If we get here, IAA is *REALLY* late.  It's barely
		 * conceivable that the system is so busy that CMD_IAAD
		 * is still legitimately set, so let's be sure it's
		 * clear before we read STS_IAA.  (The HC should clear
		 * CMD_IAAD when it sets STS_IAA.)
		 */
		cmd = ehci_readl(ehci, &ehci->regs->command);
		if (cmd & CMD_IAAD)
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);

		/* If IAA is set here it either legitimately triggered
		 * before we cleared IAAD above (but _way_ late, so we'll
		 * still count it as lost) ... or a silicon erratum:
		 * - VIA seems to set IAA without triggering the IRQ;
		 * - IAAD potentially cleared without setting IAA.
		 */
		status = ehci_readl(ehci, &ehci->regs->status);
		if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
			COUNT (ehci->stats.lost_iaa);
			ehci_writel(ehci, STS_IAA, &ehci->regs->status);
		}

		ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n",
				status, cmd);
		end_unlink_async(ehci);
	}

	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_watchdog(unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);

	/* stop async processing after it's idled a bit */
	if (test_bit (TIMER_ASYNC_OFF, &ehci->actions))
		start_unlink_async (ehci, ehci->async);

	/* ehci could run by timer, without IRQs ... */
	ehci_work (ehci);

	spin_unlock_irqrestore (&ehci->lock, flags);
}

/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
{
	int	port = HCS_N_PORTS(ehci->hcs_params);

	while (port--)
		ehci_writel(ehci, PORT_RWC_BITS,
				&ehci->regs->port_status[port]);
}

/*
 * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
 * Should be called with ehci->lock held.
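 * (Both ehci_shutdown() and ehci_stop() below take ehci->lock around the
 * call.)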
 */
static void ehci_silence_controller(struct ehci_hcd *ehci)
{
	ehci_halt(ehci);
	ehci_turn_off_all_ports(ehci);

	/* make BIOS/etc use companion controller during reboot */
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);

	/* unblock posted writes */
	ehci_readl(ehci, &ehci->regs->configured_flag);
}

/* ehci_shutdown kicks in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void ehci_shutdown(struct usb_hcd *hcd)
{
	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);

	del_timer_sync(&ehci->watchdog);
	del_timer_sync(&ehci->iaa_watchdog);

	spin_lock_irq(&ehci->lock);
	ehci_silence_controller(ehci);
	spin_unlock_irq(&ehci->lock);
}

static void ehci_port_power (struct ehci_hcd *ehci, int is_on)
{
	unsigned port;

	if (!HCS_PPC (ehci->hcs_params))
		return;

	ehci_dbg (ehci, "...power%s ports...\n", is_on ? "up" : "down");
	for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; )
		(void) ehci_hub_control(ehci_to_hcd(ehci),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER,
				port--, NULL, 0);
	/* Flush those writes */
	ehci_readl(ehci, &ehci->regs->command);
	msleep(20);
}

/*-------------------------------------------------------------------------*/

/*
 * ehci_work is called from some interrupts, timers, and so on.
 * it calls driver completion functions, after dropping ehci->lock.
 */
static void ehci_work (struct ehci_hcd *ehci)
{
	timer_action_done (ehci, TIMER_IO_WATCHDOG);

	/* another CPU may drop ehci->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (ehci->scanning)
		return;
	ehci->scanning = 1;
	scan_async (ehci);
	if (ehci->next_uframe != -1)
		scan_periodic (ehci);
	ehci->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) &&
			(ehci->async->qh_next.ptr != NULL ||
			 ehci->periodic_sched != 0))
		timer_action (ehci, TIMER_IO_WATCHDOG);
}

/*
 * Called when the ehci_hcd module is removed.
 */
static void ehci_stop (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);

	ehci_dbg (ehci, "stop\n");

	/* no more interrupts ... */
	del_timer_sync (&ehci->watchdog);
	del_timer_sync(&ehci->iaa_watchdog);

	spin_lock_irq(&ehci->lock);
	if (HC_IS_RUNNING (hcd->state))
		ehci_quiesce (ehci);

	ehci_silence_controller(ehci);
	ehci_reset (ehci);
	spin_unlock_irq(&ehci->lock);

	remove_companion_file(ehci);
	remove_debug_files (ehci);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq (&ehci->lock);
	if (ehci->async)
		ehci_work (ehci);
	spin_unlock_irq (&ehci->lock);
	ehci_mem_cleanup (ehci);

#ifdef	EHCI_STATS
	ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n",
		ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim,
		ehci->stats.lost_iaa);
	ehci_dbg (ehci, "complete %ld unlink %ld\n",
		ehci->stats.complete, ehci->stats.unlink);
#endif

	dbg_status (ehci, "ehci_stop completed",
		    ehci_readl(ehci, &ehci->regs->status));
}

/* one-time init, only for memory state */
static int ehci_init(struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	u32			temp;
	int			retval;
	u32			hcc_params;
	struct ehci_qh_hw	*hw;

	spin_lock_init(&ehci->lock);

	/*
	 * keep the io watchdog by default; well-behaved HCDs can turn it
	 * off later
	 */
	ehci->need_io_watchdog = 1;
	init_timer(&ehci->watchdog);
	ehci->watchdog.function = ehci_watchdog;
	ehci->watchdog.data = (unsigned long) ehci;

	init_timer(&ehci->iaa_watchdog);
	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
	ehci->iaa_watchdog.data = (unsigned long) ehci;

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	ehci->periodic_size = DEFAULT_I_TDPS;
	INIT_LIST_HEAD(&ehci->cached_itd_list);
	INIT_LIST_HEAD(&ehci->cached_sitd_list);
	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
		ehci->i_thresh = 2 + 8;
	else					// N microframes cached
		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	ehci->reclaim = NULL;
	ehci->next_uframe = -1;
	ehci->clock_frame = -1;

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
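	 * (the initialization just below marks this qh as the reclamation
	 * list head (QH_HEAD) and leaves its transfer overlay halted, so
	 * the controller never runs transfers from the head qh itself.)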
	 */
	ehci->async->qh_next.qh = NULL;
	hw = ehci->async->hw;
	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
	hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
	hw->hw_qtd_next = EHCI_LIST_END(ehci);
	ehci->async->qh_state = QH_STATE_LINKED;
	hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) {
		ehci->has_ppcd = 1;
		ehci_dbg(ehci, "enable per-port change event\n");
		temp |= CMD_PPCEE;
	}
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * cause problems: throughput reduction (!), data errors ...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		ehci_dbg(ehci, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
		switch (EHCI_TUNE_FLS) {
		case 0: ehci->periodic_size = 1024; break;
		case 1: ehci->periodic_size = 512; break;
		case 2: ehci->periodic_size = 256; break;
		default:	BUG();
		}
	}
	if (HCC_LPM(hcc_params)) {
		/* support link power management EHCI 1.1 addendum */
		ehci_dbg(ehci, "support lpm\n");
		ehci->has_lpm = 1;
		if (hird > 0xf) {
			ehci_dbg(ehci, "hird %d invalid, use default 0",
				hird);
			hird = 0;
		}
		temp |= hird << 24;
	}
	ehci->command = temp;

	/* Accept arbitrarily long scatter-gather lists */
	if (!(hcd->driver->flags & HCD_LOCAL_MEM))
		hcd->self.sg_tablesize = ~0;
	return 0;
}

/* start HC running; it's halted, ehci_init() has been run (once) */
static int ehci_run (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	int			retval;
	u32			temp;
	u32			hcc_params;

	hcd->uses_new_polling = 1;

	/* EHCI spec section 4.1 */
	if ((retval = ehci_reset(ehci)) != 0) {
		ehci_mem_cleanup(ehci);
		return retval;
	}
	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
	ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);

	/*
	 * hcc_params controls whether ehci->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dma_supported(), so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
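	 *
	 * Below we simply pin the segment register (CTRLDSSEGMENT) to zero,
	 * so all control data structures stay in the low 4GB even on
	 * controllers capable of 64-bit addressing.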
	 */
	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params)) {
		ehci_writel(ehci, 0, &ehci->regs->segment);
#if 0
// this is deeply broken on almost all architectures
		if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
			ehci_info(ehci, "enabled 64bit DMA\n");
#endif
	}


	// Philips, Intel, and maybe others need CMD_RUN before the
	// root hub will detect new devices (why?); NEC doesn't
	ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
	ehci->command |= CMD_RUN;
	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	dbg_cmd (ehci, "init", ehci->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 *
	 * Turning on the CF flag will transfer ownership of all ports
	 * from the companions to the EHCI controller.  If any of the
	 * companions are in the middle of a port reset at the time, it
	 * could cause trouble.  Write-locking ehci_cf_port_reset_rwsem
	 * guarantees that no resets are in progress.  After we set CF,
	 * a short delay lets the hardware catch up; new resets shouldn't
	 * be started before the port switching actions could complete.
	 */
	down_write(&ehci_cf_port_reset_rwsem);
	hcd->state = HC_STATE_RUNNING;
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */
	msleep(5);
	up_write(&ehci_cf_port_reset_rwsem);
	ehci->last_periodic_enable = ktime_get_real();

	temp = HC_VERSION(ehci_readl(ehci, &ehci->caps->hc_capbase));
	ehci_info (ehci,
		"USB %x.%x started, EHCI %x.%02x%s\n",
		((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
		temp >> 8, temp & 0xff,
		ignore_oc ? ", overcurrent ignored" : "");

	ehci_writel(ehci, INTR_MASK,
		    &ehci->regs->intr_enable); /* Turn On Interrupts */

	/* GRR this is run-once init(), being done every time the HC starts.
	 * So long as they're part of class devices, we can't do it in init()
	 * since the class device isn't created that early.
	 */
	create_debug_files(ehci);
	create_companion_file(ehci);

	return 0;
}

/*-------------------------------------------------------------------------*/

static irqreturn_t ehci_irq (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	u32			status, masked_status, pcd_status = 0, cmd;
	int			bh;

	spin_lock (&ehci->lock);

	status = ehci_readl(ehci, &ehci->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		ehci_dbg (ehci, "device removed\n");
		goto dead;
	}

	masked_status = status & INTR_MASK;
	if (!masked_status) {		/* irq sharing? */
		spin_unlock(&ehci->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	ehci_writel(ehci, masked_status, &ehci->regs->status);
	cmd = ehci_readl(ehci, &ehci->regs->command);
	bh = 0;

#ifdef	VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status (ehci, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
		if (likely ((status & STS_ERR) == 0))
			COUNT (ehci->stats.normal);
		else
			COUNT (ehci->stats.error);
		bh = 1;
	}

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		/* guard against (alleged) silicon errata */
		if (cmd & CMD_IAAD) {
			ehci_writel(ehci, cmd & ~CMD_IAAD,
					&ehci->regs->command);
			ehci_dbg(ehci, "IAA with IAAD still set?\n");
		}
		if (ehci->reclaim) {
			COUNT(ehci->stats.reclaim);
			end_unlink_async(ehci);
		} else
			ehci_dbg(ehci, "IAA with nothing to reclaim?\n");
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned	i = HCS_N_PORTS (ehci->hcs_params);
		u32		ppcd = 0;

		/* kick root hub later */
		pcd_status = status;

		/* resume root hub? */
		if (!(cmd & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		/* get per-port change detect bits */
		if (ehci->has_ppcd)
			ppcd = status >> 16;

		while (i--) {
			int pstatus;

			/* leverage per-port change bits feature */
			if (ehci->has_ppcd && !(ppcd & (1 << i)))
				continue;
			pstatus = ehci_readl(ehci,
					 &ehci->regs->port_status[i]);

			if (pstatus & PORT_OWNER)
				continue;
			if (!(test_bit(i, &ehci->suspended_ports) &&
					((pstatus & PORT_RESUME) ||
						!(pstatus & PORT_SUSPEND)) &&
					(pstatus & PORT_PE) &&
					ehci->reset_done[i] == 0))
				continue;

			/* start 20 msec resume signaling from this port,
			 * and make khubd collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.  Use 5 ms extra for safety,
			 * like usb_port_resume() does.
			 */
			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely ((status & STS_FATAL) != 0)) {
		ehci_err(ehci, "fatal error\n");
		dbg_cmd(ehci, "fatal", cmd);
		dbg_status(ehci, "fatal", status);
		ehci_halt(ehci);
dead:
		ehci_reset(ehci);
		ehci_writel(ehci, 0, &ehci->regs->configured_flag);
		/* generic layer kills/unlinks all urbs, then
		 * uses ehci_stop to clean up the rest
		 */
		bh = 1;
	}

	if (bh)
		ehci_work (ehci);
	spin_unlock (&ehci->lock);
	if (pcd_status)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
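 *
 * For orientation only (the class-driver caller sketched here is
 * hypothetical, not part of this file): the USB core routes
 * usb_submit_urb() to this entry point through hcd->driver->urb_enqueue,
 * roughly as in
 *
 *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
 *			buf, len, my_complete, ctx);
 *	usb_submit_urb(urb, GFP_KERNEL);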
 */
static int ehci_urb_enqueue (
	struct usb_hcd	*hcd,
	struct urb	*urb,
	gfp_t		mem_flags
) {
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct list_head	qtd_list;

	INIT_LIST_HEAD (&qtd_list);

	switch (usb_pipetype (urb->pipe)) {
	case PIPE_CONTROL:
		/* qh_completions() code doesn't handle all the fault cases
		 * in multi-TD control transfers.  Even 1KB is rare anyway.
		 */
		if (urb->transfer_buffer_length > (16 * 1024))
			return -EMSGSIZE;
		/* FALLTHROUGH */
	/* case PIPE_BULK: */
	default:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(ehci, urb, &qtd_list, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(ehci, urb, &qtd_list, mem_flags);

	case PIPE_ISOCHRONOUS:
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit (ehci, urb, mem_flags);
		else
			return sitd_submit (ehci, urb, mem_flags);
	}
}

static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	/* failfast */
	if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
		end_unlink_async(ehci);

	/* If the QH isn't linked then there's nothing we can do
	 * unless we were called during a giveback, in which case
	 * qh_completions() has to deal with it.
	 */
	if (qh->qh_state != QH_STATE_LINKED) {
		if (qh->qh_state == QH_STATE_COMPLETING)
			qh->needs_rescan = 1;
		return;
	}

	/* defer till later if busy */
	if (ehci->reclaim) {
		struct ehci_qh		*last;

		for (last = ehci->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* start IAA cycle */
	} else
		start_unlink_async (ehci, qh);
}

/* remove from hardware lists
 * completions normally happen asynchronously
 */

static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	struct ehci_qh		*qh;
	unsigned long		flags;
	int			rc;

	spin_lock_irqsave (&ehci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	switch (usb_pipetype (urb->pipe)) {
	// case PIPE_CONTROL:
	// case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			unlink_async(ehci, qh);
			break;
		case QH_STATE_UNLINK:
		case QH_STATE_UNLINK_WAIT:
			/* already started */
			break;
		case QH_STATE_IDLE:
			/* QH might be waiting for a Clear-TT-Buffer */
			qh_completions(ehci, qh);
			break;
		}
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
		case QH_STATE_COMPLETING:
			intr_deschedule (ehci, qh);
			break;
		case QH_STATE_IDLE:
			qh_completions (ehci, qh);
			break;
		default:
			ehci_dbg (ehci, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}
		break;

	case PIPE_ISOCHRONOUS:
		// itd or sitd ...

		// wait till next completion, do it then.
		// completion irqs can wait up to 1024 msec,
		break;
	}
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return rc;
}

/*-------------------------------------------------------------------------*/

// bulk qh holds the data toggle

static void
ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	unsigned long		flags;
	struct ehci_qh		*qh, *tmp;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave (&ehci->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw == NULL) {
		ehci_vdbg (ehci, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING (hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
	case QH_STATE_COMPLETING:
		for (tmp = ehci->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async (ehci, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
	case QH_STATE_UNLINK_WAIT:
idle_timeout:
		spin_unlock_irqrestore (&ehci->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (qh->clearing_tt)
			goto idle_timeout;
		if (list_empty (&qh->qtd_list)) {
			qh_put (qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		ehci_err (ehci, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty (&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore (&ehci->lock, flags);
	return;
}

static void
ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh;
	int			eptype = usb_endpoint_type(&ep->desc);
	int			epnum = usb_endpoint_num(&ep->desc);
	int			is_out = usb_endpoint_dir_out(&ep->desc);
	unsigned long		flags;

	if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
		return;

	spin_lock_irqsave(&ehci->lock, flags);
	qh = ep->hcpriv;

	/* For Bulk and Interrupt endpoints we maintain the toggle state
	 * in the hardware; the toggle bits in udev aren't used at all.
	 * When an endpoint is reset by usb_clear_halt() we must reset
	 * the toggle bit in the QH.
	 */
	if (qh) {
		usb_settoggle(qh->dev, epnum, is_out, 0);
		if (!list_empty(&qh->qtd_list)) {
			WARN_ONCE(1, "clear_halt for a busy endpoint\n");
		} else if (qh->qh_state == QH_STATE_LINKED ||
				qh->qh_state == QH_STATE_COMPLETING) {

			/* The toggle value in the QH can't be updated
			 * while the QH is active.  Unlink it now;
			 * re-linking will call qh_refresh().
			 */
			if (eptype == USB_ENDPOINT_XFER_BULK)
				unlink_async(ehci, qh);
			else
				intr_deschedule(ehci, qh);
		}
	}
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static int ehci_get_frame (struct usb_hcd *hcd)
{
	struct ehci_hcd		*ehci = hcd_to_ehci (hcd);
	return (ehci_readl(ehci, &ehci->regs->frame_index) >> 3) %
		ehci->periodic_size;
}

/*-------------------------------------------------------------------------*/

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR (DRIVER_AUTHOR);
MODULE_LICENSE ("GPL");

#ifdef CONFIG_PCI
#include "ehci-pci.c"
#define	PCI_DRIVER		ehci_pci_driver
#endif

#ifdef CONFIG_USB_EHCI_FSL
#include "ehci-fsl.c"
#define	PLATFORM_DRIVER		ehci_fsl_driver
#endif

#ifdef CONFIG_USB_EHCI_MXC
#include "ehci-mxc.c"
#define PLATFORM_DRIVER		ehci_mxc_driver
#endif

#ifdef CONFIG_SOC_AU1200
#include "ehci-au1xxx.c"
#define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
#endif

#ifdef CONFIG_ARCH_OMAP3
#include "ehci-omap.c"
#define	PLATFORM_DRIVER		ehci_hcd_omap_driver
#endif

#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
#define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
#endif

#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
#define OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
#endif

#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
#define XILINX_OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
#endif

#ifdef CONFIG_PLAT_ORION
#include "ehci-orion.c"
#define	PLATFORM_DRIVER		ehci_orion_driver
#endif

#ifdef CONFIG_ARCH_IXP4XX
#include "ehci-ixp4xx.c"
#define	PLATFORM_DRIVER		ixp4xx_ehci_driver
#endif

#ifdef CONFIG_USB_W90X900_EHCI
#include "ehci-w90x900.c"
#define	PLATFORM_DRIVER		ehci_hcd_w90x900_driver
#endif

#ifdef CONFIG_ARCH_AT91
#include "ehci-atmel.c"
#define	PLATFORM_DRIVER		ehci_atmel_driver
#endif

#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
    !defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
    !defined(XILINX_OF_PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"
#endif

static int __init ehci_hcd_init(void)
{
	int retval = 0;

	if (usb_disabled())
		return -ENODEV;

	printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
	set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
			test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
		printk(KERN_WARNING "Warning! ehci_hcd should always be loaded"
				" before uhci_hcd and ohci_hcd, not after\n");

	pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
		 hcd_name,
		 sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
		 sizeof(struct ehci_itd), sizeof(struct ehci_sitd));

#ifdef DEBUG
	ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
	if (!ehci_debug_root) {
		retval = -ENOENT;
		goto err_debug;
	}
#endif

#ifdef PLATFORM_DRIVER
	retval = platform_driver_register(&PLATFORM_DRIVER);
	if (retval < 0)
		goto clean0;
#endif

#ifdef PCI_DRIVER
	retval = pci_register_driver(&PCI_DRIVER);
	if (retval < 0)
		goto clean1;
#endif

#ifdef PS3_SYSTEM_BUS_DRIVER
	retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
	if (retval < 0)
		goto clean2;
#endif

#ifdef OF_PLATFORM_DRIVER
	retval = of_register_platform_driver(&OF_PLATFORM_DRIVER);
	if (retval < 0)
		goto clean3;
#endif

#ifdef XILINX_OF_PLATFORM_DRIVER
	retval = of_register_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
	if (retval < 0)
		goto clean4;
#endif
	return retval;

#ifdef XILINX_OF_PLATFORM_DRIVER
	/* of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER); */
clean4:
#endif
#ifdef OF_PLATFORM_DRIVER
	of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
clean3:
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
clean2:
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
clean1:
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
#ifdef DEBUG
	debugfs_remove(ehci_debug_root);
	ehci_debug_root = NULL;
err_debug:
#endif
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
	return retval;
}
module_init(ehci_hcd_init);

static void __exit ehci_hcd_cleanup(void)
{
#ifdef XILINX_OF_PLATFORM_DRIVER
	of_unregister_platform_driver(&XILINX_OF_PLATFORM_DRIVER);
#endif
#ifdef OF_PLATFORM_DRIVER
	of_unregister_platform_driver(&OF_PLATFORM_DRIVER);
#endif
#ifdef PLATFORM_DRIVER
	platform_driver_unregister(&PLATFORM_DRIVER);
#endif
#ifdef PCI_DRIVER
	pci_unregister_driver(&PCI_DRIVER);
#endif
#ifdef PS3_SYSTEM_BUS_DRIVER
	ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
#ifdef DEBUG
	debugfs_remove(ehci_debug_root);
#endif
	clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
}
module_exit(ehci_hcd_cleanup);
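/*
 * Illustrative only (not part of the original driver text): the module
 * parameters declared near the top of this file can be set at load time,
 * for example
 *
 *	modprobe ehci_hcd log2_irq_thresh=3 park=2 ignore_oc=1
 *
 * where the values shown are arbitrary examples, not recommendations.
 */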