/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu
 *
 * Intel documents this fairly well, and as far as I know there
 * are no royalties or anything like that, but even so there are
 * people who decided that they want to do the same thing in a
 * completely different way.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/pm.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>

#include "../core/hcd.h"
#include "uhci-hcd.h"
#include "pci-quirks.h"

/*
 * Version Information
 */
#define DRIVER_VERSION "v3.0"
#define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
Alan Stern"
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URBs except for stalls
 * debug = 2, dump all failed URBs (including stalls)
 *            show all queues in /debug/uhci/[pci_addr]
 * debug = 3, show all TDs in URBs when dumping
 */
#ifdef DEBUG
#define DEBUG_CONFIGURED	1
static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");

#else
#define DEBUG_CONFIGURED	0
#define debug			0
#endif

static char *errbuf;
#define ERRBUF_LEN    (32 * 1024)

static kmem_cache_t *uhci_up_cachep;	/* urb_priv */

static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
static void uhci_get_current_frame_number(struct uhci_hcd *uhci);

/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT	msecs_to_jiffies(50)
#define FSBR_DELAY	msecs_to_jiffies(50)

/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TDs */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL	5

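/* These sub-modules are compiled into this file rather than built
 * separately, so that they can share the static definitions above.
 */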
#include "uhci-debug.c"
#include "uhci-q.c"
#include "uhci-hub.c"

/*
 * Finish up a host controller reset and update the recorded state.
 */
static void finish_reset(struct uhci_hcd *uhci)
{
	int port;

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));

	uhci->port_c_suspend = uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->state = HC_STATE_HALT;
	uhci_to_hcd(uhci)->poll_rh = 0;
}

/*
 * Last rites for a defunct/nonfunctional controller
 * or one we don't want to use any more.
 */
static void hc_died(struct uhci_hcd *uhci)
{
	uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
	finish_reset(uhci);
	uhci->hc_inaccessible = 1;
}

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 */
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
	if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
		finish_reset(uhci);
}

/*
 * Store the basic register settings needed by the controller.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);

	/* Store the frame list base address */
	outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);

	/* Set the current frame number */
	outw(uhci->frame_number, uhci->io_addr + USBFRNUM);

	/* Mark controller as not halted before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
	mb();

	/* Enable PIRQ */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
}


static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
	int port;

	switch (to_pci_dev(uhci_dev(uhci))->vendor) {
	default:
		break;

	case PCI_VENDOR_ID_GENESYS:
		/* Genesys Logic's GL880S controllers don't generate
		 * resume-detect interrupts.
		 */
		return 1;

	case PCI_VENDOR_ID_INTEL:
		/* Some of Intel's USB controllers have a bug that causes
		 * resume-detect interrupts if any port has an over-current
		 * condition.  To make matters worse, some motherboards
		 * hardwire unused USB ports' over-current inputs active!
		 * To prevent problems, we will not enable resume-detect
		 * interrupts if any ports are OC.
		 */
		for (port = 0; port < uhci->rh_numports; ++port) {
			if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
					USBPORTSC_OC)
				return 1;
		}
		break;
	}
	return 0;
}

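/* Put the root hub into a suspended or auto-stopped state.  The caller
 * must hold uhci->lock; it is dropped temporarily while waiting for the
 * controller to halt.
 */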
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	int auto_stop;
	int int_enable;

	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			(auto_stop ? " (auto-stop)" : ""));

	/* If we get a suspend request when we're already auto-stopped
	 * then there's nothing to do.
	 */
	if (uhci->rh_state == UHCI_RH_AUTO_STOPPED) {
		uhci->rh_state = new_state;
		return;
	}

	/* Enable resume-detect interrupts if they work.
	 * Then enter Global Suspend mode, still configured.
	 */
	uhci->working_RD = 1;
	int_enable = USBINTR_RESUME;
	if (resume_detect_interrupts_are_broken(uhci)) {
		uhci->working_RD = int_enable = 0;
	}
	outw(int_enable, uhci->io_addr + USBINTR);
	outw(USBCMD_EGSM | USBCMD_CF, uhci->io_addr + USBCMD);
	mb();
	udelay(5);

	/* If we're auto-stopping then no devices have been attached
	 * for a while, so there shouldn't be any active URBs and the
	 * controller should stop after a few microseconds.  Otherwise
	 * we will give the controller one frame to stop.
	 */
	if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
		uhci->rh_state = UHCI_RH_SUSPENDING;
		spin_unlock_irq(&uhci->lock);
		msleep(1);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;
	}
	if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");

	uhci_get_current_frame_number(uhci);
	smp_wmb();

	uhci->rh_state = new_state;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->poll_rh = !int_enable;

	uhci_scan_schedule(uhci, NULL);
}

static void start_rh(struct uhci_hcd *uhci)
{
	uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
	uhci->is_stopped = 0;
	smp_wmb();

	/* Mark it configured and running with a 64-byte max packet.
	 * All interrupts are enabled, even though RESUME won't do anything.
	 */
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
			uhci->io_addr + USBINTR);
	mb();
	uhci->rh_state = UHCI_RH_RUNNING;
	uhci_to_hcd(uhci)->poll_rh = 1;
}

static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
				" (auto-start)" : "");

	/* If we are auto-stopped then no devices are attached so there's
	 * no need for wakeup signals.  Otherwise we send Global Resume
	 * for 20 ms.
	 */
	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
		uhci->rh_state = UHCI_RH_RESUMING;
		outw(USBCMD_FGR | USBCMD_EGSM | USBCMD_CF,
				uhci->io_addr + USBCMD);
		spin_unlock_irq(&uhci->lock);
		msleep(20);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;

		/* End Global Resume and wait for EOP to be sent */
		outw(USBCMD_CF, uhci->io_addr + USBCMD);
		mb();
		udelay(4);
		if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR)
			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
	}

	start_rh(uhci);

	/* Restart root hub polling */
	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}

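/* Interrupt handler.  The IRQ line may be shared, so IRQ_NONE is
 * returned when the status register shows nothing of interest.
 */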
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned short status;
	unsigned long flags;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(uhci->io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, uhci->io_addr + USBSTS);		/* Clear it */

	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if (status & USBSTS_HCH) {
			spin_lock_irqsave(&uhci->lock, flags);
			if (uhci->rh_state >= UHCI_RH_RUNNING) {
				dev_err(uhci_dev(uhci),
					"host controller halted, "
					"very bad!\n");
				if (debug > 1 && errbuf) {
					/* Print the schedule for debugging */
					uhci_sprint_schedule(uhci,
							errbuf, ERRBUF_LEN);
					lprintk(errbuf);
				}
				hc_died(uhci);

				/* Force a callback in case there are
				 * pending unlinks */
				mod_timer(&hcd->rh_timer, jiffies);
			}
			spin_unlock_irqrestore(&uhci->lock, flags);
		}
	}

	if (status & USBSTS_RD)
		usb_hcd_poll_rh_status(hcd);
	else {
		spin_lock_irqsave(&uhci->lock, flags);
		uhci_scan_schedule(uhci, regs);
		spin_unlock_irqrestore(&uhci->lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		uhci->frame_number = inw(uhci->io_addr + USBFRNUM);
}

/*
 * De-allocate all resources
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	if (DEBUG_CONFIGURED) {
		spin_lock_irq(&uhci->lock);
		uhci->is_initialized = 0;
		spin_unlock_irq(&uhci->lock);

		debugfs_remove(uhci->dentry);
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		uhci_free_qh(uhci, uhci->skelqh[i]);

	uhci_free_td(uhci, uhci->term_td);

	dma_pool_destroy(uhci->qh_pool);

	dma_pool_destroy(uhci->td_pool);

	kfree(uhci->frame_cpu);

	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);
}

static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned io_size = (unsigned) hcd->rsrc_len;
	int port;

	uhci->io_addr = (unsigned long) hcd->rsrc_start;

	/* The UHCI spec says devices must have 2 ports, and goes on to say
	 * they may have more but gives no way to determine how many there
	 * are.  However according to the UHCI spec, Bit 7 of the port
	 * status and control register is always set to 1.  So we try to
	 * use this to our advantage.  Another common failure mode when
	 * a nonexistent register is addressed is to return all ones, so
	 * we test for that also.
	 */
	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
		if (!(portstatus & 0x0080) || portstatus == 0xffff)
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* Anything greater than 7 is weird so we'll ignore it. */
	if (port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}
	uhci->rh_numports = port;

	/* Kick BIOS off this hardware and reset if the controller
	 * isn't already safely quiescent.
	 */
	check_and_reset_hc(uhci);
	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);

	hc_died(hcd_to_uhci(hcd));
}

/*
 * Allocate a frame list, and then set up the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up.  The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues.  We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed
 *  - The third queue is the bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int retval = -EBUSY;
	int i;
	struct dentry *dentry;

	hcd->uses_new_polling = 1;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->lock);

	INIT_LIST_HEAD(&uhci->td_remove_list);
	INIT_LIST_HEAD(&uhci->idle_qh_list);

	init_waitqueue_head(&uhci->waitqh);

	if (DEBUG_CONFIGURED) {
		dentry = debugfs_create_file(hcd->self.bus_name,
				S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
				uhci, &uhci_debug_operations);
		if (!dentry) {
			dev_err(uhci_dev(uhci), "couldn't create uhci "
					"debugfs entry\n");
			retval = -ENOMEM;
			goto err_create_debug_entry;
		}
		uhci->dentry = dentry;
	}

	uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			&uhci->frame_dma_handle, 0);
	if (!uhci->frame) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_frame;
	}
	memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));

	uhci->frame_cpu = kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
			GFP_KERNEL);
	if (!uhci->frame_cpu) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"memory for frame pointers\n");
		goto err_alloc_frame_cpu;
	}

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_int1_qh->dma_handle);

	uhci->skel_int1_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_ls_control_qh->dma_handle);
	uhci->skel_ls_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_fs_control_qh->dma_handle);
	uhci->skel_fs_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_bulk_qh->dma_handle);
	uhci->skel_bulk_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_term_qh->dma_handle);

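	/* The schedule ends at skel_term_qh; its element pointer is aimed
	 * at a single inactive TD that links back to itself, set up below.
	 */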
	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
			(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
		 * ffs >= 7 => not on any high-period queue, so use
		 *             skel_int1_qh = skelqh[9].
		 * Add UHCI_NUMFRAMES to ensure at least one bit is set.
		 */
		irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq <= 1)
			irq = 9;

		/* Only place we don't use the frame list routines */
		uhci->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
	 */
	mb();

	configure_hc(uhci);
	uhci->is_initialized = 1;
	start_rh(uhci);
	return 0;

/*
 * error exits:
 */
err_alloc_skelqh:
	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		if (uhci->skelqh[i])
			uhci_free_qh(uhci, uhci->skelqh[i]);
	}

	uhci_free_td(uhci, uhci->term_td);

err_alloc_term_td:
	dma_pool_destroy(uhci->qh_pool);

err_create_qh_pool:
	dma_pool_destroy(uhci->td_pool);

err_create_td_pool:
	kfree(uhci->frame_cpu);

err_alloc_frame_cpu:
	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);

err_alloc_frame:
	debugfs_remove(uhci->dentry);

err_create_debug_entry:
	return retval;
}

static void uhci_stop(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)
		hc_died(uhci);
	uhci_scan_schedule(uhci, NULL);
	spin_unlock_irq(&uhci->lock);

	release_uhci(uhci);
}

#ifdef CONFIG_PM
static int uhci_rh_suspend(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	spin_lock_irq(&uhci->lock);
	if (!uhci->hc_inaccessible)		/* Not dead */
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	spin_unlock_irq(&uhci->lock);
	return 0;
}

static int uhci_rh_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	spin_lock_irq(&uhci->lock);
	if (uhci->hc_inaccessible) {
		if (uhci->rh_state == UHCI_RH_SUSPENDED) {
			dev_warn(uhci_dev(uhci), "HC isn't running!\n");
			rc = -ENODEV;
		}
		/* Otherwise the HC is dead */
	} else
		wakeup_rh(uhci);
	spin_unlock_irq(&uhci->lock);
	return rc;
}

static int uhci_suspend(struct usb_hcd *hcd, pm_message_t message)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	int rc = 0;

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	spin_lock_irq(&uhci->lock);
	if (uhci->hc_inaccessible)	/* Dead or already suspended */
		goto done;

	if (uhci->rh_state > UHCI_RH_SUSPENDED) {
		dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n");
		rc = -EBUSY;
		goto done;
	}

	/* All PCI host controllers are required to disable IRQ generation
	 * at the source, so we must turn off PIRQ.
	 */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0);
	mb();
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	uhci->hc_inaccessible = 1;
	hcd->poll_rh = 0;

	/* FIXME: Enable non-PME# remote wakeup? */

done:
	spin_unlock_irq(&uhci->lock);
	return rc;
}

static int uhci_resume(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__);

	/* Since we aren't in D3 any more, it's safe to set this flag
	 * even if the controller was dead.  It might not even be dead
	 * any more, if the firmware or quirks code has reset it.
	 */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	mb();

	if (uhci->rh_state == UHCI_RH_RESET)	/* Dead */
		return 0;
	spin_lock_irq(&uhci->lock);

	/* FIXME: Disable non-PME# remote wakeup? */

	uhci->hc_inaccessible = 0;

	/* The BIOS may have changed the controller settings during a
	 * system wakeup.  Check it and reconfigure to avoid problems.
	 */
	check_and_reset_hc(uhci);
	configure_hc(uhci);

	if (uhci->rh_state == UHCI_RH_RESET) {

		/* The controller had to be reset */
		usb_root_hub_lost_power(hcd->self.root_hub);
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	}

	spin_unlock_irq(&uhci->lock);

	if (!uhci->working_RD) {
		/* Suspended root hub needs to be polled */
		hcd->poll_rh = 1;
		usb_hcd_poll_rh_status(hcd);
	}
	return 0;
}
#endif

/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct uhci_qh *qh;

	spin_lock_irq(&uhci->lock);
	qh = (struct uhci_qh *) hep->hcpriv;
	if (qh == NULL)
		goto done;

	while (qh->state != QH_STATE_IDLE) {
		++uhci->num_waiting;
		spin_unlock_irq(&uhci->lock);
		wait_event_interruptible(uhci->waitqh,
				qh->state == QH_STATE_IDLE);
		spin_lock_irq(&uhci->lock);
		--uhci->num_waiting;
	}

	uhci_free_qh(uhci, qh);
done:
	spin_unlock_irq(&uhci->lock);
}

static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	int is_stopped;
	int frame_number;

	/* Minimize latency by avoiding the spinlock */
	local_irq_save(flags);
	is_stopped = uhci->is_stopped;
	smp_rmb();
	frame_number = (is_stopped ? uhci->frame_number :
			inw(uhci->io_addr + USBFRNUM));
	local_irq_restore(flags);
	return frame_number;
}

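/* Glue to the USB core and the PCI layer: the hc_driver operations table
 * below is what usbcore calls into, and uhci_pci_driver binds this driver
 * to any PCI device with the UHCI class code.
 */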
static const char hcd_name[] = "uhci_hcd";

static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
	.bus_suspend =		uhci_rh_suspend,
	.bus_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};

static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,
	.shutdown =	uhci_shutdown,

#ifdef CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};

static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (usb_disabled())
		return -ENODEV;

	if (DEBUG_CONFIGURED) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
		uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
		if (!uhci_debugfs_root)
			goto debug_failed;
	}

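	/* Per-URB private state (struct urb_priv) is allocated from
	 * this slab cache.
	 */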
	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
			sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	kfree(errbuf);

errbuf_failed:

	return retval;
}

static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

	debugfs_remove(uhci_debugfs_root);
	kfree(errbuf);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");