1 /* 2 * Universal Host Controller Interface driver for USB. 3 * 4 * Maintainer: Alan Stern <stern@rowland.harvard.edu> 5 * 6 * (C) Copyright 1999 Linus Torvalds 7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com 8 * (C) Copyright 1999 Randy Dunlap 9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de 10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de 11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch 12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at 13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface 14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). 15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) 16 * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu 17 * 18 * Intel documents this fairly well, and as far as I know there 19 * are no royalties or anything like that, but even so there are 20 * people who decided that they want to do the same thing in a 21 * completely different way. 22 * 23 */ 24 25 #include <linux/config.h> 26 #include <linux/module.h> 27 #include <linux/pci.h> 28 #include <linux/kernel.h> 29 #include <linux/init.h> 30 #include <linux/delay.h> 31 #include <linux/ioport.h> 32 #include <linux/sched.h> 33 #include <linux/slab.h> 34 #include <linux/smp_lock.h> 35 #include <linux/errno.h> 36 #include <linux/unistd.h> 37 #include <linux/interrupt.h> 38 #include <linux/spinlock.h> 39 #include <linux/debugfs.h> 40 #include <linux/pm.h> 41 #include <linux/dmapool.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/usb.h> 44 #include <linux/bitops.h> 45 46 #include <asm/uaccess.h> 47 #include <asm/io.h> 48 #include <asm/irq.h> 49 #include <asm/system.h> 50 51 #include "../core/hcd.h" 52 #include "uhci-hcd.h" 53 54 /* 55 * Version Information 56 */ 57 #define DRIVER_VERSION "v3.0" 58 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \ 59 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \ 60 Alan Stern" 61 
#define DRIVER_DESC "USB Universal Host Controller Interface driver"

/*
 * debug = 0, no debugging messages
 * debug = 1, dump failed URBs except for stalls
 * debug = 2, dump all failed URBs (including stalls)
 *            show all queues in /debug/uhci/[pci_addr]
 * debug = 3, show all TDs in URBs when dumping
 */
#ifdef DEBUG
#define DEBUG_CONFIGURED	1
static int debug = 1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level");

#else
/* Non-debug builds: compile the debug code out and pin "debug" to 0 */
#define DEBUG_CONFIGURED	0
#define debug			0
#endif

/* Scratch buffer used by uhci_irq() (when debug > 1) for dumping the
 * HC schedule via uhci_sprint_schedule(); allocated in uhci_hcd_init(). */
static char *errbuf;
#define ERRBUF_LEN    (32 * 1024)

/* Slab cache for per-URB private data (struct urb_priv) */
static kmem_cache_t *uhci_up_cachep;	/* urb_priv */

/* Forward declarations -- defined later in this file */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);
static void uhci_get_current_frame_number(struct uhci_hcd *uhci);

/* If a transfer is still active after this much time, turn off FSBR */
#define IDLE_TIMEOUT	msecs_to_jiffies(50)
#define FSBR_DELAY	msecs_to_jiffies(50)

/* When we timeout an idle transfer for FSBR, we'll switch it over to */
/* depth first traversal. We'll do it in groups of this number of TDs */
/* to make sure it doesn't hog all of the bandwidth */
#define DEPTH_INTERVAL 5

/* The rest of the driver is textually included here, so all of its
 * helpers stay file-static. */
#include "uhci-debug.c"
#include "uhci-q.c"
#include "uhci-hub.c"

/* Early-boot HC handoff helpers, implemented in pci-quirks */
extern void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
extern int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);

/*
 * Finish up a host controller reset and update the recorded state.
 */
static void finish_reset(struct uhci_hcd *uhci)
{
	int port;

	/* HCRESET doesn't affect the Suspend, Reset, and Resume Detect
	 * bits in the port status and control registers.
	 * We have to clear them by hand.
	 */
	for (port = 0; port < uhci->rh_numports; ++port)
		outw(0, uhci->io_addr + USBPORTSC1 + (port * 2));

	/* Reset all the recorded root-hub/port state to match the hardware */
	uhci->port_c_suspend = uhci->suspended_ports =
			uhci->resuming_ports = 0;
	uhci->rh_state = UHCI_RH_RESET;
	uhci->is_stopped = UHCI_IS_STOPPED;
	uhci_to_hcd(uhci)->state = HC_STATE_HALT;
	uhci_to_hcd(uhci)->poll_rh = 0;
}

/*
 * Last rites for a defunct/nonfunctional controller
 * or one we don't want to use any more.
 */
static void hc_died(struct uhci_hcd *uhci)
{
	uhci_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr);
	finish_reset(uhci);
	/* Tell the rest of the driver never to touch the hardware again */
	uhci->hc_inaccessible = 1;
}

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 */
static void check_and_reset_hc(struct uhci_hcd *uhci)
{
	if (uhci_check_and_reset_hc(to_pci_dev(uhci_dev(uhci)), uhci->io_addr))
		finish_reset(uhci);
}

/*
 * Store the basic register settings needed by the controller.
 */
static void configure_hc(struct uhci_hcd *uhci)
{
	/* Set the frame length to the default: 1 ms exactly */
	outb(USBSOF_DEFAULT, uhci->io_addr + USBSOF);

	/* Store the frame list base address */
	outl(uhci->frame_dma_handle, uhci->io_addr + USBFLBASEADD);

	/* Set the current frame number */
	outw(uhci->frame_number, uhci->io_addr + USBFRNUM);

	/* Mark controller as not halted before we enable interrupts */
	uhci_to_hcd(uhci)->state = HC_STATE_SUSPENDED;
	mb();

	/* Enable PIRQ */
	pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP,
			USBLEGSUP_DEFAULT);
}


/*
 * Returns 1 if this controller's resume-detect interrupt can't be
 * relied on, based on known PCI-vendor-specific hardware quirks.
 */
static int resume_detect_interrupts_are_broken(struct uhci_hcd *uhci)
{
	int port;

	switch (to_pci_dev(uhci_dev(uhci))->vendor) {
	    default:
		break;

	    case PCI_VENDOR_ID_GENESYS:
		/* Genesys Logic's GL880S controllers don't generate
		 * resume-detect interrupts.
		 */
		return 1;

	    case PCI_VENDOR_ID_INTEL:
		/* Some of Intel's USB controllers have a bug that causes
		 * resume-detect interrupts if any port has an over-current
		 * condition.  To make matters worse, some motherboards
		 * hardwire unused USB ports' over-current inputs active!
		 * To prevent problems, we will not enable resume-detect
		 * interrupts if any ports are OC.
		 */
		for (port = 0; port < uhci->rh_numports; ++port) {
			if (inw(uhci->io_addr + USBPORTSC1 + port * 2) &
					USBPORTSC_OC)
				return 1;
		}
		break;
	}
	return 0;
}

/*
 * Put the controller into Global Suspend.  Called with uhci->lock held;
 * may temporarily drop it (see the __releases/__acquires annotations).
 */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	int auto_stop;
	int int_enable;

	auto_stop = (new_state == UHCI_RH_AUTO_STOPPED);
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			(auto_stop ? " (auto-stop)" : ""));

	/* If we get a suspend request when we're already auto-stopped
	 * then there's nothing to do.
	 */
	if (uhci->rh_state == UHCI_RH_AUTO_STOPPED) {
		uhci->rh_state = new_state;
		return;
	}

	/* Enable resume-detect interrupts if they work.
	 * Then enter Global Suspend mode, still configured.
	 */
	uhci->working_RD = 1;
	int_enable = USBINTR_RESUME;
	if (resume_detect_interrupts_are_broken(uhci)) {
		uhci->working_RD = int_enable = 0;
	}
	outw(int_enable, uhci->io_addr + USBINTR);
	outw(USBCMD_EGSM | USBCMD_CF, uhci->io_addr + USBCMD);
	mb();
	udelay(5);

	/* If we're auto-stopping then no devices have been attached
	 * for a while, so there shouldn't be any active URBs and the
	 * controller should stop after a few microseconds.  Otherwise
	 * we will give the controller one frame to stop.
	 */
	if (!auto_stop && !(inw(uhci->io_addr + USBSTS) & USBSTS_HCH)) {
		uhci->rh_state = UHCI_RH_SUSPENDING;
		spin_unlock_irq(&uhci->lock);
		msleep(1);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;
	}
	if (!(inw(uhci->io_addr + USBSTS) & USBSTS_HCH))
		dev_warn(uhci_dev(uhci), "Controller not stopped yet!\n");

	/* Record the frame number before marking the HC stopped; the
	 * smp_wmb() pairs with the smp_rmb() in uhci_hcd_get_frame_number() */
	uhci_get_current_frame_number(uhci);
	smp_wmb();

	uhci->rh_state = new_state;
	uhci->is_stopped = UHCI_IS_STOPPED;
	/* If resume-detect interrupts don't work we must poll the root hub */
	uhci_to_hcd(uhci)->poll_rh = !int_enable;

	uhci_scan_schedule(uhci, NULL);
}

/*
 * (Re)start the controller: Run/Stop = 1 and re-enable all interrupts.
 */
static void start_rh(struct uhci_hcd *uhci)
{
	uhci_to_hcd(uhci)->state = HC_STATE_RUNNING;
	uhci->is_stopped = 0;
	smp_wmb();

	/* Mark it configured and running with a 64-byte max packet.
	 * All interrupts are enabled, even though RESUME won't do anything.
 */
	outw(USBCMD_RS | USBCMD_CF | USBCMD_MAXP, uhci->io_addr + USBCMD);
	outw(USBINTR_TIMEOUT | USBINTR_RESUME | USBINTR_IOC | USBINTR_SP,
			uhci->io_addr + USBINTR);
	mb();
	uhci->rh_state = UHCI_RH_RUNNING;
	uhci_to_hcd(uhci)->poll_rh = 1;
}

/*
 * Resume the root hub.  Called with uhci->lock held; drops it around
 * the 20 ms Global Resume sleep.
 */
static void wakeup_rh(struct uhci_hcd *uhci)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	dev_dbg(uhci_dev(uhci), "%s%s\n", __FUNCTION__,
			uhci->rh_state == UHCI_RH_AUTO_STOPPED ?
				" (auto-start)" : "");

	/* If we are auto-stopped then no devices are attached so there's
	 * no need for wakeup signals.  Otherwise we send Global Resume
	 * for 20 ms.
	 */
	if (uhci->rh_state == UHCI_RH_SUSPENDED) {
		uhci->rh_state = UHCI_RH_RESUMING;
		outw(USBCMD_FGR | USBCMD_EGSM | USBCMD_CF,
				uhci->io_addr + USBCMD);
		spin_unlock_irq(&uhci->lock);
		msleep(20);
		spin_lock_irq(&uhci->lock);
		if (uhci->hc_inaccessible)	/* Died */
			return;

		/* End Global Resume and wait for EOP to be sent */
		outw(USBCMD_CF, uhci->io_addr + USBCMD);
		mb();
		udelay(4);
		if (inw(uhci->io_addr + USBCMD) & USBCMD_FGR)
			dev_warn(uhci_dev(uhci), "FGR not stopped yet!\n");
	}

	start_rh(uhci);

	/* Restart root hub polling */
	mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
}

/*
 * Interrupt handler.  Returns IRQ_NONE when none of the status bits we
 * own is set (this IRQ line is shared and the interrupt isn't ours).
 */
static irqreturn_t uhci_irq(struct usb_hcd *hcd, struct pt_regs *regs)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned short status;
	unsigned long flags;

	/*
	 * Read the interrupt status, and write it back to clear the
	 * interrupt cause.  Contrary to the UHCI specification, the
	 * "HC Halted" status bit is persistent: it is RO, not R/WC.
	 */
	status = inw(uhci->io_addr + USBSTS);
	if (!(status & ~USBSTS_HCH))	/* shared interrupt, not mine */
		return IRQ_NONE;
	outw(status, uhci->io_addr + USBSTS);		/* Clear it */

	if (status & ~(USBSTS_USBINT | USBSTS_ERROR | USBSTS_RD)) {
		if (status & USBSTS_HSE)
			dev_err(uhci_dev(uhci), "host system error, "
					"PCI problems?\n");
		if (status & USBSTS_HCPE)
			dev_err(uhci_dev(uhci), "host controller process "
					"error, something bad happened!\n");
		if (status & USBSTS_HCH) {
			spin_lock_irqsave(&uhci->lock, flags);
			if (uhci->rh_state >= UHCI_RH_RUNNING) {
				/* Halted while it was supposed to be running */
				dev_err(uhci_dev(uhci),
					"host controller halted, "
					"very bad!\n");
				if (debug > 1 && errbuf) {
					/* Print the schedule for debugging */
					uhci_sprint_schedule(uhci,
							errbuf, ERRBUF_LEN);
					lprintk(errbuf);
				}
				hc_died(uhci);

				/* Force a callback in case there are
				 * pending unlinks */
				mod_timer(&hcd->rh_timer, jiffies);
			}
			spin_unlock_irqrestore(&uhci->lock, flags);
		}
	}

	if (status & USBSTS_RD)
		usb_hcd_poll_rh_status(hcd);
	else {
		spin_lock_irqsave(&uhci->lock, flags);
		uhci_scan_schedule(uhci, regs);
		spin_unlock_irqrestore(&uhci->lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Store the current frame number in uhci->frame_number if the controller
 * is running
 */
static void uhci_get_current_frame_number(struct uhci_hcd *uhci)
{
	if (!uhci->is_stopped)
		uhci->frame_number = inw(uhci->io_addr + USBFRNUM);
}

/*
 * De-allocate all resources
 */
static void release_uhci(struct uhci_hcd *uhci)
{
	int i;

	if (DEBUG_CONFIGURED) {
		/* Mark uninitialized under the lock so the debugfs dump
		 * code stops touching our data structures */
		spin_lock_irq(&uhci->lock);
		uhci->is_initialized = 0;
		spin_unlock_irq(&uhci->lock);

		debugfs_remove(uhci->dentry);
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++)
		uhci_free_qh(uhci, uhci->skelqh[i]);

	uhci_free_td(uhci, uhci->term_td);

	dma_pool_destroy(uhci->qh_pool);

	dma_pool_destroy(uhci->td_pool);

	kfree(uhci->frame_cpu);

	dma_free_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			uhci->frame, uhci->frame_dma_handle);
}

/*
 * Detect the number of root-hub ports and take the controller away
 * from the BIOS.  Always returns 0.
 */
static int uhci_reset(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned io_size = (unsigned) hcd->rsrc_len;
	int port;

	uhci->io_addr = (unsigned long) hcd->rsrc_start;

	/* The UHCI spec says devices must have 2 ports, and goes on to say
	 * they may have more but gives no way to determine how many there
	 * are.  However according to the UHCI spec, Bit 7 of the port
	 * status and control register is always set to 1.  So we try to
	 * use this to our advantage.  Another common failure mode when
	 * a nonexistent register is addressed is to return all ones, so
	 * we test for that also.
	 */
	for (port = 0; port < (io_size - USBPORTSC1) / 2; port++) {
		unsigned int portstatus;

		portstatus = inw(uhci->io_addr + USBPORTSC1 + (port * 2));
		if (!(portstatus & 0x0080) || portstatus == 0xffff)
			break;
	}
	if (debug)
		dev_info(uhci_dev(uhci), "detected %d ports\n", port);

	/* Anything greater than 7 is weird so we'll ignore it. */
	if (port > UHCI_RH_MAXCHILD) {
		dev_info(uhci_dev(uhci), "port count misdetected? "
				"forcing to 2 ports\n");
		port = 2;
	}
	uhci->rh_numports = port;

	/* Kick BIOS off this hardware and reset if the controller
	 * isn't already safely quiescent.
	 */
	check_and_reset_hc(uhci);
	return 0;
}

/* Make sure the controller is quiescent and that we're not using it
 * any more.  This is mainly for the benefit of programs which, like kexec,
 * expect the hardware to be idle: not doing DMA or generating IRQs.
 *
 * This routine may be called in a damaged or failing kernel.  Hence we
 * do not acquire the spinlock before shutting down the controller.
 */
static void uhci_shutdown(struct pci_dev *pdev)
{
	struct usb_hcd *hcd = (struct usb_hcd *) pci_get_drvdata(pdev);

	hc_died(hcd_to_uhci(hcd));
}

/*
 * Allocate a frame list, and then setup the skeleton
 *
 * The hardware doesn't really know any difference
 * in the queues, but the order does matter for the
 * protocols higher up.  The order is:
 *
 *  - any isochronous events handled before any
 *    of the queues.  We don't do that here, because
 *    we'll create the actual TD entries on demand.
 *  - The first queue is the interrupt queue.
 *  - The second queue is the control queue, split into low- and full-speed
 *  - The third queue is bulk queue.
 *  - The fourth queue is the bandwidth reclamation queue, which loops back
 *    to the full-speed control queue.
 */
static int uhci_start(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	/* NOTE(review): allocation failures below return this -EBUSY
	 * default; only the debugfs path overrides it with -ENOMEM */
	int retval = -EBUSY;
	int i;
	struct dentry *dentry;

	hcd->uses_new_polling = 1;

	uhci->fsbr = 0;
	uhci->fsbrtimeout = 0;

	spin_lock_init(&uhci->lock);

	INIT_LIST_HEAD(&uhci->td_remove_list);
	INIT_LIST_HEAD(&uhci->idle_qh_list);

	init_waitqueue_head(&uhci->waitqh);

	if (DEBUG_CONFIGURED) {
		dentry = debugfs_create_file(hcd->self.bus_name,
				S_IFREG|S_IRUGO|S_IWUSR, uhci_debugfs_root,
				uhci, &uhci_debug_operations);
		if (!dentry) {
			dev_err(uhci_dev(uhci), "couldn't create uhci "
					"debugfs entry\n");
			retval = -ENOMEM;
			goto err_create_debug_entry;
		}
		uhci->dentry = dentry;
	}

	/* DMA-coherent frame list, shared with the controller */
	uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
			UHCI_NUMFRAMES * sizeof(*uhci->frame),
			&uhci->frame_dma_handle, 0);
	if (!uhci->frame) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"consistent memory for frame list\n");
		goto err_alloc_frame;
	}
	memset(uhci->frame, 0, UHCI_NUMFRAMES * sizeof(*uhci->frame));

	uhci->frame_cpu =
			kcalloc(UHCI_NUMFRAMES, sizeof(*uhci->frame_cpu),
			GFP_KERNEL);
	if (!uhci->frame_cpu) {
		dev_err(uhci_dev(uhci), "unable to allocate "
				"memory for frame pointers\n");
		goto err_alloc_frame_cpu;
	}

	uhci->td_pool = dma_pool_create("uhci_td", uhci_dev(uhci),
			sizeof(struct uhci_td), 16, 0);
	if (!uhci->td_pool) {
		dev_err(uhci_dev(uhci), "unable to create td dma_pool\n");
		goto err_create_td_pool;
	}

	uhci->qh_pool = dma_pool_create("uhci_qh", uhci_dev(uhci),
			sizeof(struct uhci_qh), 16, 0);
	if (!uhci->qh_pool) {
		dev_err(uhci_dev(uhci), "unable to create qh dma_pool\n");
		goto err_create_qh_pool;
	}

	uhci->term_td = uhci_alloc_td(uhci);
	if (!uhci->term_td) {
		dev_err(uhci_dev(uhci), "unable to allocate terminating TD\n");
		goto err_alloc_term_td;
	}

	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
		if (!uhci->skelqh[i]) {
			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
			goto err_alloc_skelqh;
		}
	}

	/*
	 * 8 Interrupt queues; link all higher int queues to int1,
	 * then link int1 to control and control to bulk
	 */
	uhci->skel_int128_qh->link =
			uhci->skel_int64_qh->link =
			uhci->skel_int32_qh->link =
			uhci->skel_int16_qh->link =
			uhci->skel_int8_qh->link =
			uhci->skel_int4_qh->link =
			uhci->skel_int2_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_int1_qh->dma_handle);

	uhci->skel_int1_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_ls_control_qh->dma_handle);
	uhci->skel_ls_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_fs_control_qh->dma_handle);
	uhci->skel_fs_control_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_bulk_qh->dma_handle);
	uhci->skel_bulk_qh->link = UHCI_PTR_QH |
			cpu_to_le32(uhci->skel_term_qh->dma_handle);

	/* This dummy TD is to work around a bug in Intel PIIX controllers */
	uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
			(0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0);
	/* The dummy TD points at itself */
	uhci->term_td->link = cpu_to_le32(uhci->term_td->dma_handle);

	uhci->skel_term_qh->link = UHCI_PTR_TERM;
	uhci->skel_term_qh->element = cpu_to_le32(uhci->term_td->dma_handle);

	/*
	 * Fill the frame list: make all entries point to the proper
	 * interrupt queue.
	 *
	 * The interrupt queues will be interleaved as evenly as possible.
	 * There's not much to be done about period-1 interrupts; they have
	 * to occur in every frame.  But we can schedule period-2 interrupts
	 * in odd-numbered frames, period-4 interrupts in frames congruent
	 * to 2 (mod 4), and so on.  This way each frame only has two
	 * interrupt QHs, which will help spread out bandwidth utilization.
	 */
	for (i = 0; i < UHCI_NUMFRAMES; i++) {
		int irq;

		/*
		 * ffs (Find First bit Set) does exactly what we need:
		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
		 * ffs >= 7 => not on any high-period queue, so use
		 * skel_int1_qh = skelqh[9].
		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
		 */
		irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
		if (irq <= 1)
			irq = 9;

		/* Only place we don't use the frame list routines */
		uhci->frame[i] = UHCI_PTR_QH |
				cpu_to_le32(uhci->skelqh[irq]->dma_handle);
	}

	/*
	 * Some architectures require a full mb() to enforce completion of
	 * the memory writes above before the I/O transfers in configure_hc().
626 */ 627 mb(); 628 629 configure_hc(uhci); 630 uhci->is_initialized = 1; 631 start_rh(uhci); 632 return 0; 633 634 /* 635 * error exits: 636 */ 637 err_alloc_skelqh: 638 for (i = 0; i < UHCI_NUM_SKELQH; i++) { 639 if (uhci->skelqh[i]) 640 uhci_free_qh(uhci, uhci->skelqh[i]); 641 } 642 643 uhci_free_td(uhci, uhci->term_td); 644 645 err_alloc_term_td: 646 dma_pool_destroy(uhci->qh_pool); 647 648 err_create_qh_pool: 649 dma_pool_destroy(uhci->td_pool); 650 651 err_create_td_pool: 652 kfree(uhci->frame_cpu); 653 654 err_alloc_frame_cpu: 655 dma_free_coherent(uhci_dev(uhci), 656 UHCI_NUMFRAMES * sizeof(*uhci->frame), 657 uhci->frame, uhci->frame_dma_handle); 658 659 err_alloc_frame: 660 debugfs_remove(uhci->dentry); 661 662 err_create_debug_entry: 663 return retval; 664 } 665 666 static void uhci_stop(struct usb_hcd *hcd) 667 { 668 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 669 670 spin_lock_irq(&uhci->lock); 671 if (!uhci->hc_inaccessible) 672 hc_died(uhci); 673 uhci_scan_schedule(uhci, NULL); 674 spin_unlock_irq(&uhci->lock); 675 676 release_uhci(uhci); 677 } 678 679 #ifdef CONFIG_PM 680 static int uhci_rh_suspend(struct usb_hcd *hcd) 681 { 682 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 683 684 spin_lock_irq(&uhci->lock); 685 if (!uhci->hc_inaccessible) /* Not dead */ 686 suspend_rh(uhci, UHCI_RH_SUSPENDED); 687 spin_unlock_irq(&uhci->lock); 688 return 0; 689 } 690 691 static int uhci_rh_resume(struct usb_hcd *hcd) 692 { 693 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 694 int rc = 0; 695 696 spin_lock_irq(&uhci->lock); 697 if (uhci->hc_inaccessible) { 698 if (uhci->rh_state == UHCI_RH_SUSPENDED) { 699 dev_warn(uhci_dev(uhci), "HC isn't running!\n"); 700 rc = -ENODEV; 701 } 702 /* Otherwise the HC is dead */ 703 } else 704 wakeup_rh(uhci); 705 spin_unlock_irq(&uhci->lock); 706 return rc; 707 } 708 709 static int uhci_suspend(struct usb_hcd *hcd, pm_message_t message) 710 { 711 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 712 int rc = 0; 713 714 dev_dbg(uhci_dev(uhci), 
"%s\n", __FUNCTION__); 715 716 spin_lock_irq(&uhci->lock); 717 if (uhci->hc_inaccessible) /* Dead or already suspended */ 718 goto done; 719 720 if (uhci->rh_state > UHCI_RH_SUSPENDED) { 721 dev_warn(uhci_dev(uhci), "Root hub isn't suspended!\n"); 722 rc = -EBUSY; 723 goto done; 724 }; 725 726 /* All PCI host controllers are required to disable IRQ generation 727 * at the source, so we must turn off PIRQ. 728 */ 729 pci_write_config_word(to_pci_dev(uhci_dev(uhci)), USBLEGSUP, 0); 730 mb(); 731 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 732 uhci->hc_inaccessible = 1; 733 hcd->poll_rh = 0; 734 735 /* FIXME: Enable non-PME# remote wakeup? */ 736 737 done: 738 spin_unlock_irq(&uhci->lock); 739 return rc; 740 } 741 742 static int uhci_resume(struct usb_hcd *hcd) 743 { 744 struct uhci_hcd *uhci = hcd_to_uhci(hcd); 745 746 dev_dbg(uhci_dev(uhci), "%s\n", __FUNCTION__); 747 748 /* Since we aren't in D3 any more, it's safe to set this flag 749 * even if the controller was dead. It might not even be dead 750 * any more, if the firmware or quirks code has reset it. 751 */ 752 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 753 mb(); 754 755 if (uhci->rh_state == UHCI_RH_RESET) /* Dead */ 756 return 0; 757 spin_lock_irq(&uhci->lock); 758 759 /* FIXME: Disable non-PME# remote wakeup? */ 760 761 uhci->hc_inaccessible = 0; 762 763 /* The BIOS may have changed the controller settings during a 764 * system wakeup. Check it and reconfigure to avoid problems. 
 */
	check_and_reset_hc(uhci);
	configure_hc(uhci);

	if (uhci->rh_state == UHCI_RH_RESET) {

		/* The controller had to be reset */
		usb_root_hub_lost_power(hcd->self.root_hub);
		suspend_rh(uhci, UHCI_RH_SUSPENDED);
	}

	spin_unlock_irq(&uhci->lock);

	if (!uhci->working_RD) {
		/* Suspended root hub needs to be polled */
		hcd->poll_rh = 1;
		usb_hcd_poll_rh_status(hcd);
	}
	return 0;
}
#endif

/* Wait until a particular device/endpoint's QH is idle, and free it */
static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	struct uhci_qh *qh;

	spin_lock_irq(&uhci->lock);
	qh = (struct uhci_qh *) hep->hcpriv;
	if (qh == NULL)
		goto done;

	while (qh->state != QH_STATE_IDLE) {
		++uhci->num_waiting;
		/* Drop the lock while sleeping; re-test under the lock */
		spin_unlock_irq(&uhci->lock);
		wait_event_interruptible(uhci->waitqh,
				qh->state == QH_STATE_IDLE);
		spin_lock_irq(&uhci->lock);
		--uhci->num_waiting;
	}

	uhci_free_qh(uhci, qh);
done:
	spin_unlock_irq(&uhci->lock);
}

/*
 * Return the current frame number: the saved value if the controller
 * is stopped, otherwise read it from the hardware.
 */
static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	int is_stopped;
	int frame_number;

	/* Minimize latency by avoiding the spinlock */
	local_irq_save(flags);
	is_stopped = uhci->is_stopped;
	/* Pairs with the smp_wmb() in suspend_rh()/start_rh() */
	smp_rmb();
	frame_number = (is_stopped ? uhci->frame_number :
			inw(uhci->io_addr + USBFRNUM));
	local_irq_restore(flags);
	return frame_number;
}

static const char hcd_name[] = "uhci_hcd";

static const struct hc_driver uhci_driver = {
	.description =		hcd_name,
	.product_desc =		"UHCI Host Controller",
	.hcd_priv_size =	sizeof(struct uhci_hcd),

	/* Generic hardware linkage */
	.irq =			uhci_irq,
	.flags =		HCD_USB11,

	/* Basic lifecycle operations */
	.reset =		uhci_reset,
	.start =		uhci_start,
#ifdef CONFIG_PM
	.suspend =		uhci_suspend,
	.resume =		uhci_resume,
	.bus_suspend =		uhci_rh_suspend,
	.bus_resume =		uhci_rh_resume,
#endif
	.stop =			uhci_stop,

	.urb_enqueue =		uhci_urb_enqueue,
	.urb_dequeue =		uhci_urb_dequeue,

	.endpoint_disable =	uhci_hcd_endpoint_disable,
	.get_frame_number =	uhci_hcd_get_frame_number,

	.hub_status_data =	uhci_hub_status_data,
	.hub_control =		uhci_hub_control,
};

static const struct pci_device_id uhci_pci_ids[] = { {
	/* handle any USB UHCI controller */
	PCI_DEVICE_CLASS(((PCI_CLASS_SERIAL_USB << 8) | 0x00), ~0),
	.driver_data =	(unsigned long) &uhci_driver,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, uhci_pci_ids);

static struct pci_driver uhci_pci_driver = {
	.name =		(char *)hcd_name,
	.id_table =	uhci_pci_ids,

	.probe =	usb_hcd_pci_probe,
	.remove =	usb_hcd_pci_remove,
	.shutdown =	uhci_shutdown,

#ifdef	CONFIG_PM
	.suspend =	usb_hcd_pci_suspend,
	.resume =	usb_hcd_pci_resume,
#endif	/* PM */
};

/*
 * Module initialization: allocate the debug buffer and debugfs dir
 * (debug builds only), create the urb_priv slab cache, and register
 * the PCI driver.  Unwinds everything on failure.
 */
static int __init uhci_hcd_init(void)
{
	int retval = -ENOMEM;

	printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION "\n");

	if (usb_disabled())
		return -ENODEV;

	if (DEBUG_CONFIGURED) {
		errbuf = kmalloc(ERRBUF_LEN, GFP_KERNEL);
		if (!errbuf)
			goto errbuf_failed;
		uhci_debugfs_root = debugfs_create_dir("uhci", NULL);
		if (!uhci_debugfs_root)
			goto debug_failed;
	}

	uhci_up_cachep = kmem_cache_create("uhci_urb_priv",
		sizeof(struct urb_priv), 0, 0, NULL, NULL);
	if (!uhci_up_cachep)
		goto up_failed;

	retval = pci_register_driver(&uhci_pci_driver);
	if (retval)
		goto init_failed;

	return 0;

init_failed:
	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

up_failed:
	debugfs_remove(uhci_debugfs_root);

debug_failed:
	kfree(errbuf);

errbuf_failed:

	return retval;
}

/* Module teardown: reverse of uhci_hcd_init() */
static void __exit uhci_hcd_cleanup(void)
{
	pci_unregister_driver(&uhci_pci_driver);

	if (kmem_cache_destroy(uhci_up_cachep))
		warn("not all urb_privs were freed!");

	debugfs_remove(uhci_debugfs_root);
	kfree(errbuf);
}

module_init(uhci_hcd_init);
module_exit(uhci_hcd_cleanup);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");