/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 *          and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */

#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/device.h>

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

/* Default share-scheduling timeslice: 200ms at any HZ. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime = DEFAULT_SPIN_TIME;

/* Announced ports (parport->list), guarded by parportlist_lock. */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Serialises driver attach/detach against port announce/remove. */
static DEFINE_MUTEX(registration_lock);

/* What you can do to a port that's gone away..
 */
/*
 * No-op port operations, installed in place of a removed port's ops so
 * that stragglers still holding a reference fail harmlessly instead of
 * touching vanished hardware.
 */
static void dead_write_lines(struct parport *p, unsigned char b){}
static unsigned char dead_read_lines(struct parport *p) { return 0; }
static unsigned char dead_frob_lines(struct parport *p, unsigned char b,
			     unsigned char c) { return 0; }
static void dead_onearg(struct parport *p){}
static void dead_initstate(struct pardevice *d, struct parport_state *s) { }
static void dead_state(struct parport *p, struct parport_state *s) { }
static size_t dead_write(struct parport *p, const void *b, size_t l, int f)
{ return 0; }
static size_t dead_read(struct parport *p, void *b, size_t l, int f)
{ return 0; }
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};

static struct device_type parport_device_type = {
	.name = "parport",
};

/* Distinguish port devices from pardevices on the parport bus. */
static int is_parport(struct device *dev)
{
	return dev->type == &parport_device_type;
}

static int parport_probe(struct device *dev)
{
	struct parport_driver *drv;

	/* Ports themselves are never bound to parport drivers. */
	if (is_parport(dev))
		return -ENODEV;

	drv = to_parport_driver(dev->driver);
	if (!drv->probe) {
		/* if driver has not defined a custom probe */
		struct pardevice *par_dev = to_pardevice(dev);

		/* default match: device name must equal driver name */
		if (strcmp(par_dev->name, drv->name))
			return -ENODEV;
		return 0;
	}
	/* if driver defined its own probe */
	return drv->probe(to_pardevice(dev));
}

static const struct bus_type parport_bus_type = {
	.name = "parport",
	.probe = parport_probe,
};

/* Register the parport bus with the driver core (called at init). */
int parport_bus_init(void)
{
	return bus_register(&parport_bus_type);
}

void parport_bus_exit(void)
{
	bus_unregister(&parport_bus_type);
}

/*
 * iterates through all the drivers registered with the bus and sends the port
 * details to the match_port callback of the driver, so that the driver can
 * know about the new port that just registered with the bus and decide if it
 * wants to use this new port.
 */
static int driver_check(struct device_driver *dev_drv, void *_port)
{
	struct parport *port = _port;
	struct parport_driver *drv = to_parport_driver(dev_drv);

	if (drv->match_port)
		drv->match_port(port);
	return 0;
}

/* Call attach(port) for each registered driver. */
static void attach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */

	/*
	 * call the driver_check function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_check);
}

static int driver_detach(struct device_driver *_drv, void *_port)
{
	struct parport *port = _port;
	struct parport_driver *drv = to_parport_driver(_drv);

	if (drv->detach)
		drv->detach(port);
	return 0;
}

/* Call detach(port) for each registered driver. */
static void detach_driver_chain(struct parport *port)
{
	/* caller has exclusive registration_lock */

	/*
	 * call the detach function of the drivers registered in
	 * new device model
	 */

	bus_for_each_drv(&parport_bus_type, NULL, port, driver_detach);
}

/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver(void)
{
	/*
	 * There is no actual module called this: you should set
	 * up an alias for modutils.
	 */
	request_module("parport_lowlevel");
}

/*
 * iterates through all the devices connected to the bus and sends the device
 * details to the match_port callback of the driver, so that the driver can
 * know what are all the ports that are connected to the bus and choose the
 * port to which it wants to register its device.
 */
static int port_check(struct device *dev, void *dev_drv)
{
	struct parport_driver *drv = dev_drv;

	/* only send ports, do not send other devices connected to bus */
	if (is_parport(dev))
		drv->match_port(to_parport_dev(dev));
	return 0;
}

/*
 * Iterates through all the devices connected to the bus and return 1
 * if the device is a parallel port.
 */

static int port_detect(struct device *dev, void *dev_drv)
{
	if (is_parport(dev))
		return 1;
	return 0;
}

/**
 *	__parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *	@owner: owner module of drv
 *	@mod_name: module name string
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	If devmodel is true then the new device model is used
 *	for registration.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	If using the non device model:
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *
 *	Returns 0 on success. The non device model will always succeed,
 *	but the new device model can fail and will return the error code.
 **/

int __parport_register_driver(struct parport_driver *drv, struct module *owner,
			      const char *mod_name)
{
	/* using device model */
	int ret;

	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &parport_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;
	ret = driver_register(&drv->driver);
	if (ret)
		return ret;

	/*
	 * check if bus has any parallel port registered, if
	 * none is found then load the lowlevel driver.
	 */
	ret = bus_for_each_dev(&parport_bus_type, NULL, NULL,
			       port_detect);
	if (!ret)
		get_lowlevel_driver();

	/* offer every existing port to the new driver's match_port */
	mutex_lock(&registration_lock);
	if (drv->match_port)
		bus_for_each_dev(&parport_bus_type, NULL, drv,
				 port_check);
	mutex_unlock(&registration_lock);

	return 0;
}
EXPORT_SYMBOL(__parport_register_driver);

/* bus_for_each_dev callback: notify @_drv that a port is going away. */
static int port_detach(struct device *dev, void *_drv)
{
	struct parport_driver *drv = _drv;

	if (is_parport(dev) && drv->detach)
		drv->detach(to_parport_dev(dev));

	return 0;
}

/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/

void parport_unregister_driver(struct parport_driver *drv)
{
	mutex_lock(&registration_lock);
	/* detach from every port before dropping the driver */
	bus_for_each_dev(&parport_bus_type, NULL, drv, port_detach);
	driver_unregister(&drv->driver);
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_unregister_driver);

/*
 * Release callback for a port's bus_dev: runs when the last reference
 * is dropped.  Unlinks the port from all_ports and frees everything.
 */
static void free_port(struct device *dev)
{
	int d;
	struct parport *port = to_parport_dev(dev);

	spin_lock(&full_list_lock);
	list_del(&port->full_list);
	spin_unlock(&full_list_lock);
	/* 5 probe_info slots: mux master + up to 4 daisy devices */
	for (d = 0; d < 5; d++) {
		kfree(port->probe_info[d].class_name);
		kfree(port->probe_info[d].mfr);
		kfree(port->probe_info[d].model);
		kfree(port->probe_info[d].cmdset);
		kfree(port->probe_info[d].description);
	}

	kfree(port);
}

/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/

struct parport *parport_get_port(struct parport *port)
{
	struct device *dev = get_device(&port->bus_dev);

	return to_parport_dev(dev);
}
EXPORT_SYMBOL(parport_get_port);

/* Remove the port from the device hierarchy and drop its base ref. */
void parport_del_port(struct parport *port)
{
	device_unregister(&port->bus_dev);
}
EXPORT_SYMBOL(parport_del_port);

/**
 *	parport_put_port - decrement a port's reference count
 *	@port: the port
 *
 *	This should be called once for each call to parport_get_port(),
 *	once the port is no longer needed. When the reference count reaches
 *	zero (port is no longer used), free_port is called.
 **/

void parport_put_port(struct parport *port)
{
	put_device(&port->bus_dev);
}
EXPORT_SYMBOL(parport_put_port);

/**
 *	parport_register_port - register a parallel port
 *	@base: base I/O address
 *	@irq: IRQ line
 *	@dma: DMA channel
 *	@ops: pointer to the port driver's port operations structure
 *
 *	When a parallel port (lowlevel) driver finds a port that
 *	should be made available to parallel port device drivers, it
 *	should call parport_register_port().  The @base, @irq, and
 *	@dma parameters are for the convenience of port drivers, and
 *	for ports where they aren't meaningful needn't be set to
 *	anything special.  They can be altered afterwards by adjusting
 *	the relevant members of the parport structure that is returned
 *	and represents the port.  They should not be tampered with
 *	after calling parport_announce_port, however.
 *
 *	If there are parallel port device drivers in the system that
 *	have registered themselves using parport_register_driver(),
 *	they are not told about the port at this time; that is done by
 *	parport_announce_port().
 *
 *	The @ops structure is allocated by the caller, and must not be
 *	deallocated before calling parport_remove_port().
 *
 *	If there is no memory to allocate a new parport structure,
 *	this function will return %NULL.
 **/

struct parport *parport_register_port(unsigned long base, int irq, int dma,
				      struct parport_operations *ops)
{
	struct list_head *l;
	struct parport *tmp;
	int num;
	int device;
	int ret;

	tmp = kzalloc(sizeof(struct parport), GFP_KERNEL);
	if (!tmp)
		return NULL;

	/* Init our structure */
	tmp->base = base;
	tmp->irq = irq;
	tmp->dma = dma;
	tmp->muxport = tmp->daisy = tmp->muxsel = -1;
	INIT_LIST_HEAD(&tmp->list);
	tmp->ops = ops;
	tmp->physport = tmp;
	rwlock_init(&tmp->cad_lock);
	spin_lock_init(&tmp->waitlist_lock);
	spin_lock_init(&tmp->pardevice_lock);
	tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
	tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
	sema_init(&tmp->ieee1284.irq, 0);
	tmp->spintime = parport_default_spintime;
	atomic_set(&tmp->ref_count, 1);

	/* Search for the lowest free parport number. */

	spin_lock(&full_list_lock);
	num = 0;
	list_for_each(l, &all_ports) {
		struct parport *p = list_entry(l, struct parport, full_list);

		/* all_ports is kept sorted; first gap is the free number */
		if (p->number != num++)
			break;
	}
	tmp->portnum = tmp->number = num;
	/* insert before l, i.e. in the gap (or at the tail) just found */
	list_add_tail(&tmp->full_list, l);
	spin_unlock(&full_list_lock);

	/*
	 * Now that the portnum is known finish doing the Init.
	 */
	dev_set_name(&tmp->bus_dev, "parport%d", tmp->portnum);
	tmp->bus_dev.bus = &parport_bus_type;
	tmp->bus_dev.release = free_port;
	tmp->bus_dev.type = &parport_device_type;

	tmp->name = dev_name(&tmp->bus_dev);

	for (device = 0; device < 5; device++)
		/* assume the worst */
		tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;

	ret = device_register(&tmp->bus_dev);
	if (ret) {
		/* drop the initial ref; free_port() releases tmp */
		put_device(&tmp->bus_dev);
		return NULL;
	}

	return tmp;
}
EXPORT_SYMBOL(parport_register_port);

/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/

void parport_announce_port(struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		pr_warn("%s: fix this legacy no-device port driver!\n",
			port->name);

	parport_proc_register(port);
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] are the mux-attached subordinate ports, if any */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived. */
	attach_driver_chain(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
EXPORT_SYMBOL(parport_announce_port);

/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/

void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain(port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* Neutralise the hardware ops for any remaining users. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
EXPORT_SYMBOL(parport_remove_port);

/* Release callback for a pardevice's struct device. */
static void free_pardevice(struct device *dev)
{
	struct pardevice *par_dev = to_pardevice(dev);

	kfree_const(par_dev->name);
	kfree(par_dev);
}

/**
 *	parport_register_dev_model - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@par_dev_cb: struct containing callbacks
 *	@id: device number to be given to the device
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The struct pardev_cb contains pointer to callbacks. preemption
 *	callback function, @preempt, is called when this device driver
 *	has claimed access to the port but another device driver wants
 *	to use it.  It is given, @private, as its parameter, and should
 *	return zero if it is willing for the system to release the port
 *	to another driver on its behalf. If it wants to keep control of
 *	the port it should return non-zero, and no action will be taken.
 *	It is good manners for the driver to try to release the port at
 *	the earliest opportunity after its preemption callback rejects a
 *	preemption attempt.  Note that if a preemption callback is happy
 *	for preemption to go ahead, there is no need to release the
 *	port; it is done automatically.  This function may not block, as
 *	it may be called from interrupt context.  If the device driver
 *	does not support preemption, @preempt can be %NULL.
 *
 *	The wake-up ("kick") callback function, @wakeup, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @wakeup can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/

struct pardevice *
parport_register_dev_model(struct parport *port, const char *name,
			   const struct pardev_cb *par_dev_cb, int id)
{
	struct pardevice *par_dev;
	const char *devname;
	int ret;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		pr_err("%s: no more devices allowed\n", port->name);
		return NULL;
	}

	/* A "lurking" device must be preemptable and wakeable. */
	if (par_dev_cb->flags & PARPORT_DEV_LURK) {
		if (!par_dev_cb->preempt || !par_dev_cb->wakeup) {
			pr_info("%s: refused to register lurking device (%s) without callbacks\n",
				port->name, name);
			return NULL;
		}
	}

	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			/*
			 * If a device is already registered and this new
			 * device wants exclusive access, then no need to
			 * continue as we can not grant exclusive access to
			 * this device.
			 */
			pr_err("%s: cannot grant exclusive access for device %s\n",
			       port->name, name);
			return NULL;
		}
	}

	/* Pin the lowlevel driver module and the port for this device. */
	if (!try_module_get(port->ops->owner))
		return NULL;

	parport_get_port(port);

	par_dev = kzalloc(sizeof(*par_dev), GFP_KERNEL);
	if (!par_dev)
		goto err_put_port;

	par_dev->state = kzalloc(sizeof(*par_dev->state), GFP_KERNEL);
	if (!par_dev->state)
		goto err_put_par_dev;

	devname = kstrdup_const(name, GFP_KERNEL);
	if (!devname)
		goto err_free_par_dev;

	par_dev->name = devname;
	par_dev->port = port;
	par_dev->daisy = -1;
	par_dev->preempt = par_dev_cb->preempt;
	par_dev->wakeup = par_dev_cb->wakeup;
	par_dev->private = par_dev_cb->private;
	par_dev->flags = par_dev_cb->flags;
	par_dev->irq_func = par_dev_cb->irq_func;
	par_dev->waiting = 0;
	par_dev->timeout = 5 * HZ;

	par_dev->dev.parent = &port->bus_dev;
	par_dev->dev.bus = &parport_bus_type;
	ret = dev_set_name(&par_dev->dev, "%s.%d", devname, id);
	if (ret)
		goto err_free_devname;
	par_dev->dev.release = free_pardevice;
	par_dev->devmodel = true;
	ret = device_register(&par_dev->dev);
	if (ret) {
		kfree(par_dev->state);
		/* put_device() triggers free_pardevice (name + struct) */
		put_device(&par_dev->dev);
		goto err_put_port;
	}

	/* Chain this onto the list */
	par_dev->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don't need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	/* Re-check exclusivity now that we hold pardevice_lock. */
	if (par_dev_cb->flags & PARPORT_DEV_EXCL) {
		if (port->physport->devices) {
			spin_unlock(&port->physport->pardevice_lock);
			pr_debug("%s: cannot grant exclusive access for device %s\n",
				 port->name, name);
			kfree(par_dev->state);
			device_unregister(&par_dev->dev);
			goto err_put_port;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	par_dev->next = port->physport->devices;
	wmb();	/*
		 * Make sure that tmp->next is written before it's
		 * added to the list; see comments marked 'no locking
		 * required'
		 */
	if (port->physport->devices)
		port->physport->devices->prev = par_dev;
	port->physport->devices = par_dev;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&par_dev->wait_q);
	par_dev->timeslice = parport_default_timeslice;
	par_dev->waitnext = NULL;
	par_dev->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(par_dev, par_dev->state);
	/* First registered device also becomes the /proc representative. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = par_dev;
		parport_device_proc_register(par_dev);
	}

	return par_dev;

err_free_devname:
	kfree_const(devname);
err_free_par_dev:
	kfree(par_dev->state);
err_put_par_dev:
	if (!par_dev->devmodel)
		kfree(par_dev);
err_put_port:
	parport_put_port(port);
	module_put(port->ops->owner);

	return NULL;
}
EXPORT_SYMBOL(parport_register_dev_model);

/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/

void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (!dev) {
		pr_err("%s: passed NULL\n", __func__);
		return;
	}
#endif

	port = dev->port->physport;

	/* Drop the /proc entry if this device owned it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* Force-release the port if the driver forgot to. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release(dev);
	}

	spin_lock(&port->pardevice_lock);
	/* unlink from the doubly-linked device list */
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/*
	 * Make sure we haven't left any pointers around in the wait
	 * list.
	 */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	device_unregister(&dev->dev);

	/* Undo the refs taken in parport_register_dev_model(). */
	module_put(port->ops->owner);
	parport_put_port(port);
}
EXPORT_SYMBOL(parport_unregister_device);

/**
 *	parport_find_number - find a parallel port by number
 *	@number: parallel port number
 *
 *	This returns the parallel port with the specified number, or
 *	%NULL if there is none.
 *
 *	There is an implicit parport_get_port() done already; to throw
 *	away the reference to the port that parport_find_number()
 *	gives you, use parport_put_port().
 */

struct parport *parport_find_number(int number)
{
	struct parport *port, *result = NULL;

	/* no ports yet?  try to load a lowlevel driver first */
	if (list_empty(&portlist))
		get_lowlevel_driver();

	spin_lock(&parportlist_lock);
	list_for_each_entry(port, &portlist, list) {
		if (port->number == number) {
			result = parport_get_port(port);
			break;
		}
	}
	spin_unlock(&parportlist_lock);
	return result;
}
EXPORT_SYMBOL(parport_find_number);

/**
 *	parport_find_base - find a parallel port by base address
 *	@base: base I/O address
 *
 *	This returns the parallel port with the specified base
 *	address, or %NULL if there is none.
 *
 *	There is an implicit parport_get_port() done already; to throw
 *	away the reference to the port that parport_find_base()
 *	gives you, use parport_put_port().
 */

struct parport *parport_find_base(unsigned long base)
{
	struct parport *port, *result = NULL;

	/* no ports yet?  try to load a lowlevel driver first */
	if (list_empty(&portlist))
		get_lowlevel_driver();

	spin_lock(&parportlist_lock);
	list_for_each_entry(port, &portlist, list) {
		if (port->base == base) {
			result = parport_get_port(port);
			break;
		}
	}
	spin_unlock(&parportlist_lock);
	return result;
}
EXPORT_SYMBOL(parport_find_base);

/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/

int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		pr_info("%s: %s already owner\n", dev->port->name, dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave(&port->cad_lock, flags);
	oldcad = port->cad;
	if (oldcad) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			/*
			 * NOTE(review): state is saved into dev->state (the
			 * claimant), not oldcad->state -- matches the
			 * long-standing code; confirm before changing.
			 */
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/*
			 * I think we'll actually deadlock rather than
			 * get here, but just in case..
			 */
			pr_warn("%s: %s released port when preempted!\n",
				port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/*
	 * Can't fail from now on, so mark ourselves as no longer waiting.
	 * dev->waiting bit 0 == "on the wait list",
	 * bit 1 == "sleeping in parport_claim_or_block()".
	 */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again. */
		spin_lock_irq(&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq(&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select(port, dev->daisy,
					  IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/*
	 * If this is the first time we tried to claim the port, register an
	 * interest.  This is only allowed for devices sleeping in
	 * parport_claim_or_block(), or those with a wakeup function.
	 */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock(&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock(&port->waitlist_lock);
	}
	write_unlock_irqrestore(&port->cad_lock, flags);
	return -EAGAIN;
}
EXPORT_SYMBOL(parport_claim);

/**
 *	parport_claim_or_block - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This behaves like parport_claim(), but will block if necessary
 *	to wait for the port to be free.  A return value of 1
 *	indicates that it slept; 0 means that it succeeded without
 *	needing to sleep.  A negative error code indicates failure.
 **/

int parport_claim_or_block(struct pardevice *dev)
{
	int r;

	/*
	 * Signal to parport_claim() that we can wait even without a
	 * wakeup function.
	 */
	dev->waiting = 2;

	/* Try to claim the port.  If this fails, we need to sleep. */
	r = parport_claim(dev);
	if (r == -EAGAIN) {
#ifdef PARPORT_DEBUG_SHARING
		printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n",
		       dev->name);
#endif
		/*
		 * FIXME!!! Use the proper locking for dev->waiting,
		 * and make this use the "wait_event_interruptible()"
		 * interfaces. The cli/sti that used to be here
		 * did nothing.
		 *
		 * See also parport_release()
		 */

		/*
		 * If dev->waiting is clear now, an interrupt
		 * gave us the port and we would deadlock if we slept.
		 */
		if (dev->waiting) {
			wait_event_interruptible(dev->wait_q,
						 !dev->waiting);
			if (signal_pending(current))
				return -EINTR;
			r = 1;
		} else {
			r = 0;
#ifdef PARPORT_DEBUG_SHARING
			printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
			       dev->name);
#endif
		}

#ifdef PARPORT_DEBUG_SHARING
		if (dev->port->physport->cad != dev)
			printk(KERN_DEBUG "%s: exiting parport_claim_or_block but %s owns port!\n",
			       dev->name, dev->port->physport->cad ?
			       dev->port->physport->cad->name : "nobody");
#endif
	}
	dev->waiting = 0;
	return r;
}
EXPORT_SYMBOL(parport_claim_or_block);

/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing parallel port device
 *
 *	This function cannot fail, but it should not be called without
 *	the port claimed.  Similarly, if the port is already claimed
 *	you should not try claiming it again.
 **/

void parport_release(struct pardevice *dev)
{
	/* Operate on the physical port backing this (possibly logical)
	 * parport. */
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore(&port->cad_lock, flags);
		pr_warn("%s: %s tried to release parport when not owner\n",
			port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all(port);
		port->daisy = -1;
	}
#endif

	/* Port has no owner from here on. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers */
	port->ops->save_state(port, dev->state);

	/*
	 * If anybody is waiting, find out who's been there longest and
	 * then wake them up.  (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Hand the port to the sleeper directly, then wake
			 * it so claim_or_block() can return. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			pr_err("%s: don't know how to wake %s\n",
			       port->name, pd->name);
		}
	}

	/*
	 * Nobody was waiting, so walk the list to see if anyone is
	 * interested in being woken up.  (Note: no locking required)
	 */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; !port->cad && pd; pd = pd->next) {
		/* Don't offer the port back to the device releasing it. */
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
EXPORT_SYMBOL(parport_release);

/* Generic interrupt handler for ports registered with an IRQ: just
 * forwards to the shared parport IRQ machinery. */
irqreturn_t parport_irq_handler(int irq, void *dev_id)
{
	struct parport *port = dev_id;

	parport_generic_irq(port);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(parport_irq_handler);

MODULE_DESCRIPTION("Parallel-port resource manager");
MODULE_LICENSE("GPL");