/*
 * bios-less APM driver for ARM Linux
 *  Jamey Hicks <jamey@crl.dec.com>
 *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * This document is available from Microsoft at:
 *    http://www.microsoft.com/whdc/archive/amp_12.mspx
 */
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/apm-emulation.h>
#include <linux/freezer.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/delay.h>


/*
 * The apm_bios device is one of the misc char devices.
 * This is its minor number.
 */
#define APM_MINOR_DEV	134

/*
 * One option can be changed at boot time as follows:
 *	apm=on/off			enable/disable APM
 */

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS		16

struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};
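/*
 * Note on the ring-buffer convention used by queue_add_event() and
 * queue_get_event() below: event_head is the index of the most recently
 * stored event and event_tail is the index just before the oldest unread
 * event; both are pre-incremented modulo APM_MAX_EVENTS.  For example,
 * starting from head == tail == 0, queuing three events fills slots 1, 2
 * and 3, and the first read returns slot 1.  When an add would make head
 * catch up with tail, the oldest event is silently dropped (after a
 * one-time "queue overflowed" warning).
 */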
/*
 * thread states (for threads using a writable /dev/apm_bios fd):
 *
 * SUSPEND_NONE:	nothing happening
 * SUSPEND_PENDING:	suspend event queued for thread and pending to be read
 * SUSPEND_READ:	suspend event read, pending acknowledgement
 * SUSPEND_ACKED:	acknowledgement received from thread (via ioctl),
 *			waiting for resume
 * SUSPEND_ACKTO:	acknowledgement timeout
 * SUSPEND_DONE:	thread had acked suspend and is now notified of
 *			resume
 *
 * SUSPEND_WAIT:	this thread invoked suspend and is waiting for resume
 *
 * A thread migrates in one of three paths:
 *	NONE -1-> PENDING -2-> READ -3-> ACKED -4-> DONE -5-> NONE
 *					    -6-> ACKTO -7-> NONE
 *	NONE -8-> WAIT -9-> NONE
 *
 * While in PENDING or READ, the thread is accounted for in the
 * suspend_acks_pending counter.
 *
 * The transitions are invoked as follows:
 *	1: suspend event is signalled from the core PM code
 *	2: the suspend event is read from the fd by the userspace thread
 *	3: userspace thread issues the APM_IOC_SUSPEND ioctl (as ack)
 *	4: core PM code signals that we have resumed
 *	5: APM_IOC_SUSPEND ioctl returns
 *
 *	6: the notifier invoked from the core PM code timed out waiting
 *	   for all relevant threads to enter ACKED state and puts those
 *	   that haven't into ACKTO
 *	7: those threads issue APM_IOC_SUSPEND ioctl too late,
 *	   get an error
 *
 *	8: userspace thread issues the APM_IOC_SUSPEND ioctl (to suspend),
 *	   ioctl code invokes pm_suspend()
 *	9: pm_suspend() returns indicating resume
 */
enum apm_suspend_state {
	SUSPEND_NONE,
	SUSPEND_PENDING,
	SUSPEND_READ,
	SUSPEND_ACKED,
	SUSPEND_ACKTO,
	SUSPEND_WAIT,
	SUSPEND_DONE,
};

/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;

	unsigned int		suser: 1;
	unsigned int		writer: 1;
	unsigned int		reader: 1;

	int			suspend_result;
	enum apm_suspend_state	suspend_state;

	struct apm_queue	queue;
};

/*
 * Local variables
 */
static atomic_t suspend_acks_pending = ATOMIC_INIT(0);
static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0);
static int apm_disabled;
static struct task_struct *kapmd_tsk;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;

static DEFINE_MUTEX(state_lock);

static const char driver_version[] = "1.13";	/* no spaces */


/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);
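/*
 * Illustrative sketch (not part of this driver): a machine or platform
 * driver can override the hook above from its own init code.  The
 * function and initcall names below are hypothetical; the numeric
 * values follow the /proc field documentation further down and the
 * apm_power_info fields consumed by proc_apm_show().
 *
 *	static void mymachine_apm_get_power_status(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = 0x01;	// on-line (field 3 below)
 *		info->battery_status = 0x00;	// high (field 4 below)
 *		info->battery_flag   = 1 << 0;	// high (field 5 below)
 *		info->battery_life   = 95;	// percent
 *		info->units          = 0;	// minutes
 *		info->time           = 120;	// remaining time
 *	}
 *
 *	static int __init mymachine_apm_init(void)
 *	{
 *		apm_get_power_status = mymachine_apm_get_power_status;
 *		return 0;
 *	}
 *	late_initcall(mymachine_apm_init);
 */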
/*
 * APM event queue management.
 */
static inline int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		static int notified;

		if (notified++ == 0)
			printk(KERN_ERR "apm: an event queue overflowed\n");
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}

static void queue_event(apm_event_t event)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as->reader)
			queue_add_event(&as->queue, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}

static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK)
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		mutex_lock(&state_lock);
		if (as->suspend_state == SUSPEND_PENDING &&
		    (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND))
			as->suspend_state = SUSPEND_READ;
		mutex_unlock(&state_lock);

		buf += sizeof(event);
		i -= sizeof(event);
	}

	if (i < count)
		ret = count - i;

	return ret;
}

static unsigned int apm_poll(struct file *fp, poll_table * wait)
{
	struct apm_user *as = fp->private_data;

	poll_wait(fp, &apm_waitqueue, wait);
	return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
}
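/*
 * Illustrative userspace sketch (not kernel code): a daemon that has
 * opened /dev/apm_bios read-write waits for events with poll()/read()
 * and, on a suspend event, acknowledges it with APM_IOC_SUSPEND as
 * described in the state machine comment above.  Error handling is
 * omitted and the program is an assumption about typical apmd-style
 * usage, not a copy of any particular tool.  Note that apm_ioctl()
 * below requires the caller to have CAP_SYS_ADMIN and a writable fd.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/apm_bios.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/apm_bios", O_RDWR);
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		apm_event_t ev;
 *
 *		for (;;) {
 *			poll(&pfd, 1, -1);
 *			if (read(fd, &ev, sizeof(ev)) != sizeof(ev))
 *				continue;
 *			if (ev == APM_USER_SUSPEND || ev == APM_SYS_SUSPEND)
 *				ioctl(fd, APM_IOC_SUSPEND);	// ack; returns after resume
 *		}
 *	}
 */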
/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions.  It is used to:
 *     - initiate a suspend
 *     - acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen.
 */
static long
apm_ioctl(struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	int err = -EINVAL;

	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		mutex_lock(&state_lock);

		as->suspend_result = -EINTR;

		switch (as->suspend_state) {
		case SUSPEND_READ:
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			atomic_dec(&suspend_acks_pending);
			mutex_unlock(&state_lock);

			/*
			 * suspend_acks_pending changed, the notifier needs to
			 * be woken up for this
			 */
			wake_up(&apm_suspend_waitqueue);

			/*
			 * Wait for the suspend/resume to complete.  If there
			 * are pending acknowledgements, we wait here for them.
			 * wait_event_freezable() is interruptible and a pending
			 * signal can cause busy looping.  We aren't doing
			 * anything critical, chill a bit on each iteration.
			 */
			while (wait_event_freezable(apm_suspend_waitqueue,
					as->suspend_state != SUSPEND_ACKED))
				msleep(10);
			break;
		case SUSPEND_ACKTO:
			as->suspend_result = -ETIMEDOUT;
			mutex_unlock(&state_lock);
			break;
		default:
			as->suspend_state = SUSPEND_WAIT;
			mutex_unlock(&state_lock);

			/*
			 * Otherwise it is a request to suspend the system.
			 * Just invoke pm_suspend(), we'll handle it from
			 * there via the notifier.
			 */
			as->suspend_result = pm_suspend(PM_SUSPEND_MEM);
		}

		mutex_lock(&state_lock);
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		mutex_unlock(&state_lock);
		break;
	}

	return err;
}

static int apm_release(struct inode * inode, struct file * filp)
{
	struct apm_user *as = filp->private_data;

	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.
	 */
	mutex_lock(&state_lock);
	if (as->suspend_state == SUSPEND_PENDING ||
	    as->suspend_state == SUSPEND_READ)
		atomic_dec(&suspend_acks_pending);
	mutex_unlock(&state_lock);

	wake_up(&apm_suspend_waitqueue);

	kfree(as);
	return 0;
}

static int apm_open(struct inode * inode, struct file * filp)
{
	struct apm_user *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (as) {
		/*
		 * XXX - this is a tiny bit broken, when we consider BSD
		 * process accounting. If the device is opened by root, we
		 * instantly flag that we used superuser privs. Who knows,
		 * we might close the device immediately without doing a
		 * privileged operation -- cevans
		 */
		as->suser = capable(CAP_SYS_ADMIN);
		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

		down_write(&user_list_lock);
		list_add(&as->list, &apm_user_list);
		up_write(&user_list_lock);

		filp->private_data = as;
	}

	return as ? 0 : -ENOMEM;
}

static const struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.unlocked_ioctl	= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
	.llseek		= noop_llseek,
};

static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};


#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *	bit 0: APM_16_BIT_SUPPORT
 *	bit 1: APM_32_BIT_SUPPORT
 *	bit 2: APM_IDLE_SLOWS_CLOCK
 *	bit 3: APM_BIOS_DISABLED
 *	bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *	0x00: Off-line
 *	0x01: On-line
 *	0x02: On backup power (BIOS >= 1.1 only)
 *	0xff: Unknown
 *   4) Battery status
 *	0x00: High
 *	0x01: Low
 *	0x02: Critical
 *	0x03: Charging
 *	0x04: Selected battery not present (BIOS >= 1.2 only)
 *	0xff: Unknown
 *   5) Battery flag
 *	bit 0: High
 *	bit 1: Low
 *	bit 2: Critical
 *	bit 3: Charging
 *	bit 7: No system battery
 *	0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *	0-100: valid
 *	-1: Unknown
 *   7) Remaining battery life (time units):
 *	Number of remaining minutes or seconds
 *	-1: Unknown
 *   8) min = minutes; sec = seconds
 */
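/*
 * For illustration, assuming a machine hook reporting AC on-line and a
 * high battery at 95% with 120 minutes remaining (as in the sketch near
 * apm_get_power_status above), and assuming APM_32_BIT_SUPPORT is 0x0002
 * in linux/apm_bios.h, the line produced by proc_apm_show() would read:
 *
 *	1.13 1.2 0x02 0x01 0x00 0x01 95% 120 min
 *
 * i.e. driver version, emulated BIOS version, APM flags, AC line status,
 * battery status, battery flag, percentage, time and units.
 */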
static int proc_apm_show(struct seq_file *m, void *v)
{
	struct apm_power_info info;
	char *units;

	info.ac_line_status = 0xff;
	info.battery_status = 0xff;
	info.battery_flag   = 0xff;
	info.battery_life   = -1;
	info.time	    = -1;
	info.units	    = -1;

	if (apm_get_power_status)
		apm_get_power_status(&info);

	switch (info.units) {
	default:	units = "?";	break;
	case 0:		units = "min";	break;
	case 1:		units = "sec";	break;
	}

	seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
		     driver_version, APM_32_BIT_SUPPORT,
		     info.ac_line_status, info.battery_status,
		     info.battery_flag, info.battery_life,
		     info.time, units);

	return 0;
}

static int proc_apm_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_apm_show, NULL);
}

static const struct file_operations apm_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= proc_apm_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int kapmd(void *arg)
{
	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
				!queue_empty(&kapmd_queue) || kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			pm_suspend(PM_SUSPEND_MEM);
			break;

		case APM_CRITICAL_SUSPEND:
			atomic_inc(&userspace_notification_inhibit);
			pm_suspend(PM_SUSPEND_MEM);
			atomic_dec(&userspace_notification_inhibit);
			break;
		}
	} while (1);

	return 0;
}

static int apm_suspend_notifier(struct notifier_block *nb,
				unsigned long event,
				void *dummy)
{
	struct apm_user *as;
	int err;
	unsigned long apm_event;

	/* short-cut emergency suspends */
	if (atomic_read(&userspace_notification_inhibit))
		return NOTIFY_DONE;

	switch (event) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		apm_event = (event == PM_SUSPEND_PREPARE) ?
			APM_USER_SUSPEND : APM_USER_HIBERNATION;
		/*
		 * Queue an event to all "writer" users that we want
		 * to suspend and need their ack.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);

		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state != SUSPEND_WAIT && as->reader &&
			    as->writer && as->suser) {
				as->suspend_state = SUSPEND_PENDING;
				atomic_inc(&suspend_acks_pending);
				queue_add_event(&as->queue, apm_event);
			}
		}

		up_read(&user_list_lock);
		mutex_unlock(&state_lock);
		wake_up_interruptible(&apm_waitqueue);

		/*
		 * Wait for the suspend_acks_pending variable to drop to
		 * zero, meaning everybody acked the suspend event (or the
		 * process was killed.)
		 *
		 * If the app won't answer within a short while we assume it
		 * locked up and ignore it.
		 */
		err = wait_event_interruptible_timeout(
			apm_suspend_waitqueue,
			atomic_read(&suspend_acks_pending) == 0,
			5*HZ);

		/* timed out */
		if (err == 0) {
			/*
			 * Move anybody who timed out to "ack timeout" state.
			 *
			 * We could time out and the userspace does the ACK
			 * right after we time out but before we enter the
			 * locked section here, but that's fine.
			 */
			mutex_lock(&state_lock);
			down_read(&user_list_lock);
			list_for_each_entry(as, &apm_user_list, list) {
				if (as->suspend_state == SUSPEND_PENDING ||
				    as->suspend_state == SUSPEND_READ) {
					as->suspend_state = SUSPEND_ACKTO;
					atomic_dec(&suspend_acks_pending);
				}
			}
			up_read(&user_list_lock);
			mutex_unlock(&state_lock);
		}

		/* let suspend proceed */
		if (err >= 0)
			return NOTIFY_OK;

		/* interrupted by signal */
		return notifier_from_errno(err);

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		apm_event = (event == PM_POST_SUSPEND) ?
			APM_NORMAL_RESUME : APM_HIBERNATION_RESUME;
		/*
		 * Anyone on the APM queues will think we're still suspended.
		 * Send a message so everyone knows we're now awake again.
		 */
		queue_event(apm_event);

		/*
		 * Finally, wake up anyone who is sleeping on the suspend.
		 */
		mutex_lock(&state_lock);
		down_read(&user_list_lock);
		list_for_each_entry(as, &apm_user_list, list) {
			if (as->suspend_state == SUSPEND_ACKED) {
				/*
				 * TODO: maybe grab error code, needs core
				 * changes to push the error to the notifier
				 * chain (could use the second parameter if
				 * implemented)
				 */
				as->suspend_result = 0;
				as->suspend_state = SUSPEND_DONE;
			}
		}
		up_read(&user_list_lock);
		mutex_unlock(&state_lock);

		wake_up(&apm_suspend_waitqueue);
		return NOTIFY_OK;

	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block apm_notif_block = {
	.notifier_call = apm_suspend_notifier,
};

static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	kapmd_tsk = kthread_create(kapmd, NULL, "kapmd");
	if (IS_ERR(kapmd_tsk)) {
		ret = PTR_ERR(kapmd_tsk);
		kapmd_tsk = NULL;
		goto out;
	}
	wake_up_process(kapmd_tsk);

#ifdef CONFIG_PROC_FS
	proc_create("apm", 0, NULL, &apm_proc_fops);
#endif

	ret = misc_register(&apm_device);
	if (ret)
		goto out_stop;

	ret = register_pm_notifier(&apm_notif_block);
	if (ret)
		goto out_unregister;

	return 0;

 out_unregister:
	misc_deregister(&apm_device);
 out_stop:
	remove_proc_entry("apm", NULL);
	kthread_stop(kapmd_tsk);
 out:
	return ret;
}

static void __exit apm_exit(void)
{
	unregister_pm_notifier(&apm_notif_block);
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	kthread_stop(kapmd_tsk);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
static int __init apm_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			apm_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			apm_disabled = 0;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("apm=", apm_setup);
#endif

/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);
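/*
 * Illustrative sketch (hypothetical machine code, not part of this
 * driver): a PMU or charger driver reports power events by feeding
 * kapmd through apm_queue_event().  Since the function only takes
 * kapmd_queue_lock with interrupts saved and does an interruptible
 * wake-up, calling it from an interrupt handler should be fine.
 *
 *	static irqreturn_t mymachine_pmu_irq(int irq, void *dev_id)
 *	{
 *		apm_queue_event(APM_POWER_STATUS_CHANGE);
 *		return IRQ_HANDLED;
 *	}
 */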