/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <acpi/acpi.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/uaccess.h>

#include <linux/efi.h>

#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl")
#define PREFIX          "ACPI: "
struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif                          /* ENABLE_DEBUGGER */

int acpi_specific_hotkey_enabled = TRUE;
EXPORT_SYMBOL(acpi_specific_hotkey_enabled);

static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;

acpi_status acpi_os_initialize(void)
{
        return AE_OK;
}

acpi_status acpi_os_initialize1(void)
{
        /*
         * Initialize PCI configuration space access, as we'll need to access
         * it while walking the namespace (bus 0 and root bridges w/ _BBNs).
         */
        if (!raw_pci_ops) {
                printk(KERN_ERR PREFIX
                       "Access to PCI configuration space unavailable\n");
                return AE_NULL_ENTRY;
        }
        kacpid_wq = create_singlethread_workqueue("kacpid");
        BUG_ON(!kacpid_wq);

        return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_irq_irq,
                                                 acpi_irq_handler);
        }

        destroy_workqueue(kacpid_wq);

        return AE_OK;
}
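/*
 * Illustrative call order for the init/terminate hooks above (a sketch,
 * assuming the usual acpi_init() bring-up; not built or called from here):
 *
 *      acpi_os_initialize();   - from acpi_initialize_subsystem(), early
 *      ...table load, namespace construction...
 *      acpi_os_initialize1();  - from acpi_init(), once PCI is up
 *      ...runtime...
 *      acpi_os_terminate();    - on subsystem shutdown
 */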
void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}

EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        /* Bound the write: an unchecked vsprintf() could overrun the buffer. */
        vsnprintf(buffer, sizeof(buffer), fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk("%s", buffer);
        }
#else
        printk("%s", buffer);
#endif
}

extern int acpi_in_resume;
void *acpi_os_allocate(acpi_size size)
{
        if (acpi_in_resume)
                return kmalloc(size, GFP_ATOMIC);
        else
                return kmalloc(size, GFP_KERNEL);
}

void acpi_os_free(void *ptr)
{
        kfree(ptr);
}

EXPORT_SYMBOL(acpi_os_free);

acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
{
        if (efi_enabled) {
                addr->pointer_type = ACPI_PHYSICAL_POINTER;
                if (efi.acpi20)
                        addr->pointer.physical =
                            (acpi_physical_address) virt_to_phys(efi.acpi20);
                else if (efi.acpi)
                        addr->pointer.physical =
                            (acpi_physical_address) virt_to_phys(efi.acpi);
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return AE_NOT_FOUND;
                }
        } else {
                if (ACPI_FAILURE(acpi_find_root_pointer(flags, addr))) {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return AE_NOT_FOUND;
                }
        }

        return AE_OK;
}

acpi_status
acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
                   void __iomem ** virt)
{
        if (efi_enabled) {
                if (EFI_MEMORY_WB & efi_mem_attributes(phys)) {
                        *virt = (void __iomem *)phys_to_virt(phys);
                } else {
                        *virt = ioremap(phys, size);
                }
        } else {
                if (phys > ULONG_MAX) {
                        printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                        return AE_BAD_PARAMETER;
                }
                /*
                 * ioremap checks to ensure this is in reserved space
                 */
                *virt = ioremap((unsigned long)phys, size);
        }

        if (!*virt)
                return AE_NO_MEMORY;

        return AE_OK;
}

void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
        iounmap(virt);
}

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string * new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        return AE_OK;
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
                       struct acpi_table_header ** new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
                *new_table = (struct acpi_table_header *)AmlCode;
        else
                *new_table = NULL;
#else
        *new_table = NULL;
#endif
        return AE_OK;
}
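/*
 * SCI interrupt plumbing.  ACPICA installs a single handler (for the SCI)
 * through acpi_os_install_interrupt_handler(); acpi_irq() below is the thin
 * Linux-side wrapper that forwards each hard interrupt to it.  A hedged
 * sketch of the call the ACPICA event layer is expected to make (handler
 * name is illustrative only, not defined here):
 *
 *      status = acpi_os_install_interrupt_handler(acpi_fadt.sci_int,
 *                                                 sci_handler, context);
 */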
static irqreturn_t acpi_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        /*
         * Ignore the GSI from the core, and use the value in our copy of the
         * FADT. It may not be the same if an interrupt source override exists
         * for the SCI.
         */
        gsi = acpi_fadt.sci_int;
        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, SA_SHIRQ, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                return AE_NOT_ACQUIRED;
        }
        acpi_irq_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
        if (irq) {
                free_irq(irq, acpi_irq);
                acpi_irq_handler = NULL;
                acpi_irq_irq = 0;
        }

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(acpi_integer ms)
{
        /*
         * Sleep uninterruptibly: an interruptible wait would return early
         * when a signal is pending and truncate AML Sleep() delays.
         */
        msleep(ms);
}

EXPORT_SYMBOL(acpi_os_sleep);

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

EXPORT_SYMBOL(acpi_os_stall);

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
        static u64 t;

#ifdef CONFIG_HPET
        /* TBD: use HPET if available */
#endif

#ifdef CONFIG_X86_PM_TIMER
        /* TBD: default to PM timer if HPET was not available */
#endif
        if (!t)
                printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

        return ++t;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = inb(port);
                break;
        case 16:
                *(u16 *) value = inw(port);
                break;
        case 32:
                *(u32 *) value = inl(port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        switch (width) {
        case 8:
                outb(value, port);
                break;
        case 16:
                outw(value, port);
                break;
        case 32:
                outl(value, port);
                break;
        default:
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
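/*
 * Hypothetical use of the port OSL above (illustrative only): reading the
 * 16-bit PM1a status register, assuming its I/O address has already been
 * pulled out of the FADT into pm1a_evt:
 *
 *      u32 val;
 *      acpi_os_read_port(pm1a_evt, &val, 16);
 */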
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
        u32 dummy;
        void __iomem *virt_addr;
        int iomem = 0;

        if (efi_enabled) {
                if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                        /* HACK ALERT! We can use readb/w/l on real memory too.. */
                        virt_addr = (void __iomem *)phys_to_virt(phys_addr);
                } else {
                        iomem = 1;
                        virt_addr = ioremap(phys_addr, width);
                }
        } else
                virt_addr = (void __iomem *)phys_to_virt(phys_addr);
        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        default:
                BUG();
        }

        if (efi_enabled) {
                if (iomem)
                        iounmap(virt_addr);
        }

        return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
        void __iomem *virt_addr;
        int iomem = 0;

        if (efi_enabled) {
                if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
                        /* HACK ALERT! We can use writeb/w/l on real memory too */
                        virt_addr = (void __iomem *)phys_to_virt(phys_addr);
                } else {
                        iomem = 1;
                        virt_addr = ioremap(phys_addr, width);
                }
        } else
                virt_addr = (void __iomem *)phys_to_virt(phys_addr);

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (iomem)
                iounmap(virt_addr);

        return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               void *value, u32 width)
{
        int result, size;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        BUG_ON(!raw_pci_ops);

        result = raw_pci_ops->read(pci_id->segment, pci_id->bus,
                                   PCI_DEVFN(pci_id->device, pci_id->function),
                                   reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

EXPORT_SYMBOL(acpi_os_read_pci_configuration);
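/*
 * Note on addressing: raw_pci_ops locates a function by (segment, bus,
 * devfn), while ACPI _ADR packs device and function into one dword
 * (device in the high word, function in the low word).  PCI_DEVFN() folds
 * the pair back together; e.g. _ADR 0x001F0003 (device 0x1f, function 3)
 * yields PCI_DEVFN(0x1f, 3) == 0xfb.
 */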
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                acpi_integer value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        BUG_ON(!raw_pci_ops);

        result = raw_pci_ops->write(pci_id->segment, pci_id->bus,
                                    PCI_DEVFN(pci_id->device, pci_id->function),
                                    reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}

/* TODO: Change code to take advantage of driver model more */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,        /* upper bound */
                                    acpi_handle chandle,        /* current node */
                                    struct acpi_pci_id **id,
                                    int *is_bridge, u8 * bus_number)
{
        acpi_handle handle;
        struct acpi_pci_id *pci_id = *id;
        acpi_status status;
        unsigned long temp;
        acpi_object_type type;
        u8 tu8;

        acpi_get_parent(chandle, &handle);
        if (handle != rhandle) {
                acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
                                        bus_number);

                status = acpi_get_type(handle, &type);
                if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
                        return;

                status =
                    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
                                          &temp);
                if (ACPI_SUCCESS(status)) {
                        pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
                        pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

                        if (*is_bridge)
                                pci_id->bus = *bus_number;

                        /* is there a nicer way to get the bus number of a bridge? */
                        /* config reg 0x0e is the header type; 1 = PCI bridge, 2 = CardBus */
                        status =
                            acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
                                                           8);
                        if (ACPI_SUCCESS(status)
                            && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
                                /* config reg 0x18: primary bus number */
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x18,
                                                                   &tu8, 8);
                                if (ACPI_FAILURE(status)) {
                                        /* Certainly broken...  FIX ME */
                                        return;
                                }
                                *is_bridge = 1;
                                pci_id->bus = tu8;
                                /* config reg 0x19: secondary bus number */
                                status =
                                    acpi_os_read_pci_configuration(pci_id, 0x19,
                                                                   &tu8, 8);
                                if (ACPI_SUCCESS(status)) {
                                        *bus_number = tu8;
                                }
                        } else
                                *is_bridge = 0;
                }
        }
}

void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
                           acpi_handle chandle, /* current node */
                           struct acpi_pci_id **id)
{
        int is_bridge = 1;
        u8 bus_number = (*id)->bus;

        acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}

static void acpi_os_execute_deferred(void *context)
{
        struct acpi_os_dpc *dpc = NULL;

        ACPI_FUNCTION_TRACE("os_execute_deferred");

        dpc = (struct acpi_os_dpc *)context;
        if (!dpc) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
                return_VOID;
        }

        dpc->function(dpc->context);

        kfree(dpc);

        return_VOID;
}
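/*
 * Typical use of the deferred-execution path below (a sketch; the callback
 * name is illustrative, not defined in this file).  ACPICA queues GPE and
 * Notify work instead of running it in interrupt context:
 *
 *      static void my_notify_handler(void *context) { ... }
 *      ...
 *      status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
 *                                           my_notify_handler, context);
 */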
acpi_status
acpi_os_queue_for_execution(u32 priority,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct work_struct *task;

        ACPI_FUNCTION_TRACE("os_queue_for_execution");

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (!function)
                return_ACPI_STATUS(AE_BAD_PARAMETER);

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         * We can save time and code by allocating the DPC and work_struct
         * from the same memory.
         */

        dpc =
            kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
                    GFP_ATOMIC);
        if (!dpc)
                return_ACPI_STATUS(AE_NO_MEMORY);

        dpc->function = function;
        dpc->context = context;

        task = (void *)(dpc + 1);
        INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);

        if (!queue_work(kacpid_wq, task)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Call to queue_work() failed.\n"));
                kfree(dpc);
                status = AE_ERROR;
        }

        return_ACPI_STATUS(status);
}

EXPORT_SYMBOL(acpi_os_queue_for_execution);

void acpi_os_wait_events_complete(void *context)
{
        flush_workqueue(kacpid_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status acpi_os_create_lock(acpi_handle * out_handle)
{
        spinlock_t *lock_ptr;

        ACPI_FUNCTION_TRACE("os_create_lock");

        lock_ptr = acpi_os_allocate(sizeof(spinlock_t));
        if (!lock_ptr)
                return_ACPI_STATUS(AE_NO_MEMORY);

        spin_lock_init(lock_ptr);

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));

        *out_handle = lock_ptr;

        return_ACPI_STATUS(AE_OK);
}

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_handle handle)
{
        ACPI_FUNCTION_TRACE("os_delete_lock");

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));

        acpi_os_free(handle);

        return_VOID;
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;

        ACPI_FUNCTION_TRACE("os_create_semaphore");

        sem = acpi_os_allocate(sizeof(struct semaphore));
        if (!sem)
                return_ACPI_STATUS(AE_NO_MEMORY);
        memset(sem, 0, sizeof(struct semaphore));

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return_ACPI_STATUS(AE_OK);
}

EXPORT_SYMBOL(acpi_os_create_semaphore);

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        ACPI_FUNCTION_TRACE("os_delete_semaphore");

        if (!sem)
                return_ACPI_STATUS(AE_BAD_PARAMETER);

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        acpi_os_free(sem);
        sem = NULL;

        return_ACPI_STATUS(AE_OK);
}

EXPORT_SYMBOL(acpi_os_delete_semaphore);
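/*
 * Worked example for the down_timeout emulation below: with HZ == 250 the
 * scheduler quantum is 1000 / HZ == 4 ms, so a 100 ms wait degenerates into
 * at most ~25 down_trylock() polls with a one-jiffy sleep between attempts
 * -- coarse, but bounded by the requested timeout.
 */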
/*
 * TODO: The kernel doesn't have a 'down_timeout' function -- had to
 * improvise.  The process is to sleep for one scheduler quantum
 * until the semaphore becomes available.  Downside is that this
 * may result in starvation for timeout-based waits when there's
 * lots of semaphore activity.
 *
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        int ret = 0;

        ACPI_FUNCTION_TRACE("os_wait_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS(AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS(AE_SUPPORT);

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (in_atomic())
                timeout = 0;

        switch (timeout) {
                /*
                 * No Wait:
                 * --------
                 * A zero timeout value indicates that we shouldn't wait - just
                 * acquire the semaphore if available otherwise return AE_TIME
                 * (a.k.a. 'would block').
                 */
        case 0:
                if (down_trylock(sem))
                        status = AE_TIME;
                break;

                /*
                 * Wait Indefinitely:
                 * ------------------
                 */
        case ACPI_WAIT_FOREVER:
                down(sem);
                break;

                /*
                 * Wait w/ Timeout:
                 * ----------------
                 */
        default:
                // TODO: A better timeout algorithm?
                {
                        int i = 0;
                        static const int quantum_ms = 1000 / HZ;

                        /*
                         * down_trylock() returns 0 on success and nonzero
                         * when the semaphore is busy; a 'ret < 0' test here
                         * would never be true and would never retry.
                         */
                        ret = down_trylock(sem);
                        for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
                                schedule_timeout_interruptible(1);
                                ret = down_trylock(sem);
                        }

                        if (ret != 0)
                                status = AE_TIME;
                }
                break;
        }

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Failed to acquire semaphore[%p|%d|%d], %s\n",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]\n", handle,
                                  units, timeout));
        }

        return_ACPI_STATUS(status);
}

EXPORT_SYMBOL(acpi_os_wait_semaphore);

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        ACPI_FUNCTION_TRACE("os_signal_semaphore");

        if (!sem || (units < 1))
                return_ACPI_STATUS(AE_BAD_PARAMETER);

        if (units > 1)
                return_ACPI_STATUS(AE_SUPPORT);

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return_ACPI_STATUS(AE_OK);
}

EXPORT_SYMBOL(acpi_os_signal_semaphore);

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, sizeof(line_buf));

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#endif

        return 0;
}
#endif                          /* ACPI_FUTURE_USAGE */

/* Assumes no unreadable holes in between */
u8 acpi_os_readable(void *ptr, acpi_size len)
{
#if defined(__i386__) || defined(__x86_64__)
        char tmp;
        return !__get_user(tmp, (char __user *)ptr)
            && !__get_user(tmp, (char __user *)ptr + len - 1);
#endif
        return 1;
}
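/*
 * Note on the __get_user() probe above: it relies on the exception tables
 * to absorb faults, so it only proves the first and last byte of the range
 * are mapped -- hence the "no unreadable holes in between" assumption.
 */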
#ifdef ACPI_FUTURE_USAGE
u8 acpi_os_writable(void *ptr, acpi_size len)
{
        /* could do dummy write (racy) or a kernel page table lookup.
           The latter may be difficult at early boot when kmap doesn't work yet. */
        return 1;
}
#endif

u32 acpi_os_get_thread_id(void)
{
        if (!in_atomic())
                return current->pid;

        return 0;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * the AML debugger into the kernel debugger, its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_signal);

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && str && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * _OSI control
 * empty string disables _OSI
 * TBD additional string adds to _OSI
 */
static int __init acpi_osi_setup(char *str)
{
        if (str == NULL || *str == '\0') {
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
                acpi_gbl_create_osi_method = FALSE;
        } else {
                /* TBD */
                printk(KERN_ERR PREFIX "_OSI additional string ignored -- %s\n",
                       str);
        }

        return 1;
}

__setup("acpi_osi=", acpi_osi_setup);

/* enable serialization to combat AE_ALREADY_EXISTS errors */
static int __init acpi_serialize_setup(char *str)
{
        printk(KERN_INFO PREFIX "serialize enabled\n");

        acpi_gbl_all_methods_serialized = TRUE;

        return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);

/*
 * Wake and Run-Time GPEs are expected to be separate.
 * We disable wake-GPEs at run-time to prevent spurious
 * interrupts.
 *
 * However, if a system exists that shares Wake and
 * Run-time events on the same GPE this flag is available
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
 */
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
        printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

        acpi_gbl_leave_wake_gpes_disabled = FALSE;

        return 1;
}

__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);

static int __init acpi_hotkey_setup(char *str)
{
        acpi_specific_hotkey_enabled = FALSE;
        return 1;
}

__setup("acpi_generic_hotkey", acpi_hotkey_setup);

/*
 * max_cstate is defined in the base kernel so modules can
 * change it w/o depending on the state of the processor module.
 */
unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;

EXPORT_SYMBOL(max_cstate);
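/*
 * Illustrative pairing for the spinlock OSL below (a sketch; not called
 * from here).  'flags' is the cookie returned by acpi_os_acquire_lock(),
 * not a save_flags() value:
 *
 *      acpi_handle lock;
 *      unsigned long flags;
 *
 *      acpi_os_create_lock(&lock);
 *      flags = acpi_os_acquire_lock(lock);
 *      ...critical section...
 *      acpi_os_release_lock(lock, flags);
 *      acpi_os_delete_lock(lock);
 */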
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 * flags is *not* the result of save_flags - it is an ACPI-specific flag variable
 * that indicates whether we are at interrupt level.
 */

unsigned long acpi_os_acquire_lock(acpi_handle handle)
{
        unsigned long flags;
        spin_lock_irqsave((spinlock_t *) handle, flags);
        return flags;
}

/*
 * Release a spinlock.  See above.
 */

void acpi_os_release_lock(acpi_handle handle, unsigned long flags)
{
        spin_unlock_irqrestore((spinlock_t *) handle, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  CacheName       - Ascii name for the cache
 *              ObjectSize      - Size of each cached object
 *              MaxDepth        - Maximum depth of the cache (in objects)
 *              ReturnCache     - Where the new cache object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
        if (!*cache)
                return AE_ERROR;

        return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
        (void)kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
        (void)kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_acquire_object
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *              ReturnObject    - Where the object is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get an object from the specified cache.  If cache is empty,
 *              the object is allocated.
 *
 ******************************************************************************/

void *acpi_os_acquire_object(acpi_cache_t * cache)
{
        void *object = kmem_cache_alloc(cache, GFP_KERNEL);
        WARN_ON(!object);
        return object;
}

#endif
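/*
 * Summary of the cache OSL above: with ACPI_USE_LOCAL_CACHE unset, ACPICA
 * object caches map directly onto Linux slab caches -- create/delete onto
 * kmem_cache_create()/kmem_cache_destroy(), acquire/release onto
 * kmem_cache_alloc()/kmem_cache_free(), and purge onto kmem_cache_shrink().
 * The MaxDepth parameter is ignored; the slab allocator manages its own depth.
 */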