/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
#define PREFIX		"ACPI: "
struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif				/* ENABLE_DEBUGGER */

static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;

struct acpi_res_list {
	resource_size_t start;
	resource_size_t end;
	acpi_adr_space_type resource_type; /* IO port, System memory, ... */
	char name[5];	/* can only have a length of 4 chars, make use of this
			   one instead of res->name, no need to kmalloc then */
	struct list_head resource_list;
};

static LIST_HEAD(resource_list_head);
static DEFINE_SPINLOCK(acpi_res_lock);

#define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
static char osi_additional_string[OSI_STRING_LENGTH_MAX];

/*
 * "Ode to _OSI(Linux)"
 *
 * osi_linux -- Control response to BIOS _OSI(Linux) query.
 *
 * As Linux evolves, the features that it supports change.
 * So an OSI string such as "Linux" is not specific enough
 * to be useful across multiple versions of Linux.  It
 * doesn't identify any particular feature, interface,
 * or even any particular version of Linux...
 *
 * Unfortunately, Linux-2.6.22 and earlier responded "yes"
 * to a BIOS _OSI(Linux) query.  When
 * a reference mobile BIOS started using it, its use
 * started to spread to many vendor platforms.
 * As it is not supportable, we need to halt that spread.
 *
 * Today, most BIOS references to _OSI(Linux) are noise --
 * they have no functional effect and are just dead code
 * carried over from the reference BIOS.
 *
 * The next most common case is that _OSI(Linux) harms Linux,
 * usually by causing the BIOS to follow paths that are
 * not tested during Windows validation.
 *
 * Finally, there is a short list of platforms
 * where OSI(Linux) benefits Linux.
 *
 * In Linux-2.6.23, OSI(Linux) is first disabled by default.
 * DMI is used to disable the dmesg warning about OSI(Linux)
 * on platforms where it is known to have no effect.
 * But a dmesg warning remains for systems where
 * we do not know if OSI(Linux) is good or bad for the system.
 * DMI is also used to enable OSI(Linux) for the machines
 * that are known to need it.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * It will be ignored by default, and to get Linux to
 * not ignore it will require a kernel source update to
 * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
 */
#define OSI_LINUX_ENABLE 0

static struct osi_linux {
	unsigned int	enable:1;
	unsigned int	dmi:1;
	unsigned int	cmdline:1;
	unsigned int	known:1;
} osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
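/*
 * In practice the _OSI(Linux) answer is controlled from two places, both
 * implemented later in this file:
 *
 *  - the kernel command line, e.g. "acpi_osi=Linux" or "acpi_osi=!Linux",
 *    parsed by acpi_osi_setup() which calls acpi_cmdline_osi_linux();
 *
 *  - a per-platform DMI quirk whose callback invokes acpi_dmi_osi_linux(),
 *    marking the platform as known and setting the default accordingly.
 */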
static void __init acpi_request_region(struct acpi_generic_address *addr,
	unsigned int length, char *desc)
{
	struct resource *res;

	if (!addr->address || !length)
		return;

	if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		res = request_region(addr->address, length, desc);
	else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		res = request_mem_region(addr->address, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
device_initcall(acpi_reserve_resources);

acpi_status __init acpi_os_initialize(void)
{
	return AE_OK;
}

acpi_status acpi_os_initialize1(void)
{
	kacpid_wq = create_singlethread_workqueue("kacpid");
	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_irq_irq,
						 acpi_irq_handler);
	}

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);

	return AE_OK;
}

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk("%s", buffer);
	}
#else
	printk("%s", buffer);
#endif
}

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	if (efi_enabled) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}
}

void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}
	if (acpi_gbl_permanent_mmap)
		/*
		 * ioremap checks to ensure this is in reserved space
		 */
		return ioremap((unsigned long)phys, size);
	else
		return __acpi_map_table((unsigned long)phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
{
	if (acpi_gbl_permanent_mmap) {
		iounmap(virt);
	}
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string * new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	return AE_OK;
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL) {
		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
		       "this is unsafe: tainting kernel\n",
		       existing_table->signature,
		       existing_table->oem_table_id);
		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
	}
	return AE_OK;
}
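/*
 * SCI (System Control Interrupt) handling: acpi_irq() below is the raw
 * IRQ handler registered via request_irq() in
 * acpi_os_install_interrupt_handler().  It simply forwards to the handler
 * that ACPICA installed (acpi_irq_handler) and translates the result into
 * IRQ_HANDLED/IRQ_NONE for the kernel's shared-interrupt accounting.
 */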
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * Ignore the GSI from the core, and use the value in our copy of the
	 * FADT. It may not be the same if an interrupt source override exists
	 * for the SCI.
	 */
	gsi = acpi_gbl_FADT.sci_interrupt;
	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		return AE_NOT_ACQUIRED;
	}
	acpi_irq_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq) {
		free_irq(irq, acpi_irq);
		acpi_irq_handler = NULL;
		acpi_irq_irq = 0;
	}

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(acpi_integer ms)
{
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	static u64 t;

#ifdef	CONFIG_HPET
	/* TBD: use HPET if available */
#endif

#ifdef	CONFIG_X86_PM_TIMER
	/* TBD: default to PM timer if HPET was not available */
#endif
	if (!t)
		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");

	return ++t;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
{
	u32 dummy;
	void __iomem *virt_addr;

	virt_addr = ioremap(phys_addr, width);
	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	default:
		BUG();
	}

	iounmap(virt_addr);

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
	void __iomem *virt_addr;

	virt_addr = ioremap(phys_addr, width);

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	default:
		BUG();
	}

	iounmap(virt_addr);

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u32 *value, u32 width)
{
	int result, size;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				acpi_integer value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
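/*
 * Background for the PCI ID derivation below: a device's _ADR encodes the
 * PCI device number in the high 16 bits and the function number in the low
 * 16 bits (hence ACPI_HIWORD/ACPI_LOWORD of the low dword).  The config
 * space offsets read along the way are standard PCI header fields: 0x0e is
 * the header type (0x01 = PCI-to-PCI bridge, 0x02 = CardBus bridge), and
 * 0x18/0x19 hold a bridge's primary/secondary bus numbers.
 */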
/* TODO: Change code to take advantage of driver model more */
static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound  */
				    acpi_handle chandle,	/* current node */
				    struct acpi_pci_id **id,
				    int *is_bridge, u8 * bus_number)
{
	acpi_handle handle;
	struct acpi_pci_id *pci_id = *id;
	acpi_status status;
	unsigned long long temp;
	acpi_object_type type;

	acpi_get_parent(chandle, &handle);
	if (handle != rhandle) {
		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
					bus_number);

		status = acpi_get_type(handle, &type);
		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
			return;

		status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
					       &temp);
		if (ACPI_SUCCESS(status)) {
			u32 val;
			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));

			if (*is_bridge)
				pci_id->bus = *bus_number;

			/* any nicer way to get bus number of bridge ? */
			status =
			    acpi_os_read_pci_configuration(pci_id, 0x0e, &val,
							   8);
			if (ACPI_SUCCESS(status)
			    && ((val & 0x7f) == 1 || (val & 0x7f) == 2)) {
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x18,
								   &val, 8);
				if (!ACPI_SUCCESS(status)) {
					/* Certainly broken...  FIX ME */
					return;
				}
				*is_bridge = 1;
				pci_id->bus = val;
				status =
				    acpi_os_read_pci_configuration(pci_id, 0x19,
								   &val, 8);
				if (ACPI_SUCCESS(status)) {
					*bus_number = val;
				}
			} else
				*is_bridge = 0;
		}
	}
}

void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound  */
			   acpi_handle chandle,	/* current node */
			   struct acpi_pci_id **id)
{
	int is_bridge = 1;
	u8 bus_number = (*id)->bus;

	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
}
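/*
 * Deferred execution: callbacks are wrapped in an acpi_os_dpc and run from
 * workqueue context.  Ordinary work goes to the dedicated kacpid_wq or
 * kacpi_notify_wq queues; hotplug work is pushed to the kernel's shared
 * workqueue via schedule_work(), because acpi_os_execute_hp_deferred()
 * first calls acpi_os_wait_events_complete(), which flushes both ACPI
 * workqueues and would deadlock if run on one of them.
 */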
static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
	if (!dpc) {
		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
		return;
	}

	dpc->function(dpc->context);
	kfree(dpc);

	return;
}

static void acpi_os_execute_hp_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
	if (!dpc) {
		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
		return;
	}

	acpi_os_wait_events_complete(NULL);

	dpc->function(dpc->context);
	kfree(dpc);

	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

static acpi_status __acpi_os_execute(acpi_execute_type type,
	acpi_osd_exec_callback function, void *context, int hp)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (!function)
		return AE_BAD_PARAMETER;

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return_ACPI_STATUS(AE_NO_MEMORY);

	dpc->function = function;
	dpc->context = context;

	if (!hp) {
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
		queue = (type == OSL_NOTIFY_HANDLER) ?
			kacpi_notify_wq : kacpid_wq;
		ret = queue_work(queue, &dpc->work);
	} else {
		INIT_WORK(&dpc->work, acpi_os_execute_hp_deferred);
		ret = schedule_work(&dpc->work);
	}

	if (!ret) {
		printk(KERN_ERR PREFIX
		       "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return_ACPI_STATUS(status);
}

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	return __acpi_os_execute(type, function, context, 0);
}
EXPORT_SYMBOL(acpi_os_execute);

acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
	void *context)
{
	return __acpi_os_execute(0, function, context, 1);
}

void acpi_os_wait_events_complete(void *context)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

EXPORT_SYMBOL(acpi_os_wait_events_complete);

/*
 * Allocate the memory for a spinlock and initialize it.
 */
acpi_status acpi_os_create_lock(acpi_spinlock * handle)
{
	spin_lock_init(*handle);

	return AE_OK;
}

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	return;
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;
	memset(sem, 0, sizeof(struct semaphore));

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}
/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif				/* ACPI_FUTURE_USAGE */

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && str && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable) {
		osi_linux.enable = enable;
		printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
			enable ? "Add" : "Delet");
	}
	return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default */
	set_osi_linux(enable);

	return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */

	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.known = 1;	/* DMI knows which OSI(Linux) default needed */

	set_osi_linux(enable);

	return;
}

/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
int __init acpi_osi_setup(char *str)
{
	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
	} else if (!strcmp("!Linux", str)) {
		acpi_cmdline_osi_linux(0);	/* !enable */
	} else if (*str == '!') {
		if (acpi_osi_invalidate(++str) == AE_OK)
			printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
	} else if (!strcmp("Linux", str)) {
		acpi_cmdline_osi_linux(1);	/* enable */
	} else if (*osi_additional_string == '\0') {
		strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
		printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
	}

	return 1;
}

__setup("acpi_osi=", acpi_osi_setup);
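/*
 * Example kernel command lines accepted by acpi_osi_setup() above
 * ("MyString" is a made-up placeholder, not a real interface name):
 *
 *	acpi_osi=		disable the _OSI method entirely
 *	acpi_osi=!Linux		answer "no" to _OSI(Linux)
 *	acpi_osi=Linux		answer "yes" to _OSI(Linux)
 *	acpi_osi=!MyString	remove a built-in _OSI string
 *	acpi_osi=MyString	add one additional _OSI string
 */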
/* enable serialization to combat AE_ALREADY_EXISTS errors */
static int __init acpi_serialize_setup(char *str)
{
	printk(KERN_INFO PREFIX "serialize enabled\n");

	acpi_gbl_all_methods_serialized = TRUE;

	return 1;
}

__setup("acpi_serialize", acpi_serialize_setup);

/*
 * Wake and Run-Time GPEs are expected to be separate.
 * We disable wake-GPEs at run-time to prevent spurious
 * interrupts.
 *
 * However, if a system exists that shares Wake and
 * Run-time events on the same GPE this flag is available
 * to tell Linux to keep the wake-time GPEs enabled at run-time.
 */
static int __init acpi_wake_gpes_always_on_setup(char *str)
{
	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");

	acpi_gbl_leave_wake_gpes_disabled = FALSE;

	return 1;
}

__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (2)
 *     -> further driver trying to access the resources will not load
 *   - lax (default) (1)
 *     -> further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no (0)
 *     -> ACPI Operation Region resources will not be registered
 */
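/*
 * For example, booting with "acpi_enforce_resources=strict" makes
 * acpi_check_resource_conflict() below return -EBUSY whenever a native
 * driver's I/O or memory range overlaps a declared OperationRegion, so a
 * well-behaved driver bails out instead of racing with AML.
 */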
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(struct resource *res)
{
	struct acpi_res_list *res_list_elem;
	int ioport;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	ioport = res->flags & IORESOURCE_IO;

	spin_lock(&acpi_res_lock);
	list_for_each_entry(res_list_elem, &resource_list_head,
			    resource_list) {
		if (ioport && (res_list_elem->resource_type
			       != ACPI_ADR_SPACE_SYSTEM_IO))
			continue;
		if (!ioport && (res_list_elem->resource_type
				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
			continue;

		if (res->end < res_list_elem->start
		    || res_list_elem->end < res->start)
			continue;
		clash = 1;
		break;
	}
	spin_unlock(&acpi_res_lock);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			printk("%sACPI: %s resource %s [0x%llx-0x%llx]"
			       " conflicts with ACPI region %s"
			       " [0x%llx-0x%llx]\n",
			       acpi_enforce_resources == ENFORCE_RESOURCES_LAX
			       ? KERN_WARNING : KERN_ERR,
			       ioport ? "I/O" : "Memory", res->name,
			       (long long) res->start, (long long) res->end,
			       res_list_elem->name,
			       (long long) res_list_elem->start,
			       (long long) res_list_elem->end);
			printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

int acpi_check_mem_region(resource_size_t start, resource_size_t n,
			  const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_MEM,
	};

	return acpi_check_resource_conflict(&res);

}
EXPORT_SYMBOL(acpi_check_mem_region);
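/*
 * Typical use from a native driver's probe path (sketch only; "mydrv",
 * base and len are placeholders, not symbols defined in this file):
 *
 *	if (acpi_check_region(base, len, "mydrv"))
 *		return -EBUSY;	-- conflict and acpi_enforce_resources=strict
 *	if (!request_region(base, len, "mydrv"))
 *		return -EBUSY;
 */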
/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}

/**
 *	acpi_dmi_dump - dump DMI slots needed for blacklist entry
 *
 *	Returns 0 on success
 */
static int acpi_dmi_dump(void)
{

	if (!dmi_available)
		return -1;

	printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
		dmi_get_system_info(DMI_SYS_VENDOR));
	printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
		dmi_get_system_info(DMI_PRODUCT_NAME));
	printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
		dmi_get_system_info(DMI_PRODUCT_VERSION));
	printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
		dmi_get_system_info(DMI_BOARD_NAME));
	printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
		dmi_get_system_info(DMI_BIOS_VENDOR));
	printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
		dmi_get_system_info(DMI_BIOS_DATE));

	return 0;
}


/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_interface
 *
 * PARAMETERS:  interface           - Requested interface to be validated
 *
 * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
 *
 * DESCRIPTION: Match an interface string to the interfaces supported by the
 *              host. Strings originate from an AML call to the _OSI method.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_interface (char *interface)
{
	if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
		return AE_OK;
	if (!strcmp("Linux", interface)) {

		printk(KERN_NOTICE PREFIX
			"BIOS _OSI(Linux) query %s%s\n",
			osi_linux.enable ? "honored" : "ignored",
			osi_linux.cmdline ? " via cmdline" :
			osi_linux.dmi ? " via DMI" : "");

		if (!osi_linux.dmi) {
			if (acpi_dmi_dump())
				printk(KERN_NOTICE PREFIX
					"[please extract dmidecode output]\n");
			printk(KERN_NOTICE PREFIX
				"Please send DMI info above to "
				"linux-acpi@vger.kernel.org\n");
		}
		if (!osi_linux.known && !osi_linux.cmdline) {
			printk(KERN_NOTICE PREFIX
				"If \"acpi_osi=%sLinux\" works better, "
				"please notify linux-acpi@vger.kernel.org\n",
				osi_linux.enable ? "!" : "");
		}

		if (osi_linux.enable)
			return AE_OK;
	}
	return AE_SUPPORT;
}

/******************************************************************************
 *
 * FUNCTION:    acpi_os_validate_address
 *
 * PARAMETERS:  space_id            - ACPI space ID
 *              address             - Physical address
 *              length              - Address length
 *
 * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
 *              should return AE_AML_ILLEGAL_ADDRESS.
 *
 * DESCRIPTION: Validate a system address via the host OS. Used to validate
 *              the addresses accessed by AML operation regions.
 *
 *****************************************************************************/

acpi_status
acpi_os_validate_address (
    u8                      space_id,
    acpi_physical_address   address,
    acpi_size               length,
    char                    *name)
{
	struct acpi_res_list *res;
	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return AE_OK;

	switch (space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		/* Only interference checks against SystemIO and SystemMemory
		   are needed */
		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
		if (!res)
			return AE_OK;
		/* ACPI names are fixed to 4 bytes, still better use strlcpy */
		strlcpy(res->name, name, 5);
		res->start = address;
		res->end = address + length - 1;
		res->resource_type = space_id;
		spin_lock(&acpi_res_lock);
		list_add(&res->resource_list, &resource_list_head);
		spin_unlock(&acpi_res_lock);
		pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
			 "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
			 ? "SystemIO" : "System Memory",
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 res->name);
		break;
	case ACPI_ADR_SPACE_PCI_CONFIG:
	case ACPI_ADR_SPACE_EC:
	case ACPI_ADR_SPACE_SMBUS:
	case ACPI_ADR_SPACE_CMOS:
	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
	case ACPI_ADR_SPACE_DATA_TABLE:
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		break;
	}
	return AE_OK;
}

#endif