/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_osi_setup_late(void);

/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22,
 * Linux responded TRUE upon a BIOS _OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this
 * and put _OSI(Linux) in their example code, quickly exposing
 * this string as ill-conceived and opening the door to
 * an unbounded number of BIOS incompatibilities.
 *
 * For example, _OSI(Linux) was used on resume to re-POST a
 * video card on one system, because Linux at that time
 * could not do a speedy restore in its native driver.
 * But then upon gaining quick native restore capability,
 * Linux had no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.
 * On another system, the BIOS writer used _OSI(Linux)
 * to infer native OS support for IPMI!  On other systems,
 * _OSI(Linux) simply got in the way of Linux claiming to
 * be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of
 * _OSI string, and from Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * Linux will complain on the console when it sees it, and return FALSE.
 * To get Linux to return TRUE for your system will require
 * a kernel source update to add a DMI entry,
 * or a boot with "acpi_osi=Linux".
 */

static struct osi_linux {
	unsigned int enable:1;
	unsigned int dmi:1;
	unsigned int cmdline:1;
	unsigned int default_disabling:1;
} osi_linux = {0, 0, 0, 0};

static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    osi_linux.cmdline ? " via cmdline" :
			    osi_linux.dmi ? " via DMI" : "");
	}

	if (!strcmp("Darwin", interface)) {
		/*
		 * Apple firmware will behave poorly if it receives positive
		 * answers to "Darwin" and any other OS.  Respond positively
		 * to Darwin and then disable all other vendor strings.
		 */
		acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
		supported = ACPI_UINT32_MAX;
	}

	return supported;
}
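/*
 * Illustrative sketch (not part of the original file): the DMI quirks that
 * still force _OSI(Linux) to answer TRUE live in drivers/acpi/blacklist.c
 * and funnel into acpi_dmi_osi_linux() below.  The entry here is
 * hypothetical:
 *
 *	static int __init enable_osi_linux_quirk(const struct dmi_system_id *d)
 *	{
 *		acpi_dmi_osi_linux(1, d);	// answer TRUE to _OSI(Linux)
 *		return 0;
 *	}
 *
 *	static const struct dmi_system_id example_quirks[] __initconst = {
 *		{
 *			.callback = enable_osi_linux_quirk,
 *			.ident = "Example Notebook 1000",
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *				DMI_MATCH(DMI_PRODUCT_NAME, "Notebook 1000"),
 *			},
 *		},
 *		{}
 *	};
 */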
static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0)
		printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
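/*
 * Usage sketch (the address below is purely illustrative): a kexec'ed
 * kernel that cannot rediscover the RSDP on its own can be told where
 * it lives:
 *
 *	acpi_rsdp=0x3ffd6014
 *
 * The value is consumed by acpi_os_get_root_pointer() below before the
 * EFI tables or the legacy memory scan are consulted.
 */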
acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
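/*
 * Usage sketch (device address and size are hypothetical): code that gets
 * a physical address out of a firmware table can lean on the refcounted
 * mapping cache above instead of calling ioremap() directly:
 *
 *	void __iomem *regs = acpi_os_map_iomem(0xfed40000, 0x80);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	... readb(regs), readl(regs + 4), ... ;
 *	acpi_os_unmap_iomem(regs, 0x80);
 *
 * Mapping the same range twice yields the same acpi_ioremap entry with an
 * elevated refcount; the underlying mapping is only torn down (in
 * acpi_os_unmap_iomem(), below) when the last reference is dropped.
 */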
void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    char **new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}

#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));
	return sum;
}
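/*
 * A table image is valid when the byte-wise sum of the whole table,
 * including its checksum field, is 0 (mod 256).  A producer would set the
 * field like this (hypothetical helper, same arithmetic as above):
 *
 *	static void acpi_table_fixup_checksum(struct acpi_table_header *t)
 *	{
 *		t->checksum = 0;
 *		t->checksum = -acpi_table_checksum((u8 *)t, t->length);
 *	}
 *
 * which is why a non-zero return from acpi_table_checksum() means
 * "corrupt table" in the override loop below.
 */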
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)

void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Calling e820_add_reserve() alone does not work: the tables end up
	 * invalid (their memory gets reused) later.  memblock_reserve()
	 * works as expected and the tables won't get modified.  But it's
	 * not enough on x86, because ioremap (used by acpi_os_map_memory)
	 * will complain later that the pages to be mapped are not marked
	 * "reserved".  Using both memblock_reserve() and e820_add_region()
	 * (via arch_reserve_mem_area()) works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap can only remap 256 KB at a time.  If we mapped all
	 * tables at once we would hit that limit, so map and copy chunk by
	 * chunk, the same as relocate_initrd() does.
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address, u32 *length)
{
	int table_offset = 0;
	int table_index = 0;
	struct acpi_table_header *table;
	u32 table_length;

	*length = 0;
	*address = 0;
	if (!acpi_tables_addr)
		return AE_OK;

	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_length = table->length;

		/* Only override tables matched */
		if (test_bit(table_index, acpi_initrd_installed) ||
		    memcmp(existing_table->signature, table->signature, 4) ||
		    memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			goto next_table;
		}

		*length = table_length;
		*address = acpi_tables_addr + table_offset;
		acpi_table_taint(existing_table);
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		set_bit(table_index, acpi_initrd_installed);
		break;

next_table:
		table_offset += table_length;
		table_index++;
	}
	return AE_OK;
}

void __init acpi_initrd_initialize_tables(void)
{
	int table_offset = 0;
	int table_index = 0;
	u32 table_length;
	struct acpi_table_header *table;

	if (!acpi_tables_addr)
		return;

	while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);
		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return;
		}

		table_length = table->length;

		/* Skip RSDT/XSDT which should only be used for override */
		if (test_bit(table_index, acpi_initrd_installed) ||
		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
		    ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			goto next_table;
		}

		acpi_table_taint(table);
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		acpi_install_table(acpi_tables_addr + table_offset, TRUE);
		set_bit(table_index, acpi_initrd_installed);
next_table:
		table_offset += table_length;
		table_index++;
	}
}
#else
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
	*table_length = 0;
	*address = 0;
	return AE_OK;
}

void __init acpi_initrd_initialize_tables(void)
{
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */

acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
		       struct acpi_table_header **new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand.
 * Returns a 64-bit free-running, monotonically increasing timer
 * with 100ns granularity.
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
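/*
 * Width semantics sketch: ACPICA passes the access width in bits and the
 * helpers above round up to the next I/O instruction size.  For example
 * (the port number is purely illustrative):
 *
 *	u32 val;
 *
 *	acpi_os_read_port(0xb2, &val, 8);	// inb(), result in bits 0-7
 *	acpi_os_write_port(0xb2, 0xf0, 8);	// outb()
 *
 * Widths above 32 bits indicate a caller bug and trip BUG().
 */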
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);
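/*
 * Registration sketch (the backend names are hypothetical): an AML
 * debugger front-end hands in its callbacks once, and the wrappers below
 * dispatch to them under acpi_debugger.lock with the owning module pinned:
 *
 *	static const struct acpi_debugger_ops my_dbg_ops = {
 *		.create_thread		 = my_create_thread,
 *		.read_cmd		 = my_read_cmd,
 *		.write_log		 = my_write_log,
 *		.wait_command_ready	 = my_wait_command_ready,
 *		.notify_command_complete = my_notify_command_complete,
 *	};
 *
 *	ret = acpi_register_debugger(THIS_MODULE, &my_dbg_ops);
 *
 * The in-tree user of this interface is drivers/acpi/acpi_dbg.c.
 */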
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred
 *              execution or immediately executes function on a separate
 *              thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
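/*
 * Usage sketch (callback and context are illustrative): ACPICA defers
 * work, e.g. running a Notify handler, through this OSL entry point:
 *
 *	static void my_notify_fn(void *context)
 *	{
 *		// runs later from kacpi_notify_wq, always on CPU 0
 *	}
 *
 *	status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, ctx);
 *
 * The acpi_os_dpc wrapper frees itself after the callback returns, so the
 * caller only has to keep 'ctx' alive.
 */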
void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
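/*
 * Usage sketch: ACPICA treats the returned handle as opaque; a typical
 * sequence on the interpreter side looks like this (error handling
 * elided):
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);
 *	... critical section ...
 *	acpi_os_signal_semaphore(sem, 1);
 *	acpi_os_delete_semaphore(sem);
 *
 * Timeouts are given in milliseconds; ACPI_WAIT_FOREVER maps to
 * MAX_SCHEDULE_TIMEOUT in acpi_os_wait_semaphore() above.
 */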
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

#define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define	OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};

void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		if (*str == '\0') {
			osi_linux.default_disabling = 1;
			return;
		} else if (*str == '*') {
			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
				osi = &osi_setup_entries[i];
				osi->enable = false;
			}
			return;
		}
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}

static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	if (osi_linux.enable)
		acpi_osi_setup("Linux");
	else
		acpi_osi_setup("!Linux");

	return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}

/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}

static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);
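/*
 * Boot-parameter sketch (see also kernel-parameters.txt; the quoted
 * Windows string is one example of an interface ACPICA knows about):
 *
 *	acpi_osi=		disable the _OSI method entirely
 *	acpi_osi=!		disable all built-in OS vendor strings
 *	acpi_osi=!*		disable all strings, built-in and custom
 *	acpi_osi="Windows 2009"	answer TRUE to _OSI("Windows 2009")
 *	acpi_osi=!Linux		answer FALSE to _OSI(Linux) (the default)
 *	acpi_osi=Linux		answer TRUE to _OSI(Linux)
 */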
/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/*
 * Check for resource conflicts between ACPI OperationRegions and native
 * drivers.
 */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
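/*
 * Usage sketch (driver name and port range are hypothetical): a native
 * driver probing a legacy I/O block can ask whether AML claims it before
 * calling request_region():
 *
 *	if (acpi_check_region(0x295, 2, "examplechip"))
 *		return -EBUSY;	// strict mode and an OpRegion overlaps
 *
 * With acpi_enforce_resources=lax the same call logs the warnings above
 * but returns 0, so the native driver binds anyway.
 */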
/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
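/*
 * Hook sketch (the callback name is hypothetical; Xen installs a hook of
 * this shape): a hypervisor that performs the final PM1x control writes
 * itself returns a positive value, which acpi_os_prepare_sleep() above
 * turns into AE_CTRL_SKIP so ACPICA skips the native register writes:
 *
 *	static int my_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl, u32 pm1b_ctrl)
 *	{
 *		// hand sleep_state and the PM1x values to the hypervisor
 *		return 1;	// >0: skip, 0: continue, <0: AE_ERROR
 *	}
 *
 *	acpi_os_set_prepare_sleep(my_prepare_sleep);
 */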
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}