/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm-generic/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
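
/*
 * Lookups of the list above (including from acpi_os_read_memory() in
 * interrupt context) run under rcu_read_lock(), while all updates are
 * made under acpi_ioremap_lock using list_add_tail_rcu()/list_del_rcu(),
 * with a grace period before a retired mapping is finally torn down.
 */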

static void __init acpi_osi_setup_late(void);

/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22,
 * Linux responded TRUE upon a BIOS OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this
 * and put OSI(Linux) in their example code, quickly exposing
 * this string as ill-conceived and opening the door to
 * an un-bounded number of BIOS incompatibilities.
 *
 * For example, OSI(Linux) was used on resume to re-POST a
 * video card on one system, because Linux at that time
 * could not do a speedy restore in its native driver.
 * But then upon gaining quick native restore capability,
 * Linux has no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.
 * On another system, the BIOS writer used OSI(Linux)
 * to infer native OS support for IPMI!  On other systems,
 * OSI(Linux) simply got in the way of Linux claiming to
 * be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of
 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * Linux will complain on the console when it sees it, and return FALSE.
 * To get Linux to return TRUE for your system will require
 * a kernel source update to add a DMI entry,
 * or boot with "acpi_osi=Linux"
 */

static struct osi_linux {
	unsigned int	enable:1;
	unsigned int	dmi:1;
	unsigned int	cmdline:1;
	unsigned int	default_disabling:1;
} osi_linux = {0, 0, 0, 0};

static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    osi_linux.cmdline ? " via cmdline" :
			    osi_linux.dmi ? " via DMI" : "");
	}

	if (!strcmp("Darwin", interface)) {
		/*
		 * Apple firmware will behave poorly if it receives positive
		 * answers to "Darwin" and any other OS. Respond positively
		 * to Darwin and then disable all other vendor strings.
		 */
		acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
		supported = ACPI_UINT32_MAX;
	}

	return supported;
}

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}
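
/*
 * Typical use of the mapping API below (an illustrative sketch only,
 * with error handling trimmed):
 *
 *	void __iomem *p = acpi_os_map_iomem(phys, 4);
 *	if (p) {
 *		u32 v = readl(p);
 *		acpi_os_unmap_iomem(p, 4);
 *	}
 *
 * Mappings are created page-granular and refcounted, so mapping a range
 * that is already covered just bumps the refcount of the existing entry.
 */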
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    char **new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));
	return sum;
}

/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
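
/*
 * Override tables are picked up from an uncompressed cpio archive
 * prepended to the initrd, one file per table under
 * "kernel/firmware/acpi/".  A sketch of building such an initrd
 * (assuming a standard cpio(1); file names are just examples):
 *
 *	mkdir -p kernel/firmware/acpi
 *	cp DSDT.aml kernel/firmware/acpi/
 *	find kernel | cpio -H newc --create > tables.cpio
 *	cat tables.cpio /boot/initrd.img > /boot/initrd.img.new
 */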
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
					       clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */

static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}


acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

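	/*
	 * Walk the tables staged back to back by acpi_initrd_override();
	 * the first one matching both the signature and the OEM table id
	 * of the existing table is handed back by physical address.
	 */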
	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			continue;
		}

		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	free_irq(irq, acpi_irq);
	acpi_irq_handler = NULL;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
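
/*
 * acpi_os_read_memory()/acpi_os_write_memory() below may run in interrupt
 * context, so they first try to resolve the address against the pre-mapped
 * ranges under the RCU read lock; only on a miss do they fall back to a
 * transient acpi_os_ioremap()/iounmap() pair, which is safe in process
 * context only.
 */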
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred
 *              execution or immediately executes function on a separate
 *              thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_irq_handler)
		synchronize_hardirq(acpi_gbl_FADT.sci_interrupt);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}
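
/*
 * Illustrative lifecycle, as ACPICA drives the semaphore hooks below
 * (a sketch only; ACPICA makes these calls internally):
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);
 *	...
 *	acpi_os_signal_semaphore(sem, 1);
 *	acpi_os_delete_semaphore(sem);
 */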

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif				/*  ACPI_FUTURE_USAGE  */

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

#define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define	OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};

void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		if (*str == '\0') {
			osi_linux.default_disabling = 1;
			return;
		} else if (*str == '*') {
			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
				osi = &osi_setup_entries[i];
				osi->enable = false;
			}
			return;
		}
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}

static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	if (osi_linux.enable)
		acpi_osi_setup("Linux");
	else
		acpi_osi_setup("!Linux");

	return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}

/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
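
/*
 * Examples, as parsed by osi_setup()/acpi_osi_setup() below:
 *
 *	acpi_osi=		disable the _OSI method altogether
 *	acpi_osi=!		disable all built-in OS vendor strings
 *	acpi_osi=!*		disable all strings, built-in and added
 *	acpi_osi="Windows 2009"	answer TRUE to _OSI("Windows 2009")
 *	acpi_osi=!Darwin	answer FALSE to _OSI("Darwin")
 */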
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}

static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
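
/*
 * A native driver would typically call acpi_check_region() before claiming
 * an I/O region (an illustrative sketch; "base" and "mydrv" are made up):
 *
 *	if (acpi_check_region(base, 8, "mydrv"))
 *		return -EBUSY;
 *	if (!request_region(base, 8, "mydrv"))
 *		return -EBUSY;
 */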

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}
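
/*
 * Platform code (Xen, for example) can hook S-state entry through
 * acpi_os_set_prepare_sleep().  Return-code convention for the hook:
 * a negative value is an error (mapped to AE_ERROR), a positive value
 * means the hook entered the sleep state itself (mapped to AE_CTRL_SKIP),
 * and zero lets the normal path continue.
 */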
1795 * 1796 ******************************************************************************/ 1797 1798 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1799 { 1800 kmem_cache_free(cache, object); 1801 return (AE_OK); 1802 } 1803 #endif 1804 1805 static int __init acpi_no_static_ssdt_setup(char *s) 1806 { 1807 acpi_gbl_disable_ssdt_table_install = TRUE; 1808 pr_info("ACPI: static SSDT installation disabled\n"); 1809 1810 return 0; 1811 } 1812 1813 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); 1814 1815 static int __init acpi_disable_return_repair(char *s) 1816 { 1817 printk(KERN_NOTICE PREFIX 1818 "ACPI: Predefined validation mechanism disabled\n"); 1819 acpi_gbl_disable_auto_repair = TRUE; 1820 1821 return 1; 1822 } 1823 1824 __setup("acpica_no_return_repair", acpi_disable_return_repair); 1825 1826 acpi_status __init acpi_os_initialize(void) 1827 { 1828 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1829 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1830 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1831 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1832 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { 1833 /* 1834 * Use acpi_os_map_generic_address to pre-map the reset 1835 * register if it's in system memory. 1836 */ 1837 int rv; 1838 1839 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1840 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); 1841 } 1842 acpi_os_initialized = true; 1843 1844 return AE_OK; 1845 } 1846 1847 acpi_status __init acpi_os_initialize1(void) 1848 { 1849 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1850 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1851 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1852 BUG_ON(!kacpid_wq); 1853 BUG_ON(!kacpi_notify_wq); 1854 BUG_ON(!kacpi_hotplug_wq); 1855 acpi_install_interface_handler(acpi_osi_handler); 1856 acpi_osi_setup_late(); 1857 return AE_OK; 1858 } 1859 1860 acpi_status acpi_os_terminate(void) 1861 { 1862 if (acpi_irq_handler) { 1863 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1864 acpi_irq_handler); 1865 } 1866 1867 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1868 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1869 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1870 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1871 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) 1872 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); 1873 1874 destroy_workqueue(kacpid_wq); 1875 destroy_workqueue(kacpi_notify_wq); 1876 destroy_workqueue(kacpi_hotplug_wq); 1877 1878 return AE_OK; 1879 } 1880 1881 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1882 u32 pm1b_control) 1883 { 1884 int rc = 0; 1885 if (__acpi_os_prepare_sleep) 1886 rc = __acpi_os_prepare_sleep(sleep_state, 1887 pm1a_control, pm1b_control); 1888 if (rc < 0) 1889 return AE_ERROR; 1890 else if (rc > 0) 1891 return AE_CTRL_SKIP; 1892 1893 return AE_OK; 1894 } 1895 1896 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1897 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1898 { 1899 __acpi_os_prepare_sleep = func; 1900 } 1901 1902 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1903 u32 val_b) 1904 { 1905 int rc = 0; 1906 if (__acpi_os_prepare_extended_sleep) 1907 rc = __acpi_os_prepare_extended_sleep(sleep_state, 1908 val_a, val_b); 1909 if (rc < 0) 1910 return AE_ERROR; 1911 else if (rc > 0) 1912 
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}