1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (c) 2008 Intel Corporation 8 * Author: Matthew Wilcox <willy@linux.intel.com> 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 25 * 26 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 * 28 */ 29 30 #include <linux/module.h> 31 #include <linux/kernel.h> 32 #include <linux/slab.h> 33 #include <linux/mm.h> 34 #include <linux/highmem.h> 35 #include <linux/pci.h> 36 #include <linux/interrupt.h> 37 #include <linux/kmod.h> 38 #include <linux/delay.h> 39 #include <linux/workqueue.h> 40 #include <linux/nmi.h> 41 #include <linux/acpi.h> 42 #include <linux/acpi_io.h> 43 #include <linux/efi.h> 44 #include <linux/ioport.h> 45 #include <linux/list.h> 46 #include <linux/jiffies.h> 47 #include <linux/semaphore.h> 48 49 #include <asm/io.h> 50 #include <asm/uaccess.h> 51 52 #include <acpi/acpi.h> 53 #include <acpi/acpi_bus.h> 54 #include <acpi/processor.h> 55 56 #define _COMPONENT ACPI_OS_SERVICES 57 ACPI_MODULE_NAME("osl"); 58 #define PREFIX "ACPI: " 59 struct acpi_os_dpc { 60 acpi_osd_exec_callback function; 61 void *context; 62 struct work_struct work; 63 int wait; 64 }; 65 66 #ifdef CONFIG_ACPI_CUSTOM_DSDT 67 #include CONFIG_ACPI_CUSTOM_DSDT_FILE 68 #endif 69 70 #ifdef ENABLE_DEBUGGER 71 #include <linux/kdb.h> 72 73 /* stuff for debugger support */ 74 int acpi_in_debugger; 75 EXPORT_SYMBOL(acpi_in_debugger); 76 77 extern char line_buf[80]; 78 #endif /*ENABLE_DEBUGGER */ 79 80 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 81 u32 pm1b_ctrl); 82 83 static acpi_osd_handler acpi_irq_handler; 84 static void *acpi_irq_context; 85 static struct workqueue_struct *kacpid_wq; 86 static struct workqueue_struct *kacpi_notify_wq; 87 struct workqueue_struct *kacpi_hotplug_wq; 88 EXPORT_SYMBOL(kacpi_hotplug_wq); 89 90 /* 91 * This list of permanent mappings is for memory that may be accessed from 92 * interrupt context, where we can't do the ioremap(). 93 */ 94 struct acpi_ioremap { 95 struct list_head list; 96 void __iomem *virt; 97 acpi_physical_address phys; 98 acpi_size size; 99 unsigned long refcount; 100 }; 101 102 static LIST_HEAD(acpi_ioremaps); 103 static DEFINE_MUTEX(acpi_ioremap_lock); 104 105 static void __init acpi_osi_setup_late(void); 106 107 /* 108 * The story of _OSI(Linux) 109 * 110 * From pre-history through Linux-2.6.22, 111 * Linux responded TRUE upon a BIOS OSI(Linux) query. 112 * 113 * Unfortunately, reference BIOS writers got wind of this 114 * and put OSI(Linux) in their example code, quickly exposing 115 * this string as ill-conceived and opening the door to 116 * an un-bounded number of BIOS incompatibilities. 117 * 118 * For example, OSI(Linux) was used on resume to re-POST a 119 * video card on one system, because Linux at that time 120 * could not do a speedy restore in its native driver. 121 * But then upon gaining quick native restore capability, 122 * Linux has no way to tell the BIOS to skip the time-consuming 123 * POST -- putting Linux at a permanent performance disadvantage. 124 * On another system, the BIOS writer used OSI(Linux) 125 * to infer native OS support for IPMI! On other systems, 126 * OSI(Linux) simply got in the way of Linux claiming to 127 * be compatible with other operating systems, exposing 128 * BIOS issues such as skipped device initialization. 129 * 130 * So "Linux" turned out to be a really poor chose of 131 * OSI string, and from Linux-2.6.23 onward we respond FALSE. 132 * 133 * BIOS writers should NOT query _OSI(Linux) on future systems. 134 * Linux will complain on the console when it sees it, and return FALSE. 135 * To get Linux to return TRUE for your system will require 136 * a kernel source update to add a DMI entry, 137 * or boot with "acpi_osi=Linux" 138 */ 139 140 static struct osi_linux { 141 unsigned int enable:1; 142 unsigned int dmi:1; 143 unsigned int cmdline:1; 144 } osi_linux = {0, 0, 0}; 145 146 static u32 acpi_osi_handler(acpi_string interface, u32 supported) 147 { 148 if (!strcmp("Linux", interface)) { 149 150 printk_once(KERN_NOTICE FW_BUG PREFIX 151 "BIOS _OSI(Linux) query %s%s\n", 152 osi_linux.enable ? "honored" : "ignored", 153 osi_linux.cmdline ? " via cmdline" : 154 osi_linux.dmi ? " via DMI" : ""); 155 } 156 157 return supported; 158 } 159 160 static void __init acpi_request_region (struct acpi_generic_address *gas, 161 unsigned int length, char *desc) 162 { 163 u64 addr; 164 165 /* Handle possible alignment issues */ 166 memcpy(&addr, &gas->address, sizeof(addr)); 167 if (!addr || !length) 168 return; 169 170 /* Resources are never freed */ 171 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 172 request_region(addr, length, desc); 173 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 174 request_mem_region(addr, length, desc); 175 } 176 177 static int __init acpi_reserve_resources(void) 178 { 179 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 180 "ACPI PM1a_EVT_BLK"); 181 182 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 183 "ACPI PM1b_EVT_BLK"); 184 185 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 186 "ACPI PM1a_CNT_BLK"); 187 188 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 189 "ACPI PM1b_CNT_BLK"); 190 191 if (acpi_gbl_FADT.pm_timer_length == 4) 192 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 193 194 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 195 "ACPI PM2_CNT_BLK"); 196 197 /* Length of GPE blocks must be a non-negative multiple of 2 */ 198 199 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 200 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 201 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 202 203 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 204 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 205 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 206 207 return 0; 208 } 209 device_initcall(acpi_reserve_resources); 210 211 void acpi_os_printf(const char *fmt, ...) 212 { 213 va_list args; 214 va_start(args, fmt); 215 acpi_os_vprintf(fmt, args); 216 va_end(args); 217 } 218 219 void acpi_os_vprintf(const char *fmt, va_list args) 220 { 221 static char buffer[512]; 222 223 vsprintf(buffer, fmt, args); 224 225 #ifdef ENABLE_DEBUGGER 226 if (acpi_in_debugger) { 227 kdb_printf("%s", buffer); 228 } else { 229 printk(KERN_CONT "%s", buffer); 230 } 231 #else 232 printk(KERN_CONT "%s", buffer); 233 #endif 234 } 235 236 #ifdef CONFIG_KEXEC 237 static unsigned long acpi_rsdp; 238 static int __init setup_acpi_rsdp(char *arg) 239 { 240 acpi_rsdp = simple_strtoul(arg, NULL, 16); 241 return 0; 242 } 243 early_param("acpi_rsdp", setup_acpi_rsdp); 244 #endif 245 246 acpi_physical_address __init acpi_os_get_root_pointer(void) 247 { 248 #ifdef CONFIG_KEXEC 249 if (acpi_rsdp) 250 return acpi_rsdp; 251 #endif 252 253 if (efi_enabled) { 254 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 255 return efi.acpi20; 256 else if (efi.acpi != EFI_INVALID_TABLE_ADDR) 257 return efi.acpi; 258 else { 259 printk(KERN_ERR PREFIX 260 "System description tables not found\n"); 261 return 0; 262 } 263 } else { 264 acpi_physical_address pa = 0; 265 266 acpi_find_root_pointer(&pa); 267 return pa; 268 } 269 } 270 271 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 272 static struct acpi_ioremap * 273 acpi_map_lookup(acpi_physical_address phys, acpi_size size) 274 { 275 struct acpi_ioremap *map; 276 277 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 278 if (map->phys <= phys && 279 phys + size <= map->phys + map->size) 280 return map; 281 282 return NULL; 283 } 284 285 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 286 static void __iomem * 287 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) 288 { 289 struct acpi_ioremap *map; 290 291 map = acpi_map_lookup(phys, size); 292 if (map) 293 return map->virt + (phys - map->phys); 294 295 return NULL; 296 } 297 298 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) 299 { 300 struct acpi_ioremap *map; 301 void __iomem *virt = NULL; 302 303 mutex_lock(&acpi_ioremap_lock); 304 map = acpi_map_lookup(phys, size); 305 if (map) { 306 virt = map->virt + (phys - map->phys); 307 map->refcount++; 308 } 309 mutex_unlock(&acpi_ioremap_lock); 310 return virt; 311 } 312 EXPORT_SYMBOL_GPL(acpi_os_get_iomem); 313 314 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 315 static struct acpi_ioremap * 316 acpi_map_lookup_virt(void __iomem *virt, acpi_size size) 317 { 318 struct acpi_ioremap *map; 319 320 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 321 if (map->virt <= virt && 322 virt + size <= map->virt + map->size) 323 return map; 324 325 return NULL; 326 } 327 328 #ifndef CONFIG_IA64 329 #define should_use_kmap(pfn) page_is_ram(pfn) 330 #else 331 /* ioremap will take care of cache attributes */ 332 #define should_use_kmap(pfn) 0 333 #endif 334 335 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 336 { 337 unsigned long pfn; 338 339 pfn = pg_off >> PAGE_SHIFT; 340 if (should_use_kmap(pfn)) { 341 if (pg_sz > PAGE_SIZE) 342 return NULL; 343 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 344 } else 345 return acpi_os_ioremap(pg_off, pg_sz); 346 } 347 348 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 349 { 350 unsigned long pfn; 351 352 pfn = pg_off >> PAGE_SHIFT; 353 if (should_use_kmap(pfn)) 354 kunmap(pfn_to_page(pfn)); 355 else 356 iounmap(vaddr); 357 } 358 359 void __iomem *__init_refok 360 acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 361 { 362 struct acpi_ioremap *map; 363 void __iomem *virt; 364 acpi_physical_address pg_off; 365 acpi_size pg_sz; 366 367 if (phys > ULONG_MAX) { 368 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 369 return NULL; 370 } 371 372 if (!acpi_gbl_permanent_mmap) 373 return __acpi_map_table((unsigned long)phys, size); 374 375 mutex_lock(&acpi_ioremap_lock); 376 /* Check if there's a suitable mapping already. */ 377 map = acpi_map_lookup(phys, size); 378 if (map) { 379 map->refcount++; 380 goto out; 381 } 382 383 map = kzalloc(sizeof(*map), GFP_KERNEL); 384 if (!map) { 385 mutex_unlock(&acpi_ioremap_lock); 386 return NULL; 387 } 388 389 pg_off = round_down(phys, PAGE_SIZE); 390 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 391 virt = acpi_map(pg_off, pg_sz); 392 if (!virt) { 393 mutex_unlock(&acpi_ioremap_lock); 394 kfree(map); 395 return NULL; 396 } 397 398 INIT_LIST_HEAD(&map->list); 399 map->virt = virt; 400 map->phys = pg_off; 401 map->size = pg_sz; 402 map->refcount = 1; 403 404 list_add_tail_rcu(&map->list, &acpi_ioremaps); 405 406 out: 407 mutex_unlock(&acpi_ioremap_lock); 408 return map->virt + (phys - map->phys); 409 } 410 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 411 412 static void acpi_os_drop_map_ref(struct acpi_ioremap *map) 413 { 414 if (!--map->refcount) 415 list_del_rcu(&map->list); 416 } 417 418 static void acpi_os_map_cleanup(struct acpi_ioremap *map) 419 { 420 if (!map->refcount) { 421 synchronize_rcu(); 422 acpi_unmap(map->phys, map->virt); 423 kfree(map); 424 } 425 } 426 427 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 428 { 429 struct acpi_ioremap *map; 430 431 if (!acpi_gbl_permanent_mmap) { 432 __acpi_unmap_table(virt, size); 433 return; 434 } 435 436 mutex_lock(&acpi_ioremap_lock); 437 map = acpi_map_lookup_virt(virt, size); 438 if (!map) { 439 mutex_unlock(&acpi_ioremap_lock); 440 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); 441 return; 442 } 443 acpi_os_drop_map_ref(map); 444 mutex_unlock(&acpi_ioremap_lock); 445 446 acpi_os_map_cleanup(map); 447 } 448 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 449 450 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size) 451 { 452 if (!acpi_gbl_permanent_mmap) 453 __acpi_unmap_table(virt, size); 454 } 455 456 int acpi_os_map_generic_address(struct acpi_generic_address *gas) 457 { 458 u64 addr; 459 void __iomem *virt; 460 461 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 462 return 0; 463 464 /* Handle possible alignment issues */ 465 memcpy(&addr, &gas->address, sizeof(addr)); 466 if (!addr || !gas->bit_width) 467 return -EINVAL; 468 469 virt = acpi_os_map_memory(addr, gas->bit_width / 8); 470 if (!virt) 471 return -EIO; 472 473 return 0; 474 } 475 EXPORT_SYMBOL(acpi_os_map_generic_address); 476 477 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) 478 { 479 u64 addr; 480 struct acpi_ioremap *map; 481 482 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 483 return; 484 485 /* Handle possible alignment issues */ 486 memcpy(&addr, &gas->address, sizeof(addr)); 487 if (!addr || !gas->bit_width) 488 return; 489 490 mutex_lock(&acpi_ioremap_lock); 491 map = acpi_map_lookup(addr, gas->bit_width / 8); 492 if (!map) { 493 mutex_unlock(&acpi_ioremap_lock); 494 return; 495 } 496 acpi_os_drop_map_ref(map); 497 mutex_unlock(&acpi_ioremap_lock); 498 499 acpi_os_map_cleanup(map); 500 } 501 EXPORT_SYMBOL(acpi_os_unmap_generic_address); 502 503 #ifdef ACPI_FUTURE_USAGE 504 acpi_status 505 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 506 { 507 if (!phys || !virt) 508 return AE_BAD_PARAMETER; 509 510 *phys = virt_to_phys(virt); 511 512 return AE_OK; 513 } 514 #endif 515 516 #define ACPI_MAX_OVERRIDE_LEN 100 517 518 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 519 520 acpi_status 521 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 522 acpi_string * new_val) 523 { 524 if (!init_val || !new_val) 525 return AE_BAD_PARAMETER; 526 527 *new_val = NULL; 528 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 529 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 530 acpi_os_name); 531 *new_val = acpi_os_name; 532 } 533 534 return AE_OK; 535 } 536 537 acpi_status 538 acpi_os_table_override(struct acpi_table_header * existing_table, 539 struct acpi_table_header ** new_table) 540 { 541 if (!existing_table || !new_table) 542 return AE_BAD_PARAMETER; 543 544 *new_table = NULL; 545 546 #ifdef CONFIG_ACPI_CUSTOM_DSDT 547 if (strncmp(existing_table->signature, "DSDT", 4) == 0) 548 *new_table = (struct acpi_table_header *)AmlCode; 549 #endif 550 if (*new_table != NULL) { 551 printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " 552 "this is unsafe: tainting kernel\n", 553 existing_table->signature, 554 existing_table->oem_table_id); 555 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); 556 } 557 return AE_OK; 558 } 559 560 acpi_status 561 acpi_os_physical_table_override(struct acpi_table_header *existing_table, 562 acpi_physical_address * new_address, 563 u32 *new_table_length) 564 { 565 return AE_SUPPORT; 566 } 567 568 569 static irqreturn_t acpi_irq(int irq, void *dev_id) 570 { 571 u32 handled; 572 573 handled = (*acpi_irq_handler) (acpi_irq_context); 574 575 if (handled) { 576 acpi_irq_handled++; 577 return IRQ_HANDLED; 578 } else { 579 acpi_irq_not_handled++; 580 return IRQ_NONE; 581 } 582 } 583 584 acpi_status 585 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 586 void *context) 587 { 588 unsigned int irq; 589 590 acpi_irq_stats_init(); 591 592 /* 593 * ACPI interrupts different from the SCI in our copy of the FADT are 594 * not supported. 595 */ 596 if (gsi != acpi_gbl_FADT.sci_interrupt) 597 return AE_BAD_PARAMETER; 598 599 if (acpi_irq_handler) 600 return AE_ALREADY_ACQUIRED; 601 602 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 603 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 604 gsi); 605 return AE_OK; 606 } 607 608 acpi_irq_handler = handler; 609 acpi_irq_context = context; 610 if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED, "acpi", 611 acpi_irq)) { 612 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 613 acpi_irq_handler = NULL; 614 return AE_NOT_ACQUIRED; 615 } 616 617 return AE_OK; 618 } 619 620 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler) 621 { 622 if (irq != acpi_gbl_FADT.sci_interrupt) 623 return AE_BAD_PARAMETER; 624 625 free_irq(irq, acpi_irq); 626 acpi_irq_handler = NULL; 627 628 return AE_OK; 629 } 630 631 /* 632 * Running in interpreter thread context, safe to sleep 633 */ 634 635 void acpi_os_sleep(u64 ms) 636 { 637 schedule_timeout_interruptible(msecs_to_jiffies(ms)); 638 } 639 640 void acpi_os_stall(u32 us) 641 { 642 while (us) { 643 u32 delay = 1000; 644 645 if (delay > us) 646 delay = us; 647 udelay(delay); 648 touch_nmi_watchdog(); 649 us -= delay; 650 } 651 } 652 653 /* 654 * Support ACPI 3.0 AML Timer operand 655 * Returns 64-bit free-running, monotonically increasing timer 656 * with 100ns granularity 657 */ 658 u64 acpi_os_get_timer(void) 659 { 660 static u64 t; 661 662 #ifdef CONFIG_HPET 663 /* TBD: use HPET if available */ 664 #endif 665 666 #ifdef CONFIG_X86_PM_TIMER 667 /* TBD: default to PM timer if HPET was not available */ 668 #endif 669 if (!t) 670 printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n"); 671 672 return ++t; 673 } 674 675 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 676 { 677 u32 dummy; 678 679 if (!value) 680 value = &dummy; 681 682 *value = 0; 683 if (width <= 8) { 684 *(u8 *) value = inb(port); 685 } else if (width <= 16) { 686 *(u16 *) value = inw(port); 687 } else if (width <= 32) { 688 *(u32 *) value = inl(port); 689 } else { 690 BUG(); 691 } 692 693 return AE_OK; 694 } 695 696 EXPORT_SYMBOL(acpi_os_read_port); 697 698 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 699 { 700 if (width <= 8) { 701 outb(value, port); 702 } else if (width <= 16) { 703 outw(value, port); 704 } else if (width <= 32) { 705 outl(value, port); 706 } else { 707 BUG(); 708 } 709 710 return AE_OK; 711 } 712 713 EXPORT_SYMBOL(acpi_os_write_port); 714 715 #ifdef readq 716 static inline u64 read64(const volatile void __iomem *addr) 717 { 718 return readq(addr); 719 } 720 #else 721 static inline u64 read64(const volatile void __iomem *addr) 722 { 723 u64 l, h; 724 l = readl(addr); 725 h = readl(addr+4); 726 return l | (h << 32); 727 } 728 #endif 729 730 acpi_status 731 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 732 { 733 void __iomem *virt_addr; 734 unsigned int size = width / 8; 735 bool unmap = false; 736 u64 dummy; 737 738 rcu_read_lock(); 739 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 740 if (!virt_addr) { 741 rcu_read_unlock(); 742 virt_addr = acpi_os_ioremap(phys_addr, size); 743 if (!virt_addr) 744 return AE_BAD_ADDRESS; 745 unmap = true; 746 } 747 748 if (!value) 749 value = &dummy; 750 751 switch (width) { 752 case 8: 753 *(u8 *) value = readb(virt_addr); 754 break; 755 case 16: 756 *(u16 *) value = readw(virt_addr); 757 break; 758 case 32: 759 *(u32 *) value = readl(virt_addr); 760 break; 761 case 64: 762 *(u64 *) value = read64(virt_addr); 763 break; 764 default: 765 BUG(); 766 } 767 768 if (unmap) 769 iounmap(virt_addr); 770 else 771 rcu_read_unlock(); 772 773 return AE_OK; 774 } 775 776 #ifdef writeq 777 static inline void write64(u64 val, volatile void __iomem *addr) 778 { 779 writeq(val, addr); 780 } 781 #else 782 static inline void write64(u64 val, volatile void __iomem *addr) 783 { 784 writel(val, addr); 785 writel(val>>32, addr+4); 786 } 787 #endif 788 789 acpi_status 790 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 791 { 792 void __iomem *virt_addr; 793 unsigned int size = width / 8; 794 bool unmap = false; 795 796 rcu_read_lock(); 797 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 798 if (!virt_addr) { 799 rcu_read_unlock(); 800 virt_addr = acpi_os_ioremap(phys_addr, size); 801 if (!virt_addr) 802 return AE_BAD_ADDRESS; 803 unmap = true; 804 } 805 806 switch (width) { 807 case 8: 808 writeb(value, virt_addr); 809 break; 810 case 16: 811 writew(value, virt_addr); 812 break; 813 case 32: 814 writel(value, virt_addr); 815 break; 816 case 64: 817 write64(value, virt_addr); 818 break; 819 default: 820 BUG(); 821 } 822 823 if (unmap) 824 iounmap(virt_addr); 825 else 826 rcu_read_unlock(); 827 828 return AE_OK; 829 } 830 831 acpi_status 832 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 833 u64 *value, u32 width) 834 { 835 int result, size; 836 u32 value32; 837 838 if (!value) 839 return AE_BAD_PARAMETER; 840 841 switch (width) { 842 case 8: 843 size = 1; 844 break; 845 case 16: 846 size = 2; 847 break; 848 case 32: 849 size = 4; 850 break; 851 default: 852 return AE_ERROR; 853 } 854 855 result = raw_pci_read(pci_id->segment, pci_id->bus, 856 PCI_DEVFN(pci_id->device, pci_id->function), 857 reg, size, &value32); 858 *value = value32; 859 860 return (result ? AE_ERROR : AE_OK); 861 } 862 863 acpi_status 864 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 865 u64 value, u32 width) 866 { 867 int result, size; 868 869 switch (width) { 870 case 8: 871 size = 1; 872 break; 873 case 16: 874 size = 2; 875 break; 876 case 32: 877 size = 4; 878 break; 879 default: 880 return AE_ERROR; 881 } 882 883 result = raw_pci_write(pci_id->segment, pci_id->bus, 884 PCI_DEVFN(pci_id->device, pci_id->function), 885 reg, size, value); 886 887 return (result ? AE_ERROR : AE_OK); 888 } 889 890 static void acpi_os_execute_deferred(struct work_struct *work) 891 { 892 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 893 894 if (dpc->wait) 895 acpi_os_wait_events_complete(NULL); 896 897 dpc->function(dpc->context); 898 kfree(dpc); 899 } 900 901 /******************************************************************************* 902 * 903 * FUNCTION: acpi_os_execute 904 * 905 * PARAMETERS: Type - Type of the callback 906 * Function - Function to be executed 907 * Context - Function parameters 908 * 909 * RETURN: Status 910 * 911 * DESCRIPTION: Depending on type, either queues function for deferred execution or 912 * immediately executes function on a separate thread. 913 * 914 ******************************************************************************/ 915 916 static acpi_status __acpi_os_execute(acpi_execute_type type, 917 acpi_osd_exec_callback function, void *context, int hp) 918 { 919 acpi_status status = AE_OK; 920 struct acpi_os_dpc *dpc; 921 struct workqueue_struct *queue; 922 int ret; 923 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 924 "Scheduling function [%p(%p)] for deferred execution.\n", 925 function, context)); 926 927 /* 928 * Allocate/initialize DPC structure. Note that this memory will be 929 * freed by the callee. The kernel handles the work_struct list in a 930 * way that allows us to also free its memory inside the callee. 931 * Because we may want to schedule several tasks with different 932 * parameters we can't use the approach some kernel code uses of 933 * having a static work_struct. 934 */ 935 936 dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 937 if (!dpc) 938 return AE_NO_MEMORY; 939 940 dpc->function = function; 941 dpc->context = context; 942 943 /* 944 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq 945 * because the hotplug code may call driver .remove() functions, 946 * which invoke flush_scheduled_work/acpi_os_wait_events_complete 947 * to flush these workqueues. 948 */ 949 queue = hp ? kacpi_hotplug_wq : 950 (type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq); 951 dpc->wait = hp ? 1 : 0; 952 953 if (queue == kacpi_hotplug_wq) 954 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 955 else if (queue == kacpi_notify_wq) 956 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 957 else 958 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 959 960 /* 961 * On some machines, a software-initiated SMI causes corruption unless 962 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but 963 * typically it's done in GPE-related methods that are run via 964 * workqueues, so we can avoid the known corruption cases by always 965 * queueing on CPU 0. 966 */ 967 ret = queue_work_on(0, queue, &dpc->work); 968 969 if (!ret) { 970 printk(KERN_ERR PREFIX 971 "Call to queue_work() failed.\n"); 972 status = AE_ERROR; 973 kfree(dpc); 974 } 975 return status; 976 } 977 978 acpi_status acpi_os_execute(acpi_execute_type type, 979 acpi_osd_exec_callback function, void *context) 980 { 981 return __acpi_os_execute(type, function, context, 0); 982 } 983 EXPORT_SYMBOL(acpi_os_execute); 984 985 acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function, 986 void *context) 987 { 988 return __acpi_os_execute(0, function, context, 1); 989 } 990 991 void acpi_os_wait_events_complete(void *context) 992 { 993 flush_workqueue(kacpid_wq); 994 flush_workqueue(kacpi_notify_wq); 995 } 996 997 EXPORT_SYMBOL(acpi_os_wait_events_complete); 998 999 acpi_status 1000 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 1001 { 1002 struct semaphore *sem = NULL; 1003 1004 sem = acpi_os_allocate(sizeof(struct semaphore)); 1005 if (!sem) 1006 return AE_NO_MEMORY; 1007 memset(sem, 0, sizeof(struct semaphore)); 1008 1009 sema_init(sem, initial_units); 1010 1011 *handle = (acpi_handle *) sem; 1012 1013 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 1014 *handle, initial_units)); 1015 1016 return AE_OK; 1017 } 1018 1019 /* 1020 * TODO: A better way to delete semaphores? Linux doesn't have a 1021 * 'delete_semaphore()' function -- may result in an invalid 1022 * pointer dereference for non-synchronized consumers. Should 1023 * we at least check for blocked threads and signal/cancel them? 1024 */ 1025 1026 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 1027 { 1028 struct semaphore *sem = (struct semaphore *)handle; 1029 1030 if (!sem) 1031 return AE_BAD_PARAMETER; 1032 1033 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 1034 1035 BUG_ON(!list_empty(&sem->wait_list)); 1036 kfree(sem); 1037 sem = NULL; 1038 1039 return AE_OK; 1040 } 1041 1042 /* 1043 * TODO: Support for units > 1? 1044 */ 1045 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 1046 { 1047 acpi_status status = AE_OK; 1048 struct semaphore *sem = (struct semaphore *)handle; 1049 long jiffies; 1050 int ret = 0; 1051 1052 if (!sem || (units < 1)) 1053 return AE_BAD_PARAMETER; 1054 1055 if (units > 1) 1056 return AE_SUPPORT; 1057 1058 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 1059 handle, units, timeout)); 1060 1061 if (timeout == ACPI_WAIT_FOREVER) 1062 jiffies = MAX_SCHEDULE_TIMEOUT; 1063 else 1064 jiffies = msecs_to_jiffies(timeout); 1065 1066 ret = down_timeout(sem, jiffies); 1067 if (ret) 1068 status = AE_TIME; 1069 1070 if (ACPI_FAILURE(status)) { 1071 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1072 "Failed to acquire semaphore[%p|%d|%d], %s", 1073 handle, units, timeout, 1074 acpi_format_exception(status))); 1075 } else { 1076 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1077 "Acquired semaphore[%p|%d|%d]", handle, 1078 units, timeout)); 1079 } 1080 1081 return status; 1082 } 1083 1084 /* 1085 * TODO: Support for units > 1? 1086 */ 1087 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 1088 { 1089 struct semaphore *sem = (struct semaphore *)handle; 1090 1091 if (!sem || (units < 1)) 1092 return AE_BAD_PARAMETER; 1093 1094 if (units > 1) 1095 return AE_SUPPORT; 1096 1097 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 1098 units)); 1099 1100 up(sem); 1101 1102 return AE_OK; 1103 } 1104 1105 #ifdef ACPI_FUTURE_USAGE 1106 u32 acpi_os_get_line(char *buffer) 1107 { 1108 1109 #ifdef ENABLE_DEBUGGER 1110 if (acpi_in_debugger) { 1111 u32 chars; 1112 1113 kdb_read(buffer, sizeof(line_buf)); 1114 1115 /* remove the CR kdb includes */ 1116 chars = strlen(buffer) - 1; 1117 buffer[chars] = '\0'; 1118 } 1119 #endif 1120 1121 return 0; 1122 } 1123 #endif /* ACPI_FUTURE_USAGE */ 1124 1125 acpi_status acpi_os_signal(u32 function, void *info) 1126 { 1127 switch (function) { 1128 case ACPI_SIGNAL_FATAL: 1129 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1130 break; 1131 case ACPI_SIGNAL_BREAKPOINT: 1132 /* 1133 * AML Breakpoint 1134 * ACPI spec. says to treat it as a NOP unless 1135 * you are debugging. So if/when we integrate 1136 * AML debugger into the kernel debugger its 1137 * hook will go here. But until then it is 1138 * not useful to print anything on breakpoints. 1139 */ 1140 break; 1141 default: 1142 break; 1143 } 1144 1145 return AE_OK; 1146 } 1147 1148 static int __init acpi_os_name_setup(char *str) 1149 { 1150 char *p = acpi_os_name; 1151 int count = ACPI_MAX_OVERRIDE_LEN - 1; 1152 1153 if (!str || !*str) 1154 return 0; 1155 1156 for (; count-- && str && *str; str++) { 1157 if (isalnum(*str) || *str == ' ' || *str == ':') 1158 *p++ = *str; 1159 else if (*str == '\'' || *str == '"') 1160 continue; 1161 else 1162 break; 1163 } 1164 *p = 0; 1165 1166 return 1; 1167 1168 } 1169 1170 __setup("acpi_os_name=", acpi_os_name_setup); 1171 1172 #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ 1173 #define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */ 1174 1175 struct osi_setup_entry { 1176 char string[OSI_STRING_LENGTH_MAX]; 1177 bool enable; 1178 }; 1179 1180 static struct osi_setup_entry __initdata 1181 osi_setup_entries[OSI_STRING_ENTRIES_MAX] = { 1182 {"Module Device", true}, 1183 {"Processor Device", true}, 1184 {"3.0 _SCP Extensions", true}, 1185 {"Processor Aggregator Device", true}, 1186 }; 1187 1188 void __init acpi_osi_setup(char *str) 1189 { 1190 struct osi_setup_entry *osi; 1191 bool enable = true; 1192 int i; 1193 1194 if (!acpi_gbl_create_osi_method) 1195 return; 1196 1197 if (str == NULL || *str == '\0') { 1198 printk(KERN_INFO PREFIX "_OSI method disabled\n"); 1199 acpi_gbl_create_osi_method = FALSE; 1200 return; 1201 } 1202 1203 if (*str == '!') { 1204 str++; 1205 enable = false; 1206 } 1207 1208 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1209 osi = &osi_setup_entries[i]; 1210 if (!strcmp(osi->string, str)) { 1211 osi->enable = enable; 1212 break; 1213 } else if (osi->string[0] == '\0') { 1214 osi->enable = enable; 1215 strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); 1216 break; 1217 } 1218 } 1219 } 1220 1221 static void __init set_osi_linux(unsigned int enable) 1222 { 1223 if (osi_linux.enable != enable) 1224 osi_linux.enable = enable; 1225 1226 if (osi_linux.enable) 1227 acpi_osi_setup("Linux"); 1228 else 1229 acpi_osi_setup("!Linux"); 1230 1231 return; 1232 } 1233 1234 static void __init acpi_cmdline_osi_linux(unsigned int enable) 1235 { 1236 osi_linux.cmdline = 1; /* cmdline set the default and override DMI */ 1237 osi_linux.dmi = 0; 1238 set_osi_linux(enable); 1239 1240 return; 1241 } 1242 1243 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) 1244 { 1245 printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); 1246 1247 if (enable == -1) 1248 return; 1249 1250 osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ 1251 set_osi_linux(enable); 1252 1253 return; 1254 } 1255 1256 /* 1257 * Modify the list of "OS Interfaces" reported to BIOS via _OSI 1258 * 1259 * empty string disables _OSI 1260 * string starting with '!' disables that string 1261 * otherwise string is added to list, augmenting built-in strings 1262 */ 1263 static void __init acpi_osi_setup_late(void) 1264 { 1265 struct osi_setup_entry *osi; 1266 char *str; 1267 int i; 1268 acpi_status status; 1269 1270 for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { 1271 osi = &osi_setup_entries[i]; 1272 str = osi->string; 1273 1274 if (*str == '\0') 1275 break; 1276 if (osi->enable) { 1277 status = acpi_install_interface(str); 1278 1279 if (ACPI_SUCCESS(status)) 1280 printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); 1281 } else { 1282 status = acpi_remove_interface(str); 1283 1284 if (ACPI_SUCCESS(status)) 1285 printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); 1286 } 1287 } 1288 } 1289 1290 static int __init osi_setup(char *str) 1291 { 1292 if (str && !strcmp("Linux", str)) 1293 acpi_cmdline_osi_linux(1); 1294 else if (str && !strcmp("!Linux", str)) 1295 acpi_cmdline_osi_linux(0); 1296 else 1297 acpi_osi_setup(str); 1298 1299 return 1; 1300 } 1301 1302 __setup("acpi_osi=", osi_setup); 1303 1304 /* enable serialization to combat AE_ALREADY_EXISTS errors */ 1305 static int __init acpi_serialize_setup(char *str) 1306 { 1307 printk(KERN_INFO PREFIX "serialize enabled\n"); 1308 1309 acpi_gbl_all_methods_serialized = TRUE; 1310 1311 return 1; 1312 } 1313 1314 __setup("acpi_serialize", acpi_serialize_setup); 1315 1316 /* Check of resource interference between native drivers and ACPI 1317 * OperationRegions (SystemIO and System Memory only). 1318 * IO ports and memory declared in ACPI might be used by the ACPI subsystem 1319 * in arbitrary AML code and can interfere with legacy drivers. 1320 * acpi_enforce_resources= can be set to: 1321 * 1322 * - strict (default) (2) 1323 * -> further driver trying to access the resources will not load 1324 * - lax (1) 1325 * -> further driver trying to access the resources will load, but you 1326 * get a system message that something might go wrong... 1327 * 1328 * - no (0) 1329 * -> ACPI Operation Region resources will not be registered 1330 * 1331 */ 1332 #define ENFORCE_RESOURCES_STRICT 2 1333 #define ENFORCE_RESOURCES_LAX 1 1334 #define ENFORCE_RESOURCES_NO 0 1335 1336 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1337 1338 static int __init acpi_enforce_resources_setup(char *str) 1339 { 1340 if (str == NULL || *str == '\0') 1341 return 0; 1342 1343 if (!strcmp("strict", str)) 1344 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1345 else if (!strcmp("lax", str)) 1346 acpi_enforce_resources = ENFORCE_RESOURCES_LAX; 1347 else if (!strcmp("no", str)) 1348 acpi_enforce_resources = ENFORCE_RESOURCES_NO; 1349 1350 return 1; 1351 } 1352 1353 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); 1354 1355 /* Check for resource conflicts between ACPI OperationRegions and native 1356 * drivers */ 1357 int acpi_check_resource_conflict(const struct resource *res) 1358 { 1359 acpi_adr_space_type space_id; 1360 acpi_size length; 1361 u8 warn = 0; 1362 int clash = 0; 1363 1364 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1365 return 0; 1366 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1367 return 0; 1368 1369 if (res->flags & IORESOURCE_IO) 1370 space_id = ACPI_ADR_SPACE_SYSTEM_IO; 1371 else 1372 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1373 1374 length = res->end - res->start + 1; 1375 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1376 warn = 1; 1377 clash = acpi_check_address_range(space_id, res->start, length, warn); 1378 1379 if (clash) { 1380 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1381 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1382 printk(KERN_NOTICE "ACPI: This conflict may" 1383 " cause random problems and system" 1384 " instability\n"); 1385 printk(KERN_INFO "ACPI: If an ACPI driver is available" 1386 " for this device, you should use it instead of" 1387 " the native driver\n"); 1388 } 1389 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1390 return -EBUSY; 1391 } 1392 return 0; 1393 } 1394 EXPORT_SYMBOL(acpi_check_resource_conflict); 1395 1396 int acpi_check_region(resource_size_t start, resource_size_t n, 1397 const char *name) 1398 { 1399 struct resource res = { 1400 .start = start, 1401 .end = start + n - 1, 1402 .name = name, 1403 .flags = IORESOURCE_IO, 1404 }; 1405 1406 return acpi_check_resource_conflict(&res); 1407 } 1408 EXPORT_SYMBOL(acpi_check_region); 1409 1410 /* 1411 * Let drivers know whether the resource checks are effective 1412 */ 1413 int acpi_resources_are_enforced(void) 1414 { 1415 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; 1416 } 1417 EXPORT_SYMBOL(acpi_resources_are_enforced); 1418 1419 /* 1420 * Deallocate the memory for a spinlock. 1421 */ 1422 void acpi_os_delete_lock(acpi_spinlock handle) 1423 { 1424 ACPI_FREE(handle); 1425 } 1426 1427 /* 1428 * Acquire a spinlock. 1429 * 1430 * handle is a pointer to the spinlock_t. 1431 */ 1432 1433 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1434 { 1435 acpi_cpu_flags flags; 1436 spin_lock_irqsave(lockp, flags); 1437 return flags; 1438 } 1439 1440 /* 1441 * Release a spinlock. See above. 1442 */ 1443 1444 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1445 { 1446 spin_unlock_irqrestore(lockp, flags); 1447 } 1448 1449 #ifndef ACPI_USE_LOCAL_CACHE 1450 1451 /******************************************************************************* 1452 * 1453 * FUNCTION: acpi_os_create_cache 1454 * 1455 * PARAMETERS: name - Ascii name for the cache 1456 * size - Size of each cached object 1457 * depth - Maximum depth of the cache (in objects) <ignored> 1458 * cache - Where the new cache object is returned 1459 * 1460 * RETURN: status 1461 * 1462 * DESCRIPTION: Create a cache object 1463 * 1464 ******************************************************************************/ 1465 1466 acpi_status 1467 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1468 { 1469 *cache = kmem_cache_create(name, size, 0, 0, NULL); 1470 if (*cache == NULL) 1471 return AE_ERROR; 1472 else 1473 return AE_OK; 1474 } 1475 1476 /******************************************************************************* 1477 * 1478 * FUNCTION: acpi_os_purge_cache 1479 * 1480 * PARAMETERS: Cache - Handle to cache object 1481 * 1482 * RETURN: Status 1483 * 1484 * DESCRIPTION: Free all objects within the requested cache. 1485 * 1486 ******************************************************************************/ 1487 1488 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1489 { 1490 kmem_cache_shrink(cache); 1491 return (AE_OK); 1492 } 1493 1494 /******************************************************************************* 1495 * 1496 * FUNCTION: acpi_os_delete_cache 1497 * 1498 * PARAMETERS: Cache - Handle to cache object 1499 * 1500 * RETURN: Status 1501 * 1502 * DESCRIPTION: Free all objects within the requested cache and delete the 1503 * cache object. 1504 * 1505 ******************************************************************************/ 1506 1507 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1508 { 1509 kmem_cache_destroy(cache); 1510 return (AE_OK); 1511 } 1512 1513 /******************************************************************************* 1514 * 1515 * FUNCTION: acpi_os_release_object 1516 * 1517 * PARAMETERS: Cache - Handle to cache object 1518 * Object - The object to be released 1519 * 1520 * RETURN: None 1521 * 1522 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1523 * the object is deleted. 1524 * 1525 ******************************************************************************/ 1526 1527 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1528 { 1529 kmem_cache_free(cache, object); 1530 return (AE_OK); 1531 } 1532 #endif 1533 1534 acpi_status __init acpi_os_initialize(void) 1535 { 1536 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1537 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1538 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1539 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1540 1541 return AE_OK; 1542 } 1543 1544 acpi_status __init acpi_os_initialize1(void) 1545 { 1546 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1547 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1548 kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1); 1549 BUG_ON(!kacpid_wq); 1550 BUG_ON(!kacpi_notify_wq); 1551 BUG_ON(!kacpi_hotplug_wq); 1552 acpi_install_interface_handler(acpi_osi_handler); 1553 acpi_osi_setup_late(); 1554 return AE_OK; 1555 } 1556 1557 acpi_status acpi_os_terminate(void) 1558 { 1559 if (acpi_irq_handler) { 1560 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1561 acpi_irq_handler); 1562 } 1563 1564 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1565 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1566 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1567 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1568 1569 destroy_workqueue(kacpid_wq); 1570 destroy_workqueue(kacpi_notify_wq); 1571 destroy_workqueue(kacpi_hotplug_wq); 1572 1573 return AE_OK; 1574 } 1575 1576 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1577 u32 pm1b_control) 1578 { 1579 int rc = 0; 1580 if (__acpi_os_prepare_sleep) 1581 rc = __acpi_os_prepare_sleep(sleep_state, 1582 pm1a_control, pm1b_control); 1583 if (rc < 0) 1584 return AE_ERROR; 1585 else if (rc > 0) 1586 return AE_CTRL_SKIP; 1587 1588 return AE_OK; 1589 } 1590 1591 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1592 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1593 { 1594 __acpi_os_prepare_sleep = func; 1595 } 1596