1 /* 2 * acpi_osl.c - OS-dependent functions ($Revision: 83 $) 3 * 4 * Copyright (C) 2000 Andrew Henroid 5 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 6 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 7 * Copyright (c) 2008 Intel Corporation 8 * Author: Matthew Wilcox <willy@linux.intel.com> 9 * 10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2 of the License, or 15 * (at your option) any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 */ 25 26 #include <linux/module.h> 27 #include <linux/kernel.h> 28 #include <linux/slab.h> 29 #include <linux/mm.h> 30 #include <linux/highmem.h> 31 #include <linux/pci.h> 32 #include <linux/interrupt.h> 33 #include <linux/kmod.h> 34 #include <linux/delay.h> 35 #include <linux/workqueue.h> 36 #include <linux/nmi.h> 37 #include <linux/acpi.h> 38 #include <linux/efi.h> 39 #include <linux/ioport.h> 40 #include <linux/list.h> 41 #include <linux/jiffies.h> 42 #include <linux/semaphore.h> 43 44 #include <asm/io.h> 45 #include <linux/uaccess.h> 46 #include <linux/io-64-nonatomic-lo-hi.h> 47 48 #include "internal.h" 49 50 #define _COMPONENT ACPI_OS_SERVICES 51 ACPI_MODULE_NAME("osl"); 52 53 struct acpi_os_dpc { 54 acpi_osd_exec_callback function; 55 void *context; 56 struct work_struct work; 57 }; 58 59 #ifdef ENABLE_DEBUGGER 60 #include <linux/kdb.h> 61 62 /* stuff for debugger support */ 63 int acpi_in_debugger; 64 EXPORT_SYMBOL(acpi_in_debugger); 65 #endif /*ENABLE_DEBUGGER */ 66 67 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl, 68 u32 pm1b_ctrl); 69 static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a, 70 u32 val_b); 71 72 static acpi_osd_handler acpi_irq_handler; 73 static void *acpi_irq_context; 74 static struct workqueue_struct *kacpid_wq; 75 static struct workqueue_struct *kacpi_notify_wq; 76 static struct workqueue_struct *kacpi_hotplug_wq; 77 static bool acpi_os_initialized; 78 unsigned int acpi_sci_irq = INVALID_ACPI_IRQ; 79 bool acpi_permanent_mmap = false; 80 81 /* 82 * This list of permanent mappings is for memory that may be accessed from 83 * interrupt context, where we can't do the ioremap(). 84 */ 85 struct acpi_ioremap { 86 struct list_head list; 87 void __iomem *virt; 88 acpi_physical_address phys; 89 acpi_size size; 90 unsigned long refcount; 91 }; 92 93 static LIST_HEAD(acpi_ioremaps); 94 static DEFINE_MUTEX(acpi_ioremap_lock); 95 96 static void __init acpi_request_region (struct acpi_generic_address *gas, 97 unsigned int length, char *desc) 98 { 99 u64 addr; 100 101 /* Handle possible alignment issues */ 102 memcpy(&addr, &gas->address, sizeof(addr)); 103 if (!addr || !length) 104 return; 105 106 /* Resources are never freed */ 107 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) 108 request_region(addr, length, desc); 109 else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 110 request_mem_region(addr, length, desc); 111 } 112 113 static int __init acpi_reserve_resources(void) 114 { 115 acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, 116 "ACPI PM1a_EVT_BLK"); 117 118 acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length, 119 "ACPI PM1b_EVT_BLK"); 120 121 acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length, 122 "ACPI PM1a_CNT_BLK"); 123 124 acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length, 125 "ACPI PM1b_CNT_BLK"); 126 127 if (acpi_gbl_FADT.pm_timer_length == 4) 128 acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR"); 129 130 acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length, 131 "ACPI PM2_CNT_BLK"); 132 133 /* Length of GPE blocks must be a non-negative multiple of 2 */ 134 135 if (!(acpi_gbl_FADT.gpe0_block_length & 0x1)) 136 acpi_request_region(&acpi_gbl_FADT.xgpe0_block, 137 acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK"); 138 139 if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) 140 acpi_request_region(&acpi_gbl_FADT.xgpe1_block, 141 acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); 142 143 return 0; 144 } 145 fs_initcall_sync(acpi_reserve_resources); 146 147 void acpi_os_printf(const char *fmt, ...) 148 { 149 va_list args; 150 va_start(args, fmt); 151 acpi_os_vprintf(fmt, args); 152 va_end(args); 153 } 154 EXPORT_SYMBOL(acpi_os_printf); 155 156 void acpi_os_vprintf(const char *fmt, va_list args) 157 { 158 static char buffer[512]; 159 160 vsprintf(buffer, fmt, args); 161 162 #ifdef ENABLE_DEBUGGER 163 if (acpi_in_debugger) { 164 kdb_printf("%s", buffer); 165 } else { 166 if (printk_get_level(buffer)) 167 printk("%s", buffer); 168 else 169 printk(KERN_CONT "%s", buffer); 170 } 171 #else 172 if (acpi_debugger_write_log(buffer) < 0) { 173 if (printk_get_level(buffer)) 174 printk("%s", buffer); 175 else 176 printk(KERN_CONT "%s", buffer); 177 } 178 #endif 179 } 180 181 #ifdef CONFIG_KEXEC 182 static unsigned long acpi_rsdp; 183 static int __init setup_acpi_rsdp(char *arg) 184 { 185 return kstrtoul(arg, 16, &acpi_rsdp); 186 } 187 early_param("acpi_rsdp", setup_acpi_rsdp); 188 #endif 189 190 acpi_physical_address __init acpi_os_get_root_pointer(void) 191 { 192 acpi_physical_address pa = 0; 193 194 #ifdef CONFIG_KEXEC 195 if (acpi_rsdp) 196 return acpi_rsdp; 197 #endif 198 199 if (efi_enabled(EFI_CONFIG_TABLES)) { 200 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) 201 return efi.acpi20; 202 if (efi.acpi != EFI_INVALID_TABLE_ADDR) 203 return efi.acpi; 204 pr_err(PREFIX "System description tables not found\n"); 205 } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) { 206 acpi_find_root_pointer(&pa); 207 } 208 209 return pa; 210 } 211 212 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 213 static struct acpi_ioremap * 214 acpi_map_lookup(acpi_physical_address phys, acpi_size size) 215 { 216 struct acpi_ioremap *map; 217 218 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 219 if (map->phys <= phys && 220 phys + size <= map->phys + map->size) 221 return map; 222 223 return NULL; 224 } 225 226 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 227 static void __iomem * 228 acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) 229 { 230 struct acpi_ioremap *map; 231 232 map = acpi_map_lookup(phys, size); 233 if (map) 234 return map->virt + (phys - map->phys); 235 236 return NULL; 237 } 238 239 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size) 240 { 241 struct acpi_ioremap *map; 242 void __iomem *virt = NULL; 243 244 mutex_lock(&acpi_ioremap_lock); 245 map = acpi_map_lookup(phys, size); 246 if (map) { 247 virt = map->virt + (phys - map->phys); 248 map->refcount++; 249 } 250 mutex_unlock(&acpi_ioremap_lock); 251 return virt; 252 } 253 EXPORT_SYMBOL_GPL(acpi_os_get_iomem); 254 255 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ 256 static struct acpi_ioremap * 257 acpi_map_lookup_virt(void __iomem *virt, acpi_size size) 258 { 259 struct acpi_ioremap *map; 260 261 list_for_each_entry_rcu(map, &acpi_ioremaps, list) 262 if (map->virt <= virt && 263 virt + size <= map->virt + map->size) 264 return map; 265 266 return NULL; 267 } 268 269 #if defined(CONFIG_IA64) || defined(CONFIG_ARM64) 270 /* ioremap will take care of cache attributes */ 271 #define should_use_kmap(pfn) 0 272 #else 273 #define should_use_kmap(pfn) page_is_ram(pfn) 274 #endif 275 276 static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 277 { 278 unsigned long pfn; 279 280 pfn = pg_off >> PAGE_SHIFT; 281 if (should_use_kmap(pfn)) { 282 if (pg_sz > PAGE_SIZE) 283 return NULL; 284 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 285 } else 286 return acpi_os_ioremap(pg_off, pg_sz); 287 } 288 289 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 290 { 291 unsigned long pfn; 292 293 pfn = pg_off >> PAGE_SHIFT; 294 if (should_use_kmap(pfn)) 295 kunmap(pfn_to_page(pfn)); 296 else 297 iounmap(vaddr); 298 } 299 300 /** 301 * acpi_os_map_iomem - Get a virtual address for a given physical address range. 302 * @phys: Start of the physical address range to map. 303 * @size: Size of the physical address range to map. 304 * 305 * Look up the given physical address range in the list of existing ACPI memory 306 * mappings. If found, get a reference to it and return a pointer to it (its 307 * virtual address). If not found, map it, add it to that list and return a 308 * pointer to it. 309 * 310 * During early init (when acpi_permanent_mmap has not been set yet) this 311 * routine simply calls __acpi_map_table() to get the job done. 312 */ 313 void __iomem *__ref 314 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) 315 { 316 struct acpi_ioremap *map; 317 void __iomem *virt; 318 acpi_physical_address pg_off; 319 acpi_size pg_sz; 320 321 if (phys > ULONG_MAX) { 322 printk(KERN_ERR PREFIX "Cannot map memory that high\n"); 323 return NULL; 324 } 325 326 if (!acpi_permanent_mmap) 327 return __acpi_map_table((unsigned long)phys, size); 328 329 mutex_lock(&acpi_ioremap_lock); 330 /* Check if there's a suitable mapping already. */ 331 map = acpi_map_lookup(phys, size); 332 if (map) { 333 map->refcount++; 334 goto out; 335 } 336 337 map = kzalloc(sizeof(*map), GFP_KERNEL); 338 if (!map) { 339 mutex_unlock(&acpi_ioremap_lock); 340 return NULL; 341 } 342 343 pg_off = round_down(phys, PAGE_SIZE); 344 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 345 virt = acpi_map(pg_off, pg_sz); 346 if (!virt) { 347 mutex_unlock(&acpi_ioremap_lock); 348 kfree(map); 349 return NULL; 350 } 351 352 INIT_LIST_HEAD(&map->list); 353 map->virt = virt; 354 map->phys = pg_off; 355 map->size = pg_sz; 356 map->refcount = 1; 357 358 list_add_tail_rcu(&map->list, &acpi_ioremaps); 359 360 out: 361 mutex_unlock(&acpi_ioremap_lock); 362 return map->virt + (phys - map->phys); 363 } 364 EXPORT_SYMBOL_GPL(acpi_os_map_iomem); 365 366 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 367 { 368 return (void *)acpi_os_map_iomem(phys, size); 369 } 370 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 371 372 static void acpi_os_drop_map_ref(struct acpi_ioremap *map) 373 { 374 if (!--map->refcount) 375 list_del_rcu(&map->list); 376 } 377 378 static void acpi_os_map_cleanup(struct acpi_ioremap *map) 379 { 380 if (!map->refcount) { 381 synchronize_rcu_expedited(); 382 acpi_unmap(map->phys, map->virt); 383 kfree(map); 384 } 385 } 386 387 /** 388 * acpi_os_unmap_iomem - Drop a memory mapping reference. 389 * @virt: Start of the address range to drop a reference to. 390 * @size: Size of the address range to drop a reference to. 391 * 392 * Look up the given virtual address range in the list of existing ACPI memory 393 * mappings, drop a reference to it and unmap it if there are no more active 394 * references to it. 395 * 396 * During early init (when acpi_permanent_mmap has not been set yet) this 397 * routine simply calls __acpi_unmap_table() to get the job done. Since 398 * __acpi_unmap_table() is an __init function, the __ref annotation is needed 399 * here. 400 */ 401 void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size) 402 { 403 struct acpi_ioremap *map; 404 405 if (!acpi_permanent_mmap) { 406 __acpi_unmap_table(virt, size); 407 return; 408 } 409 410 mutex_lock(&acpi_ioremap_lock); 411 map = acpi_map_lookup_virt(virt, size); 412 if (!map) { 413 mutex_unlock(&acpi_ioremap_lock); 414 WARN(true, PREFIX "%s: bad address %p\n", __func__, virt); 415 return; 416 } 417 acpi_os_drop_map_ref(map); 418 mutex_unlock(&acpi_ioremap_lock); 419 420 acpi_os_map_cleanup(map); 421 } 422 EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem); 423 424 void __ref acpi_os_unmap_memory(void *virt, acpi_size size) 425 { 426 return acpi_os_unmap_iomem((void __iomem *)virt, size); 427 } 428 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory); 429 430 int acpi_os_map_generic_address(struct acpi_generic_address *gas) 431 { 432 u64 addr; 433 void __iomem *virt; 434 435 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 436 return 0; 437 438 /* Handle possible alignment issues */ 439 memcpy(&addr, &gas->address, sizeof(addr)); 440 if (!addr || !gas->bit_width) 441 return -EINVAL; 442 443 virt = acpi_os_map_iomem(addr, gas->bit_width / 8); 444 if (!virt) 445 return -EIO; 446 447 return 0; 448 } 449 EXPORT_SYMBOL(acpi_os_map_generic_address); 450 451 void acpi_os_unmap_generic_address(struct acpi_generic_address *gas) 452 { 453 u64 addr; 454 struct acpi_ioremap *map; 455 456 if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 457 return; 458 459 /* Handle possible alignment issues */ 460 memcpy(&addr, &gas->address, sizeof(addr)); 461 if (!addr || !gas->bit_width) 462 return; 463 464 mutex_lock(&acpi_ioremap_lock); 465 map = acpi_map_lookup(addr, gas->bit_width / 8); 466 if (!map) { 467 mutex_unlock(&acpi_ioremap_lock); 468 return; 469 } 470 acpi_os_drop_map_ref(map); 471 mutex_unlock(&acpi_ioremap_lock); 472 473 acpi_os_map_cleanup(map); 474 } 475 EXPORT_SYMBOL(acpi_os_unmap_generic_address); 476 477 #ifdef ACPI_FUTURE_USAGE 478 acpi_status 479 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys) 480 { 481 if (!phys || !virt) 482 return AE_BAD_PARAMETER; 483 484 *phys = virt_to_phys(virt); 485 486 return AE_OK; 487 } 488 #endif 489 490 #ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE 491 static bool acpi_rev_override; 492 493 int __init acpi_rev_override_setup(char *str) 494 { 495 acpi_rev_override = true; 496 return 1; 497 } 498 __setup("acpi_rev_override", acpi_rev_override_setup); 499 #else 500 #define acpi_rev_override false 501 #endif 502 503 #define ACPI_MAX_OVERRIDE_LEN 100 504 505 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN]; 506 507 acpi_status 508 acpi_os_predefined_override(const struct acpi_predefined_names *init_val, 509 acpi_string *new_val) 510 { 511 if (!init_val || !new_val) 512 return AE_BAD_PARAMETER; 513 514 *new_val = NULL; 515 if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) { 516 printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n", 517 acpi_os_name); 518 *new_val = acpi_os_name; 519 } 520 521 if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) { 522 printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n"); 523 *new_val = (char *)5; 524 } 525 526 return AE_OK; 527 } 528 529 static irqreturn_t acpi_irq(int irq, void *dev_id) 530 { 531 u32 handled; 532 533 handled = (*acpi_irq_handler) (acpi_irq_context); 534 535 if (handled) { 536 acpi_irq_handled++; 537 return IRQ_HANDLED; 538 } else { 539 acpi_irq_not_handled++; 540 return IRQ_NONE; 541 } 542 } 543 544 acpi_status 545 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler, 546 void *context) 547 { 548 unsigned int irq; 549 550 acpi_irq_stats_init(); 551 552 /* 553 * ACPI interrupts different from the SCI in our copy of the FADT are 554 * not supported. 555 */ 556 if (gsi != acpi_gbl_FADT.sci_interrupt) 557 return AE_BAD_PARAMETER; 558 559 if (acpi_irq_handler) 560 return AE_ALREADY_ACQUIRED; 561 562 if (acpi_gsi_to_irq(gsi, &irq) < 0) { 563 printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n", 564 gsi); 565 return AE_OK; 566 } 567 568 acpi_irq_handler = handler; 569 acpi_irq_context = context; 570 if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) { 571 printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq); 572 acpi_irq_handler = NULL; 573 return AE_NOT_ACQUIRED; 574 } 575 acpi_sci_irq = irq; 576 577 return AE_OK; 578 } 579 580 acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler) 581 { 582 if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid()) 583 return AE_BAD_PARAMETER; 584 585 free_irq(acpi_sci_irq, acpi_irq); 586 acpi_irq_handler = NULL; 587 acpi_sci_irq = INVALID_ACPI_IRQ; 588 589 return AE_OK; 590 } 591 592 /* 593 * Running in interpreter thread context, safe to sleep 594 */ 595 596 void acpi_os_sleep(u64 ms) 597 { 598 msleep(ms); 599 } 600 601 void acpi_os_stall(u32 us) 602 { 603 while (us) { 604 u32 delay = 1000; 605 606 if (delay > us) 607 delay = us; 608 udelay(delay); 609 touch_nmi_watchdog(); 610 us -= delay; 611 } 612 } 613 614 /* 615 * Support ACPI 3.0 AML Timer operand 616 * Returns 64-bit free-running, monotonically increasing timer 617 * with 100ns granularity 618 */ 619 u64 acpi_os_get_timer(void) 620 { 621 u64 time_ns = ktime_to_ns(ktime_get()); 622 do_div(time_ns, 100); 623 return time_ns; 624 } 625 626 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width) 627 { 628 u32 dummy; 629 630 if (!value) 631 value = &dummy; 632 633 *value = 0; 634 if (width <= 8) { 635 *(u8 *) value = inb(port); 636 } else if (width <= 16) { 637 *(u16 *) value = inw(port); 638 } else if (width <= 32) { 639 *(u32 *) value = inl(port); 640 } else { 641 BUG(); 642 } 643 644 return AE_OK; 645 } 646 647 EXPORT_SYMBOL(acpi_os_read_port); 648 649 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width) 650 { 651 if (width <= 8) { 652 outb(value, port); 653 } else if (width <= 16) { 654 outw(value, port); 655 } else if (width <= 32) { 656 outl(value, port); 657 } else { 658 BUG(); 659 } 660 661 return AE_OK; 662 } 663 664 EXPORT_SYMBOL(acpi_os_write_port); 665 666 acpi_status 667 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width) 668 { 669 void __iomem *virt_addr; 670 unsigned int size = width / 8; 671 bool unmap = false; 672 u64 dummy; 673 674 rcu_read_lock(); 675 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 676 if (!virt_addr) { 677 rcu_read_unlock(); 678 virt_addr = acpi_os_ioremap(phys_addr, size); 679 if (!virt_addr) 680 return AE_BAD_ADDRESS; 681 unmap = true; 682 } 683 684 if (!value) 685 value = &dummy; 686 687 switch (width) { 688 case 8: 689 *(u8 *) value = readb(virt_addr); 690 break; 691 case 16: 692 *(u16 *) value = readw(virt_addr); 693 break; 694 case 32: 695 *(u32 *) value = readl(virt_addr); 696 break; 697 case 64: 698 *(u64 *) value = readq(virt_addr); 699 break; 700 default: 701 BUG(); 702 } 703 704 if (unmap) 705 iounmap(virt_addr); 706 else 707 rcu_read_unlock(); 708 709 return AE_OK; 710 } 711 712 acpi_status 713 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width) 714 { 715 void __iomem *virt_addr; 716 unsigned int size = width / 8; 717 bool unmap = false; 718 719 rcu_read_lock(); 720 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 721 if (!virt_addr) { 722 rcu_read_unlock(); 723 virt_addr = acpi_os_ioremap(phys_addr, size); 724 if (!virt_addr) 725 return AE_BAD_ADDRESS; 726 unmap = true; 727 } 728 729 switch (width) { 730 case 8: 731 writeb(value, virt_addr); 732 break; 733 case 16: 734 writew(value, virt_addr); 735 break; 736 case 32: 737 writel(value, virt_addr); 738 break; 739 case 64: 740 writeq(value, virt_addr); 741 break; 742 default: 743 BUG(); 744 } 745 746 if (unmap) 747 iounmap(virt_addr); 748 else 749 rcu_read_unlock(); 750 751 return AE_OK; 752 } 753 754 acpi_status 755 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 756 u64 *value, u32 width) 757 { 758 int result, size; 759 u32 value32; 760 761 if (!value) 762 return AE_BAD_PARAMETER; 763 764 switch (width) { 765 case 8: 766 size = 1; 767 break; 768 case 16: 769 size = 2; 770 break; 771 case 32: 772 size = 4; 773 break; 774 default: 775 return AE_ERROR; 776 } 777 778 result = raw_pci_read(pci_id->segment, pci_id->bus, 779 PCI_DEVFN(pci_id->device, pci_id->function), 780 reg, size, &value32); 781 *value = value32; 782 783 return (result ? AE_ERROR : AE_OK); 784 } 785 786 acpi_status 787 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, 788 u64 value, u32 width) 789 { 790 int result, size; 791 792 switch (width) { 793 case 8: 794 size = 1; 795 break; 796 case 16: 797 size = 2; 798 break; 799 case 32: 800 size = 4; 801 break; 802 default: 803 return AE_ERROR; 804 } 805 806 result = raw_pci_write(pci_id->segment, pci_id->bus, 807 PCI_DEVFN(pci_id->device, pci_id->function), 808 reg, size, value); 809 810 return (result ? AE_ERROR : AE_OK); 811 } 812 813 static void acpi_os_execute_deferred(struct work_struct *work) 814 { 815 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 816 817 dpc->function(dpc->context); 818 kfree(dpc); 819 } 820 821 #ifdef CONFIG_ACPI_DEBUGGER 822 static struct acpi_debugger acpi_debugger; 823 static bool acpi_debugger_initialized; 824 825 int acpi_register_debugger(struct module *owner, 826 const struct acpi_debugger_ops *ops) 827 { 828 int ret = 0; 829 830 mutex_lock(&acpi_debugger.lock); 831 if (acpi_debugger.ops) { 832 ret = -EBUSY; 833 goto err_lock; 834 } 835 836 acpi_debugger.owner = owner; 837 acpi_debugger.ops = ops; 838 839 err_lock: 840 mutex_unlock(&acpi_debugger.lock); 841 return ret; 842 } 843 EXPORT_SYMBOL(acpi_register_debugger); 844 845 void acpi_unregister_debugger(const struct acpi_debugger_ops *ops) 846 { 847 mutex_lock(&acpi_debugger.lock); 848 if (ops == acpi_debugger.ops) { 849 acpi_debugger.ops = NULL; 850 acpi_debugger.owner = NULL; 851 } 852 mutex_unlock(&acpi_debugger.lock); 853 } 854 EXPORT_SYMBOL(acpi_unregister_debugger); 855 856 int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context) 857 { 858 int ret; 859 int (*func)(acpi_osd_exec_callback, void *); 860 struct module *owner; 861 862 if (!acpi_debugger_initialized) 863 return -ENODEV; 864 mutex_lock(&acpi_debugger.lock); 865 if (!acpi_debugger.ops) { 866 ret = -ENODEV; 867 goto err_lock; 868 } 869 if (!try_module_get(acpi_debugger.owner)) { 870 ret = -ENODEV; 871 goto err_lock; 872 } 873 func = acpi_debugger.ops->create_thread; 874 owner = acpi_debugger.owner; 875 mutex_unlock(&acpi_debugger.lock); 876 877 ret = func(function, context); 878 879 mutex_lock(&acpi_debugger.lock); 880 module_put(owner); 881 err_lock: 882 mutex_unlock(&acpi_debugger.lock); 883 return ret; 884 } 885 886 ssize_t acpi_debugger_write_log(const char *msg) 887 { 888 ssize_t ret; 889 ssize_t (*func)(const char *); 890 struct module *owner; 891 892 if (!acpi_debugger_initialized) 893 return -ENODEV; 894 mutex_lock(&acpi_debugger.lock); 895 if (!acpi_debugger.ops) { 896 ret = -ENODEV; 897 goto err_lock; 898 } 899 if (!try_module_get(acpi_debugger.owner)) { 900 ret = -ENODEV; 901 goto err_lock; 902 } 903 func = acpi_debugger.ops->write_log; 904 owner = acpi_debugger.owner; 905 mutex_unlock(&acpi_debugger.lock); 906 907 ret = func(msg); 908 909 mutex_lock(&acpi_debugger.lock); 910 module_put(owner); 911 err_lock: 912 mutex_unlock(&acpi_debugger.lock); 913 return ret; 914 } 915 916 ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length) 917 { 918 ssize_t ret; 919 ssize_t (*func)(char *, size_t); 920 struct module *owner; 921 922 if (!acpi_debugger_initialized) 923 return -ENODEV; 924 mutex_lock(&acpi_debugger.lock); 925 if (!acpi_debugger.ops) { 926 ret = -ENODEV; 927 goto err_lock; 928 } 929 if (!try_module_get(acpi_debugger.owner)) { 930 ret = -ENODEV; 931 goto err_lock; 932 } 933 func = acpi_debugger.ops->read_cmd; 934 owner = acpi_debugger.owner; 935 mutex_unlock(&acpi_debugger.lock); 936 937 ret = func(buffer, buffer_length); 938 939 mutex_lock(&acpi_debugger.lock); 940 module_put(owner); 941 err_lock: 942 mutex_unlock(&acpi_debugger.lock); 943 return ret; 944 } 945 946 int acpi_debugger_wait_command_ready(void) 947 { 948 int ret; 949 int (*func)(bool, char *, size_t); 950 struct module *owner; 951 952 if (!acpi_debugger_initialized) 953 return -ENODEV; 954 mutex_lock(&acpi_debugger.lock); 955 if (!acpi_debugger.ops) { 956 ret = -ENODEV; 957 goto err_lock; 958 } 959 if (!try_module_get(acpi_debugger.owner)) { 960 ret = -ENODEV; 961 goto err_lock; 962 } 963 func = acpi_debugger.ops->wait_command_ready; 964 owner = acpi_debugger.owner; 965 mutex_unlock(&acpi_debugger.lock); 966 967 ret = func(acpi_gbl_method_executing, 968 acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE); 969 970 mutex_lock(&acpi_debugger.lock); 971 module_put(owner); 972 err_lock: 973 mutex_unlock(&acpi_debugger.lock); 974 return ret; 975 } 976 977 int acpi_debugger_notify_command_complete(void) 978 { 979 int ret; 980 int (*func)(void); 981 struct module *owner; 982 983 if (!acpi_debugger_initialized) 984 return -ENODEV; 985 mutex_lock(&acpi_debugger.lock); 986 if (!acpi_debugger.ops) { 987 ret = -ENODEV; 988 goto err_lock; 989 } 990 if (!try_module_get(acpi_debugger.owner)) { 991 ret = -ENODEV; 992 goto err_lock; 993 } 994 func = acpi_debugger.ops->notify_command_complete; 995 owner = acpi_debugger.owner; 996 mutex_unlock(&acpi_debugger.lock); 997 998 ret = func(); 999 1000 mutex_lock(&acpi_debugger.lock); 1001 module_put(owner); 1002 err_lock: 1003 mutex_unlock(&acpi_debugger.lock); 1004 return ret; 1005 } 1006 1007 int __init acpi_debugger_init(void) 1008 { 1009 mutex_init(&acpi_debugger.lock); 1010 acpi_debugger_initialized = true; 1011 return 0; 1012 } 1013 #endif 1014 1015 /******************************************************************************* 1016 * 1017 * FUNCTION: acpi_os_execute 1018 * 1019 * PARAMETERS: Type - Type of the callback 1020 * Function - Function to be executed 1021 * Context - Function parameters 1022 * 1023 * RETURN: Status 1024 * 1025 * DESCRIPTION: Depending on type, either queues function for deferred execution or 1026 * immediately executes function on a separate thread. 1027 * 1028 ******************************************************************************/ 1029 1030 acpi_status acpi_os_execute(acpi_execute_type type, 1031 acpi_osd_exec_callback function, void *context) 1032 { 1033 acpi_status status = AE_OK; 1034 struct acpi_os_dpc *dpc; 1035 struct workqueue_struct *queue; 1036 int ret; 1037 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1038 "Scheduling function [%p(%p)] for deferred execution.\n", 1039 function, context)); 1040 1041 if (type == OSL_DEBUGGER_MAIN_THREAD) { 1042 ret = acpi_debugger_create_thread(function, context); 1043 if (ret) { 1044 pr_err("Call to kthread_create() failed.\n"); 1045 status = AE_ERROR; 1046 } 1047 goto out_thread; 1048 } 1049 1050 /* 1051 * Allocate/initialize DPC structure. Note that this memory will be 1052 * freed by the callee. The kernel handles the work_struct list in a 1053 * way that allows us to also free its memory inside the callee. 1054 * Because we may want to schedule several tasks with different 1055 * parameters we can't use the approach some kernel code uses of 1056 * having a static work_struct. 1057 */ 1058 1059 dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); 1060 if (!dpc) 1061 return AE_NO_MEMORY; 1062 1063 dpc->function = function; 1064 dpc->context = context; 1065 1066 /* 1067 * To prevent lockdep from complaining unnecessarily, make sure that 1068 * there is a different static lockdep key for each workqueue by using 1069 * INIT_WORK() for each of them separately. 1070 */ 1071 if (type == OSL_NOTIFY_HANDLER) { 1072 queue = kacpi_notify_wq; 1073 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1074 } else if (type == OSL_GPE_HANDLER) { 1075 queue = kacpid_wq; 1076 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 1077 } else { 1078 pr_err("Unsupported os_execute type %d.\n", type); 1079 status = AE_ERROR; 1080 } 1081 1082 if (ACPI_FAILURE(status)) 1083 goto err_workqueue; 1084 1085 /* 1086 * On some machines, a software-initiated SMI causes corruption unless 1087 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but 1088 * typically it's done in GPE-related methods that are run via 1089 * workqueues, so we can avoid the known corruption cases by always 1090 * queueing on CPU 0. 1091 */ 1092 ret = queue_work_on(0, queue, &dpc->work); 1093 if (!ret) { 1094 printk(KERN_ERR PREFIX 1095 "Call to queue_work() failed.\n"); 1096 status = AE_ERROR; 1097 } 1098 err_workqueue: 1099 if (ACPI_FAILURE(status)) 1100 kfree(dpc); 1101 out_thread: 1102 return status; 1103 } 1104 EXPORT_SYMBOL(acpi_os_execute); 1105 1106 void acpi_os_wait_events_complete(void) 1107 { 1108 /* 1109 * Make sure the GPE handler or the fixed event handler is not used 1110 * on another CPU after removal. 1111 */ 1112 if (acpi_sci_irq_valid()) 1113 synchronize_hardirq(acpi_sci_irq); 1114 flush_workqueue(kacpid_wq); 1115 flush_workqueue(kacpi_notify_wq); 1116 } 1117 1118 struct acpi_hp_work { 1119 struct work_struct work; 1120 struct acpi_device *adev; 1121 u32 src; 1122 }; 1123 1124 static void acpi_hotplug_work_fn(struct work_struct *work) 1125 { 1126 struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work); 1127 1128 acpi_os_wait_events_complete(); 1129 acpi_device_hotplug(hpw->adev, hpw->src); 1130 kfree(hpw); 1131 } 1132 1133 acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src) 1134 { 1135 struct acpi_hp_work *hpw; 1136 1137 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 1138 "Scheduling hotplug event (%p, %u) for deferred execution.\n", 1139 adev, src)); 1140 1141 hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); 1142 if (!hpw) 1143 return AE_NO_MEMORY; 1144 1145 INIT_WORK(&hpw->work, acpi_hotplug_work_fn); 1146 hpw->adev = adev; 1147 hpw->src = src; 1148 /* 1149 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because 1150 * the hotplug code may call driver .remove() functions, which may 1151 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush 1152 * these workqueues. 1153 */ 1154 if (!queue_work(kacpi_hotplug_wq, &hpw->work)) { 1155 kfree(hpw); 1156 return AE_ERROR; 1157 } 1158 return AE_OK; 1159 } 1160 1161 bool acpi_queue_hotplug_work(struct work_struct *work) 1162 { 1163 return queue_work(kacpi_hotplug_wq, work); 1164 } 1165 1166 acpi_status 1167 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle) 1168 { 1169 struct semaphore *sem = NULL; 1170 1171 sem = acpi_os_allocate_zeroed(sizeof(struct semaphore)); 1172 if (!sem) 1173 return AE_NO_MEMORY; 1174 1175 sema_init(sem, initial_units); 1176 1177 *handle = (acpi_handle *) sem; 1178 1179 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n", 1180 *handle, initial_units)); 1181 1182 return AE_OK; 1183 } 1184 1185 /* 1186 * TODO: A better way to delete semaphores? Linux doesn't have a 1187 * 'delete_semaphore()' function -- may result in an invalid 1188 * pointer dereference for non-synchronized consumers. Should 1189 * we at least check for blocked threads and signal/cancel them? 1190 */ 1191 1192 acpi_status acpi_os_delete_semaphore(acpi_handle handle) 1193 { 1194 struct semaphore *sem = (struct semaphore *)handle; 1195 1196 if (!sem) 1197 return AE_BAD_PARAMETER; 1198 1199 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle)); 1200 1201 BUG_ON(!list_empty(&sem->wait_list)); 1202 kfree(sem); 1203 sem = NULL; 1204 1205 return AE_OK; 1206 } 1207 1208 /* 1209 * TODO: Support for units > 1? 1210 */ 1211 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) 1212 { 1213 acpi_status status = AE_OK; 1214 struct semaphore *sem = (struct semaphore *)handle; 1215 long jiffies; 1216 int ret = 0; 1217 1218 if (!acpi_os_initialized) 1219 return AE_OK; 1220 1221 if (!sem || (units < 1)) 1222 return AE_BAD_PARAMETER; 1223 1224 if (units > 1) 1225 return AE_SUPPORT; 1226 1227 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", 1228 handle, units, timeout)); 1229 1230 if (timeout == ACPI_WAIT_FOREVER) 1231 jiffies = MAX_SCHEDULE_TIMEOUT; 1232 else 1233 jiffies = msecs_to_jiffies(timeout); 1234 1235 ret = down_timeout(sem, jiffies); 1236 if (ret) 1237 status = AE_TIME; 1238 1239 if (ACPI_FAILURE(status)) { 1240 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1241 "Failed to acquire semaphore[%p|%d|%d], %s", 1242 handle, units, timeout, 1243 acpi_format_exception(status))); 1244 } else { 1245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 1246 "Acquired semaphore[%p|%d|%d]", handle, 1247 units, timeout)); 1248 } 1249 1250 return status; 1251 } 1252 1253 /* 1254 * TODO: Support for units > 1? 1255 */ 1256 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units) 1257 { 1258 struct semaphore *sem = (struct semaphore *)handle; 1259 1260 if (!acpi_os_initialized) 1261 return AE_OK; 1262 1263 if (!sem || (units < 1)) 1264 return AE_BAD_PARAMETER; 1265 1266 if (units > 1) 1267 return AE_SUPPORT; 1268 1269 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle, 1270 units)); 1271 1272 up(sem); 1273 1274 return AE_OK; 1275 } 1276 1277 acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read) 1278 { 1279 #ifdef ENABLE_DEBUGGER 1280 if (acpi_in_debugger) { 1281 u32 chars; 1282 1283 kdb_read(buffer, buffer_length); 1284 1285 /* remove the CR kdb includes */ 1286 chars = strlen(buffer) - 1; 1287 buffer[chars] = '\0'; 1288 } 1289 #else 1290 int ret; 1291 1292 ret = acpi_debugger_read_cmd(buffer, buffer_length); 1293 if (ret < 0) 1294 return AE_ERROR; 1295 if (bytes_read) 1296 *bytes_read = ret; 1297 #endif 1298 1299 return AE_OK; 1300 } 1301 EXPORT_SYMBOL(acpi_os_get_line); 1302 1303 acpi_status acpi_os_wait_command_ready(void) 1304 { 1305 int ret; 1306 1307 ret = acpi_debugger_wait_command_ready(); 1308 if (ret < 0) 1309 return AE_ERROR; 1310 return AE_OK; 1311 } 1312 1313 acpi_status acpi_os_notify_command_complete(void) 1314 { 1315 int ret; 1316 1317 ret = acpi_debugger_notify_command_complete(); 1318 if (ret < 0) 1319 return AE_ERROR; 1320 return AE_OK; 1321 } 1322 1323 acpi_status acpi_os_signal(u32 function, void *info) 1324 { 1325 switch (function) { 1326 case ACPI_SIGNAL_FATAL: 1327 printk(KERN_ERR PREFIX "Fatal opcode executed\n"); 1328 break; 1329 case ACPI_SIGNAL_BREAKPOINT: 1330 /* 1331 * AML Breakpoint 1332 * ACPI spec. says to treat it as a NOP unless 1333 * you are debugging. So if/when we integrate 1334 * AML debugger into the kernel debugger its 1335 * hook will go here. But until then it is 1336 * not useful to print anything on breakpoints. 1337 */ 1338 break; 1339 default: 1340 break; 1341 } 1342 1343 return AE_OK; 1344 } 1345 1346 static int __init acpi_os_name_setup(char *str) 1347 { 1348 char *p = acpi_os_name; 1349 int count = ACPI_MAX_OVERRIDE_LEN - 1; 1350 1351 if (!str || !*str) 1352 return 0; 1353 1354 for (; count-- && *str; str++) { 1355 if (isalnum(*str) || *str == ' ' || *str == ':') 1356 *p++ = *str; 1357 else if (*str == '\'' || *str == '"') 1358 continue; 1359 else 1360 break; 1361 } 1362 *p = 0; 1363 1364 return 1; 1365 1366 } 1367 1368 __setup("acpi_os_name=", acpi_os_name_setup); 1369 1370 /* 1371 * Disable the auto-serialization of named objects creation methods. 1372 * 1373 * This feature is enabled by default. It marks the AML control methods 1374 * that contain the opcodes to create named objects as "Serialized". 1375 */ 1376 static int __init acpi_no_auto_serialize_setup(char *str) 1377 { 1378 acpi_gbl_auto_serialize_methods = FALSE; 1379 pr_info("ACPI: auto-serialization disabled\n"); 1380 1381 return 1; 1382 } 1383 1384 __setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup); 1385 1386 /* Check of resource interference between native drivers and ACPI 1387 * OperationRegions (SystemIO and System Memory only). 1388 * IO ports and memory declared in ACPI might be used by the ACPI subsystem 1389 * in arbitrary AML code and can interfere with legacy drivers. 1390 * acpi_enforce_resources= can be set to: 1391 * 1392 * - strict (default) (2) 1393 * -> further driver trying to access the resources will not load 1394 * - lax (1) 1395 * -> further driver trying to access the resources will load, but you 1396 * get a system message that something might go wrong... 1397 * 1398 * - no (0) 1399 * -> ACPI Operation Region resources will not be registered 1400 * 1401 */ 1402 #define ENFORCE_RESOURCES_STRICT 2 1403 #define ENFORCE_RESOURCES_LAX 1 1404 #define ENFORCE_RESOURCES_NO 0 1405 1406 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1407 1408 static int __init acpi_enforce_resources_setup(char *str) 1409 { 1410 if (str == NULL || *str == '\0') 1411 return 0; 1412 1413 if (!strcmp("strict", str)) 1414 acpi_enforce_resources = ENFORCE_RESOURCES_STRICT; 1415 else if (!strcmp("lax", str)) 1416 acpi_enforce_resources = ENFORCE_RESOURCES_LAX; 1417 else if (!strcmp("no", str)) 1418 acpi_enforce_resources = ENFORCE_RESOURCES_NO; 1419 1420 return 1; 1421 } 1422 1423 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup); 1424 1425 /* Check for resource conflicts between ACPI OperationRegions and native 1426 * drivers */ 1427 int acpi_check_resource_conflict(const struct resource *res) 1428 { 1429 acpi_adr_space_type space_id; 1430 acpi_size length; 1431 u8 warn = 0; 1432 int clash = 0; 1433 1434 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1435 return 0; 1436 if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM)) 1437 return 0; 1438 1439 if (res->flags & IORESOURCE_IO) 1440 space_id = ACPI_ADR_SPACE_SYSTEM_IO; 1441 else 1442 space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY; 1443 1444 length = resource_size(res); 1445 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) 1446 warn = 1; 1447 clash = acpi_check_address_range(space_id, res->start, length, warn); 1448 1449 if (clash) { 1450 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1451 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1452 printk(KERN_NOTICE "ACPI: This conflict may" 1453 " cause random problems and system" 1454 " instability\n"); 1455 printk(KERN_INFO "ACPI: If an ACPI driver is available" 1456 " for this device, you should use it instead of" 1457 " the native driver\n"); 1458 } 1459 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1460 return -EBUSY; 1461 } 1462 return 0; 1463 } 1464 EXPORT_SYMBOL(acpi_check_resource_conflict); 1465 1466 int acpi_check_region(resource_size_t start, resource_size_t n, 1467 const char *name) 1468 { 1469 struct resource res = { 1470 .start = start, 1471 .end = start + n - 1, 1472 .name = name, 1473 .flags = IORESOURCE_IO, 1474 }; 1475 1476 return acpi_check_resource_conflict(&res); 1477 } 1478 EXPORT_SYMBOL(acpi_check_region); 1479 1480 /* 1481 * Let drivers know whether the resource checks are effective 1482 */ 1483 int acpi_resources_are_enforced(void) 1484 { 1485 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT; 1486 } 1487 EXPORT_SYMBOL(acpi_resources_are_enforced); 1488 1489 /* 1490 * Deallocate the memory for a spinlock. 1491 */ 1492 void acpi_os_delete_lock(acpi_spinlock handle) 1493 { 1494 ACPI_FREE(handle); 1495 } 1496 1497 /* 1498 * Acquire a spinlock. 1499 * 1500 * handle is a pointer to the spinlock_t. 1501 */ 1502 1503 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1504 { 1505 acpi_cpu_flags flags; 1506 spin_lock_irqsave(lockp, flags); 1507 return flags; 1508 } 1509 1510 /* 1511 * Release a spinlock. See above. 1512 */ 1513 1514 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1515 { 1516 spin_unlock_irqrestore(lockp, flags); 1517 } 1518 1519 #ifndef ACPI_USE_LOCAL_CACHE 1520 1521 /******************************************************************************* 1522 * 1523 * FUNCTION: acpi_os_create_cache 1524 * 1525 * PARAMETERS: name - Ascii name for the cache 1526 * size - Size of each cached object 1527 * depth - Maximum depth of the cache (in objects) <ignored> 1528 * cache - Where the new cache object is returned 1529 * 1530 * RETURN: status 1531 * 1532 * DESCRIPTION: Create a cache object 1533 * 1534 ******************************************************************************/ 1535 1536 acpi_status 1537 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache) 1538 { 1539 *cache = kmem_cache_create(name, size, 0, 0, NULL); 1540 if (*cache == NULL) 1541 return AE_ERROR; 1542 else 1543 return AE_OK; 1544 } 1545 1546 /******************************************************************************* 1547 * 1548 * FUNCTION: acpi_os_purge_cache 1549 * 1550 * PARAMETERS: Cache - Handle to cache object 1551 * 1552 * RETURN: Status 1553 * 1554 * DESCRIPTION: Free all objects within the requested cache. 1555 * 1556 ******************************************************************************/ 1557 1558 acpi_status acpi_os_purge_cache(acpi_cache_t * cache) 1559 { 1560 kmem_cache_shrink(cache); 1561 return (AE_OK); 1562 } 1563 1564 /******************************************************************************* 1565 * 1566 * FUNCTION: acpi_os_delete_cache 1567 * 1568 * PARAMETERS: Cache - Handle to cache object 1569 * 1570 * RETURN: Status 1571 * 1572 * DESCRIPTION: Free all objects within the requested cache and delete the 1573 * cache object. 1574 * 1575 ******************************************************************************/ 1576 1577 acpi_status acpi_os_delete_cache(acpi_cache_t * cache) 1578 { 1579 kmem_cache_destroy(cache); 1580 return (AE_OK); 1581 } 1582 1583 /******************************************************************************* 1584 * 1585 * FUNCTION: acpi_os_release_object 1586 * 1587 * PARAMETERS: Cache - Handle to cache object 1588 * Object - The object to be released 1589 * 1590 * RETURN: None 1591 * 1592 * DESCRIPTION: Release an object to the specified cache. If cache is full, 1593 * the object is deleted. 1594 * 1595 ******************************************************************************/ 1596 1597 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object) 1598 { 1599 kmem_cache_free(cache, object); 1600 return (AE_OK); 1601 } 1602 #endif 1603 1604 static int __init acpi_no_static_ssdt_setup(char *s) 1605 { 1606 acpi_gbl_disable_ssdt_table_install = TRUE; 1607 pr_info("ACPI: static SSDT installation disabled\n"); 1608 1609 return 0; 1610 } 1611 1612 early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup); 1613 1614 static int __init acpi_disable_return_repair(char *s) 1615 { 1616 printk(KERN_NOTICE PREFIX 1617 "ACPI: Predefined validation mechanism disabled\n"); 1618 acpi_gbl_disable_auto_repair = TRUE; 1619 1620 return 1; 1621 } 1622 1623 __setup("acpica_no_return_repair", acpi_disable_return_repair); 1624 1625 acpi_status __init acpi_os_initialize(void) 1626 { 1627 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1628 acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1629 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block); 1630 acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block); 1631 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { 1632 /* 1633 * Use acpi_os_map_generic_address to pre-map the reset 1634 * register if it's in system memory. 1635 */ 1636 int rv; 1637 1638 rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register); 1639 pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv); 1640 } 1641 acpi_os_initialized = true; 1642 1643 return AE_OK; 1644 } 1645 1646 acpi_status __init acpi_os_initialize1(void) 1647 { 1648 kacpid_wq = alloc_workqueue("kacpid", 0, 1); 1649 kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); 1650 kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); 1651 BUG_ON(!kacpid_wq); 1652 BUG_ON(!kacpi_notify_wq); 1653 BUG_ON(!kacpi_hotplug_wq); 1654 acpi_osi_init(); 1655 return AE_OK; 1656 } 1657 1658 acpi_status acpi_os_terminate(void) 1659 { 1660 if (acpi_irq_handler) { 1661 acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt, 1662 acpi_irq_handler); 1663 } 1664 1665 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block); 1666 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block); 1667 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block); 1668 acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block); 1669 if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) 1670 acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register); 1671 1672 destroy_workqueue(kacpid_wq); 1673 destroy_workqueue(kacpi_notify_wq); 1674 destroy_workqueue(kacpi_hotplug_wq); 1675 1676 return AE_OK; 1677 } 1678 1679 acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control, 1680 u32 pm1b_control) 1681 { 1682 int rc = 0; 1683 if (__acpi_os_prepare_sleep) 1684 rc = __acpi_os_prepare_sleep(sleep_state, 1685 pm1a_control, pm1b_control); 1686 if (rc < 0) 1687 return AE_ERROR; 1688 else if (rc > 0) 1689 return AE_CTRL_TERMINATE; 1690 1691 return AE_OK; 1692 } 1693 1694 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state, 1695 u32 pm1a_ctrl, u32 pm1b_ctrl)) 1696 { 1697 __acpi_os_prepare_sleep = func; 1698 } 1699 1700 #if (ACPI_REDUCED_HARDWARE) 1701 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1702 u32 val_b) 1703 { 1704 int rc = 0; 1705 if (__acpi_os_prepare_extended_sleep) 1706 rc = __acpi_os_prepare_extended_sleep(sleep_state, 1707 val_a, val_b); 1708 if (rc < 0) 1709 return AE_ERROR; 1710 else if (rc > 0) 1711 return AE_CTRL_TERMINATE; 1712 1713 return AE_OK; 1714 } 1715 #else 1716 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, 1717 u32 val_b) 1718 { 1719 return AE_OK; 1720 } 1721 #endif 1722 1723 void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, 1724 u32 val_a, u32 val_b)) 1725 { 1726 __acpi_os_prepare_extended_sleep = func; 1727 } 1728 1729 acpi_status acpi_os_enter_sleep(u8 sleep_state, 1730 u32 reg_a_value, u32 reg_b_value) 1731 { 1732 acpi_status status; 1733 1734 if (acpi_gbl_reduced_hardware) 1735 status = acpi_os_prepare_extended_sleep(sleep_state, 1736 reg_a_value, 1737 reg_b_value); 1738 else 1739 status = acpi_os_prepare_sleep(sleep_state, 1740 reg_a_value, reg_b_value); 1741 return status; 1742 } 1743