/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
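 *
 * Lookups walk this list under the RCU read lock (see acpi_map_lookup()),
 * while additions and removals are serialized by 'acpi_ioremap_lock', and
 * acpi_os_map_cleanup() waits for readers to finish before a mapping is
 * actually torn down.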
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu_expedited();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
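 *
 * A minimal usage sketch, for illustration only (not taken from a real
 * caller); it assumes 'phys' and 'len' describe a 32-bit wide register:
 *
 *	void __iomem *p = acpi_os_map_iomem(phys, len);
 *
 *	if (p) {
 *		u32 v = readl(p);
 *		acpi_os_unmap_iomem(p, len);
 *	}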
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi,
				  acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
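	/*
	 * acpi_os_read_iomem() only fails for an unsupported access width;
	 * ACPICA only passes 8, 16, 32 or 64 here, so a failure would point
	 * at a programming error rather than a runtime condition.
	 */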
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
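	 * (With lockdep enabled, each INIT_WORK() call site declares its own
	 * static lock_class_key, which is what keeps the work items of the
	 * two queues in separate lockdep classes.)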
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
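 * For now, acpi_os_delete_semaphore() below only asserts (BUG_ON) that no
 * thread is still queued on the semaphore's wait list before freeing it.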
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
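		 * (For any other address space acpi_os_map_generic_address()
		 * returns 0 without mapping anything, so the call is safe
		 * regardless of where the reset register lives.)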
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}