// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "internal.h"

/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
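
/*
 * Illustrative note (not part of the driver): struct acpi_generic_address is
 * a packed structure taken straight from the FADT, so its 64-bit 'address'
 * field may not be naturally aligned.  A plain u64 load could fault on
 * architectures with strict alignment rules, which is why acpi_request_region()
 * above (and the other GAS helpers later in this file) copy the field out
 * first, e.g.:
 *
 *	u64 addr;
 *
 *	memcpy(&addr, &gas->address, sizeof(addr));	// safe unaligned read
 *	if (addr)
 *		pr_debug("GAS points at %#llx\n", addr);
 */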

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void __printf(1, 0) acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err("System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		pr_err("Cannot map memory that high: 0x%llx\n", phys);
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
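
/*
 * Illustrative sketch (not part of the driver): callers are expected to pair
 * acpi_os_map_iomem()/acpi_os_map_memory() with the matching unmap call once
 * they are done with the region, so the reference count tracked above can
 * drop back to zero.  The address and size below are made up:
 *
 *	void __iomem *regs;
 *
 *	regs = acpi_os_map_iomem(0xfed40000, 0x80);	// hypothetical range
 *	if (regs) {
 *		u32 val = readl(regs);			// use the mapping
 *		acpi_os_unmap_iomem(regs, 0x80);	// drop the reference
 *	}
 */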

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		pr_info("Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	if ((*acpi_irq_handler)(acpi_irq_context)) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
				 "acpi", acpi_irq)) {
		pr_err("SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	u64 usec = ms * USEC_PER_MSEC, delta_us = 50;

	/*
	 * Use a hrtimer because the timer wheel timers are optimized for
	 * cancelation before they expire and this timer is not going to be
	 * canceled.
	 *
	 * Set the delta between the requested sleep time and the effective
	 * deadline to at least 50 us in case there is an opportunity for timer
	 * coalescing.
	 *
	 * Moreover, longer sleeps can be assumed to need somewhat less timer
	 * precision, so sacrifice some of it for making the timer a more likely
	 * candidate for coalescing by setting the delta to 1% of the sleep time
	 * if it is above 5 ms (this value is chosen so that the delta is a
	 * continuous function of the sleep time).
	 */
	if (ms > 5)
		delta_us = (USEC_PER_MSEC / 100) * ms;

	usleep_range(usec, usec + delta_us);
}
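
/*
 * Worked example (illustrative only): for acpi_os_sleep(2) the slack stays at
 * the 50 us floor, so the effective window is usleep_range(2000, 2050).  For
 * acpi_os_sleep(10) the 1% rule applies and delta_us becomes
 * (USEC_PER_MSEC / 100) * 10 = 100, i.e. usleep_range(10000, 10100).
 */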

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}
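
/*
 * Worked example (illustrative only): ACPI_100NSEC_PER_SEC is 10,000,000, so
 * with HZ=1000 each jiffy contributes 10,000 timer ticks (1 ms expressed in
 * 100 ns units), and with HZ=250 it contributes 40,000 ticks (4 ms).  The
 * effective resolution of this timer is therefore one jiffy, even though the
 * unit it returns is 100 ns.
 */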

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
		/*
		 * set all-1 result as if reading from a non-existent
		 * I/O port
		 */
		*value = GENMASK(width, 0);
		return AE_NOT_IMPLEMENTED;
	}

	if (value)
		*value = 0;
	else
		value = &dummy;

	if (width <= 8) {
		*value = inb(port);
	} else if (width <= 16) {
		*value = inw(port);
	} else if (width <= 32) {
		*value = inl(port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (!IS_ENABLED(CONFIG_HAS_IOPORT))
		return AE_NOT_IMPLEMENTED;

	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
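
/*
 * Illustrative sketch (not part of the driver): AML SystemIO accesses reach
 * the two helpers above with the width taken from the operation region or
 * GAS, so a 16-bit field read looks roughly like this (the port number below
 * is made up):
 *
 *	u32 val;
 *
 *	if (ACPI_SUCCESS(acpi_os_read_port(0x1000, &val, 16)))
 *		pr_debug("PM register: %#x\n", val & 0xffff);
 */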

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	struct acpi_os_dpc *dpc;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Kernel thread creation failed\n");
			return AE_ERROR;
		}
		return AE_OK;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;
	INIT_WORK(&dpc->work, acpi_os_execute_deferred);

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	switch (type) {
	case OSL_NOTIFY_HANDLER:
		ret = queue_work(kacpi_notify_wq, &dpc->work);
		break;
	case OSL_GPE_HANDLER:
		/*
		 * On some machines, a software-initiated SMI causes corruption
		 * unless the SMI runs on CPU 0.  An SMI can be initiated by
		 * any AML, but typically it's done in GPE-related methods that
		 * are run via workqueues, so we can avoid the known corruption
		 * cases by always queueing on CPU 0.
		 */
		ret = queue_work_on(0, kacpid_wq, &dpc->work);
		break;
	default:
		pr_err("Unsupported os_execute type %d.\n", type);
		goto err;
	}
	if (!ret) {
		pr_err("Unable to queue work\n");
		goto err;
	}

	return AE_OK;

err:
	kfree(dpc);
	return AE_ERROR;
}
EXPORT_SYMBOL(acpi_os_execute);
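
/*
 * Illustrative sketch (not part of the driver): ACPICA calls the OSL entry
 * point above to defer work, e.g. to run a Notify handler outside of
 * interrupt context.  A caller-side sketch with a hypothetical callback:
 *
 *	static void example_notify_fn(void *context)
 *	{
 *		pr_debug("deferred notify for %p\n", context);
 *	}
 *
 *	(void)acpi_os_execute(OSL_NOTIFY_HANDLER, example_notify_fn, NULL);
 */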

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	acpi_handle_debug(adev->handle,
			  "Scheduling hotplug event %u for deferred handling\n",
			  src);

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
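
/*
 * Illustrative sketch (not part of the driver): ACPICA drives the four
 * semaphore callbacks above through its OSL layer roughly like this
 * (error handling trimmed):
 *
 *	acpi_handle sem;
 *
 *	acpi_os_create_semaphore(1, 1, &sem);
 *	acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER);	// take it
 *	acpi_os_signal_semaphore(sem, 1);			// release it
 *	acpi_os_delete_semaphore(sem);
 */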

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		pr_err("Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);
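
/*
 * Illustrative note (not part of the driver): the override consumed by
 * acpi_os_predefined_override() above is typically set from the kernel
 * command line, e.g.:
 *
 *	acpi_os_name="Microsoft Windows NT"
 *
 * Quote characters are stripped by acpi_os_name_setup(), so the stored
 * string becomes: Microsoft Windows NT
 */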

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("Auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> a further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 *
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else if (res->flags & IORESOURCE_MEM)
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	else
		return 0;

	if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
		return 0;

	pr_info("Resource conflict; ACPI support missing from driver?\n");

	if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
		return -EBUSY;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
		pr_notice("Resource conflict: System may be unstable or behave erratically\n");

	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = DEFINE_RES_IO_NAMED(start, n, name);

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
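
/*
 * Illustrative sketch (not part of the driver): a legacy platform driver can
 * ask whether its I/O range collides with an ACPI OperationRegion before
 * claiming it (the port range below is made up):
 *
 *	if (acpi_check_region(0x295, 8, "example-hwmon"))
 *		return -EBUSY;		// conflict under strict enforcement
 *
 *	if (!request_region(0x295, 8, "example-hwmon"))
 *		return -EBUSY;
 */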

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	spin_lock(lockp);
	return 0;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
	__releases(lockp)
{
	spin_unlock(lockp);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return AE_OK;
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("Static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	pr_notice("Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		void *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug("%s: Reset register mapping %s\n", __func__,
			 rv ? "successful" : "failed");
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe0_block_logical_address = 0UL;
	acpi_gbl_xgpe1_block_logical_address = 0UL;

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}
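
/*
 * Illustrative sketch (not part of the driver): a hypervisor interface layer
 * can intercept the final "enter sleep state" register writes by registering
 * a callback here.  The callback name below is hypothetical:
 *
 *	static int example_prepare_sleep(u8 sleep_state, u32 pm1a_ctrl,
 *					 u32 pm1b_ctrl)
 *	{
 *		// return 1 to skip the native register writes, 0 to proceed,
 *		// or a negative value to report an error
 *		return 0;
 *	}
 *
 *	acpi_os_set_prepare_sleep(example_prepare_sleep);
 */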

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						     u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}