// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 * Copyright (C) 2000 Andrew Henroid
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/panic.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "internal.h"

/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif /* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

static bool poweroff_on_fatal = true;
module_param(poweroff_on_fatal, bool, 0);
MODULE_PARM_DESC(poweroff_on_fatal, "Poweroff when encountering a fatal ACPI error");
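
/*
 * Added usage note (not in the original sources): since the ACPI core is
 * built in, this parameter is likely set on the kernel command line as
 * "acpi.poweroff_on_fatal=0" to taint and continue instead of powering
 * off on a fatal AML error.
 */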

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region(struct acpi_generic_address *gas,
				       unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
			    "ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
			    "ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
			    "ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
			    "ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
			    "ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
				    acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
				    acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);
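
/*
 * Illustrative example (the values are hypothetical, not from the
 * original sources): for a FADT whose xpm1a_event_block describes system
 * I/O port 0x400 with pm1_event_length == 4, the helper above behaves
 * like request_region(0x400, 4, "ACPI PM1a_EVT_BLK").
 */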

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void __printf(1, 0) acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif
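
/*
 * Usage sketch (added; the address is a placeholder): booting a kexec'd
 * kernel with "acpi_rsdp=0x<physical-address>" supplies the RSDP
 * location, parsed above as a hexadecimal value.
 */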

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

#ifdef CONFIG_KEXEC
	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}
#endif
	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err("System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
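
/*
 * Added commentary: unlike acpi_os_map_iomem() below, this helper only
 * returns an already-existing mapping (bumping its refcount) and never
 * creates one; a non-NULL result should eventually be balanced by
 * acpi_os_unmap_iomem().
 */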

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)	0
#else
#define should_use_kmap(pfn)	page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		pr_err("Cannot map memory that high: 0x%llx\n", phys);
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc_obj(*map);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
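
/*
 * Usage sketch (added for illustration; "phys" and "len" are
 * placeholders):
 *
 *	void __iomem *p = acpi_os_map_iomem(phys, len);
 *
 *	if (p) {
 *		// ... access via readb()/readl()/memcpy_fromio() ...
 *		acpi_os_unmap_iomem(p, len);
 *	}
 *
 * Repeated maps of overlapping ranges share one underlying mapping via
 * the refcount.
 */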

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_percpu_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
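
/*
 * Added commentary: dropping the last reference defers the actual unmap
 * through queue_rcu_work(), so lockless readers walking acpi_ioremaps
 * under rcu_read_lock() (see acpi_os_read_memory() below) can never
 * touch a mapping that has already been torn down.
 */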

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
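
/*
 * Worked example (added): a GAS describing a 32-bit register in system
 * memory has bit_width == 32, so the helpers above map and look up
 * bit_width / 8 == 4 bytes starting at gas->address.
 */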

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif
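
/*
 * Usage note (added): booting with "acpi_rev_override" on the command
 * line makes _REV evaluate to 5 (see acpi_os_predefined_override()
 * below), which some firmware checks to distinguish Linux from other
 * operating systems.
 */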

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		pr_info("Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	if ((*acpi_irq_handler)(acpi_irq_context)) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
				 "acpi", acpi_irq)) {
		pr_err("SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	u64 usec = ms * USEC_PER_MSEC, delta_us = 50;

	/*
	 * Use a hrtimer because the timer wheel timers are optimized for
	 * cancelation before they expire and this timer is not going to be
	 * canceled.
	 *
	 * Set the delta between the requested sleep time and the effective
	 * deadline to at least 50 us in case there is an opportunity for timer
	 * coalescing.
	 *
	 * Moreover, longer sleeps can be assumed to need somewhat less timer
	 * precision, so sacrifice some of it for making the timer a more likely
	 * candidate for coalescing by setting the delta to 1% of the sleep time
	 * if it is above 5 ms (this value is chosen so that the delta is a
	 * continuous function of the sleep time).
	 */
	if (ms > 5)
		delta_us = (USEC_PER_MSEC / 100) * ms;

	usleep_range(usec, usec + delta_us);
}
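
/*
 * Worked example (added): acpi_os_sleep(100) yields
 * delta_us = (1000 / 100) * 100 = 1000, i.e.
 * usleep_range(100000, 101000) - a 1% coalescing window.
 */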

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
	       (ACPI_100NSEC_PER_SEC / HZ);
}
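
/*
 * Worked example (added): with HZ == 250, each jiffy contributes
 * ACPI_100NSEC_PER_SEC / HZ = 10000000 / 250 = 40000 ticks of 100 ns,
 * i.e. the timer advances in 4 ms steps.
 */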

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!IS_ENABLED(CONFIG_HAS_IOPORT)) {
		/*
		 * set all-1 result as if reading from a non-existent
		 * I/O port
		 */
		*value = GENMASK(width, 0);
		return AE_NOT_IMPLEMENTED;
	}

	if (value)
		*value = 0;
	else
		value = &dummy;

	if (width <= 8) {
		*value = inb(port);
	} else if (width <= 16) {
		*value = inw(port);
	} else if (width <= 32) {
		*value = inl(port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (!IS_ENABLED(CONFIG_HAS_IOPORT))
		return AE_NOT_IMPLEMENTED;

	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		pr_debug("%s: Access width %d not supported\n", __func__, width);
		return AE_BAD_PARAMETER;
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
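
/*
 * Illustrative call (added; port 0x61 is just an example of a legacy
 * system port):
 *
 *	u32 val;
 *	acpi_os_read_port(0x61, &val, 8);	// val holds the 8-bit result
 */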

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}
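
/*
 * Added commentary: the RCU-protected lookup is the fast path - regions
 * already on acpi_ioremaps are accessed without taking any lock or
 * remapping; only unknown regions pay for a transient ioremap/iounmap
 * round trip.
 */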
#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}
#endif
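
/*
 * Worked example (added): for PCI device 0000:00:1f.0, pci_id->segment
 * == 0, pci_id->bus == 0, pci_id->device == 0x1f and pci_id->function
 * == 0, so PCI_DEVFN(0x1f, 0) produces the devfn passed to
 * raw_pci_read()/raw_pci_write().
 */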

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
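
/*
 * Added commentary: this function and the acpi_debugger_* helpers below
 * all follow the same pattern - snapshot the ops callback and take a
 * module reference under acpi_debugger.lock, drop the lock around the
 * potentially slow callback, then retake it to release the reference.
 * The debugger module therefore cannot be unloaded mid-call.
 */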

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	struct acpi_os_dpc *dpc;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Kernel thread creation failed\n");
			return AE_ERROR;
		}
		return AE_OK;
	}

	/*
	 * Allocate/initialize DPC structure. Note that this memory will be
	 * freed by the callee. The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc_obj(struct acpi_os_dpc, GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;
	INIT_WORK(&dpc->work, acpi_os_execute_deferred);

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	switch (type) {
	case OSL_NOTIFY_HANDLER:
		ret = queue_work(kacpi_notify_wq, &dpc->work);
		break;
	case OSL_GPE_HANDLER:
		/*
		 * On some machines, a software-initiated SMI causes corruption
		 * unless the SMI runs on CPU 0. An SMI can be initiated by
		 * any AML, but typically it's done in GPE-related methods that
		 * are run via workqueues, so we can avoid the known corruption
		 * cases by always queueing on CPU 0.
		 */
		ret = queue_work_on(0, kacpid_wq, &dpc->work);
		break;
	default:
		pr_err("Unsupported os_execute type %d.\n", type);
		goto err;
	}
	if (!ret) {
		pr_err("Unable to queue work\n");
		goto err;
	}

	return AE_OK;

err:
	kfree(dpc);
	return AE_ERROR;
}
EXPORT_SYMBOL(acpi_os_execute);
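
/*
 * Usage sketch (added; my_handler and ctx are hypothetical):
 *
 *	static void my_handler(void *ctx) { ... }
 *	...
 *	acpi_os_execute(OSL_NOTIFY_HANDLER, my_handler, ctx);
 *
 * my_handler(ctx) then runs later on the kacpi_notify workqueue.
 */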

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	acpi_handle_debug(adev->handle,
			  "Scheduling hotplug event %u for deferred handling\n",
			  src);

	hpw = kmalloc_obj(*hpw);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers. Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();
	if (ret < 0)
		return AE_ERROR;
	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	struct acpi_signal_fatal_info *fatal_info;

	switch (function) {
	case ACPI_SIGNAL_FATAL:
		fatal_info = info;
		pr_emerg("Fatal error while evaluating ACPI control method\n");
		pr_emerg("Type 0x%X Code 0x%X Argument 0x%X\n",
			 fatal_info->type, fatal_info->code, fatal_info->argument);

		if (poweroff_on_fatal)
			orderly_poweroff(true);
		else
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging. So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here. But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
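
/*
 * Usage sketch (added): booting with acpi_os_name="Microsoft Windows NT"
 * makes the _OS_ override in acpi_os_predefined_override() report that
 * string to AML; the parser above keeps alphanumerics, spaces and colons
 * and drops the quote characters.
 */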

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default. It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("Auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> a further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
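
/*
 * Usage sketch (added): booting with "acpi_enforce_resources=lax" lets a
 * conflicting native driver bind anyway, with only a warning from
 * acpi_check_resource_conflict() below.
 */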

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else if (res->flags & IORESOURCE_MEM)
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
	else
		return 0;

	if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
		return 0;

	pr_info("Resource conflict; ACPI support missing from driver?\n");

	if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
		return -EBUSY;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
		pr_notice("Resource conflict: System may be unstable or behave erratically\n");

	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = DEFINE_RES_IO_NAMED(start, n, name);

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
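
/*
 * Usage sketch (added; io_base, len and "mydrv" are hypothetical):
 *
 *	if (acpi_check_region(io_base, len, "mydrv"))
 *		return -EBUSY;	// range claimed by an ACPI OperationRegion
 *	if (!request_region(io_base, len, "mydrv"))
 *		return -EBUSY;
 */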

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
	__acquires(lockp)
{
	spin_lock(lockp);
	return 0;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
	__releases(lockp)
{
	spin_unlock(lockp);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache. If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return AE_OK;
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("Static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	pr_notice("Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		void __iomem *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug("%s: Reset register mapping %s\n", __func__,
			 rv ? "successful" : "failed");
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", WQ_PERCPU, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", WQ_PERCPU, 0);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe0_block_logical_address = 0UL;
	acpi_gbl_xgpe1_block_logical_address = 0UL;

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
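
/*
 * Added commentary on the callback contract: a registered
 * __acpi_os_prepare_sleep() hook returning a negative value aborts the
 * transition (AE_ERROR), a positive value means the hook handled the
 * sleep itself so the register writes must be skipped
 * (AE_CTRL_TERMINATE), and zero continues the normal path.
 */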

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
					   u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
						    u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}