xref: /linux/drivers/acpi/osl.c (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
1 /*
2  *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
3  *
4  *  Copyright (C) 2000       Andrew Henroid
5  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
6  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7  *
8  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9  *
10  *  This program is free software; you can redistribute it and/or modify
11  *  it under the terms of the GNU General Public License as published by
12  *  the Free Software Foundation; either version 2 of the License, or
13  *  (at your option) any later version.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to the Free Software
22  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  *
26  */
27 
28 #include <linux/module.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/mm.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/kmod.h>
35 #include <linux/delay.h>
36 #include <linux/dmi.h>
37 #include <linux/workqueue.h>
38 #include <linux/nmi.h>
39 #include <linux/acpi.h>
40 #include <acpi/acpi.h>
41 #include <asm/io.h>
42 #include <acpi/acpi_bus.h>
43 #include <acpi/processor.h>
44 #include <asm/uaccess.h>
45 
46 #include <linux/efi.h>
47 #include <linux/ioport.h>
48 #include <linux/list.h>
49 
50 #define _COMPONENT		ACPI_OS_SERVICES
51 ACPI_MODULE_NAME("osl");
52 #define PREFIX		"ACPI: "
53 struct acpi_os_dpc {
54 	acpi_osd_exec_callback function;
55 	void *context;
56 	struct work_struct work;
57 };
58 
59 #ifdef CONFIG_ACPI_CUSTOM_DSDT
60 #include CONFIG_ACPI_CUSTOM_DSDT_FILE
61 #endif
62 
63 #ifdef ENABLE_DEBUGGER
64 #include <linux/kdb.h>
65 
66 /* stuff for debugger support */
67 int acpi_in_debugger;
68 EXPORT_SYMBOL(acpi_in_debugger);
69 
70 extern char line_buf[80];
71 #endif				/*ENABLE_DEBUGGER */
72 
73 static unsigned int acpi_irq_irq;
74 static acpi_osd_handler acpi_irq_handler;
75 static void *acpi_irq_context;
76 static struct workqueue_struct *kacpid_wq;
77 static struct workqueue_struct *kacpi_notify_wq;
78 
79 struct acpi_res_list {
80 	resource_size_t start;
81 	resource_size_t end;
82 	acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
83 	char name[5];   /* can only be 4 chars long; use this field instead of
84 			   res->name, so there is no need to kmalloc a copy */
85 	struct list_head resource_list;
86 };
87 
88 static LIST_HEAD(resource_list_head);
89 static DEFINE_SPINLOCK(acpi_res_lock);
90 
91 #define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
92 static char osi_additional_string[OSI_STRING_LENGTH_MAX];
93 
94 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
95 static int acpi_no_initrd_override;
96 #endif
97 
98 /*
99  * "Ode to _OSI(Linux)"
100  *
101  * osi_linux -- Control response to BIOS _OSI(Linux) query.
102  *
103  * As Linux evolves, the features that it supports change.
104  * So an OSI string such as "Linux" is not specific enough
105  * to be useful across multiple versions of Linux.  It
106  * doesn't identify any particular feature, interface,
107  * or even any particular version of Linux...
108  *
109  * Unfortunately, Linux-2.6.22 and earlier responded "yes"
110  * to a BIOS _OSI(Linux) query.  When
111  * a reference mobile BIOS started using it, its use
112  * started to spread to many vendor platforms.
113  * As it is not supportable, we need to halt that spread.
114  *
115  * Today, most BIOS references to _OSI(Linux) are noise --
116  * they have no functional effect and are just dead code
117  * carried over from the reference BIOS.
118  *
119  * The next most common case is that _OSI(Linux) harms Linux,
120  * usually by causing the BIOS to follow paths that are
121  * not tested during Windows validation.
122  *
123  * Finally, there is a short list of platforms
124  * where OSI(Linux) benefits Linux.
125  *
126  * In Linux-2.6.23, OSI(Linux) is first disabled by default.
127  * DMI is used to disable the dmesg warning about OSI(Linux)
128  * on platforms where it is known to have no effect.
129  * But a dmesg warning remains for systems where
130  * we do not know if OSI(Linux) is good or bad for the system.
131  * DMI is also used to enable OSI(Linux) for the machines
132  * that are known to need it.
133  *
134  * BIOS writers should NOT query _OSI(Linux) on future systems.
135  * It will be ignored by default, and to get Linux to
136  * not ignore it will require a kernel source update to
137  * add a DMI entry, or a boot-time "acpi_osi=Linux" invocation.
138  */
139 #define OSI_LINUX_ENABLE 0
140 
141 static struct osi_linux {
142 	unsigned int	enable:1;
143 	unsigned int	dmi:1;
144 	unsigned int	cmdline:1;
145 	unsigned int	known:1;
146 } osi_linux = { OSI_LINUX_ENABLE, 0, 0, 0};
147 
148 static void __init acpi_request_region (struct acpi_generic_address *addr,
149 	unsigned int length, char *desc)
150 {
151 	struct resource *res;
152 
153 	if (!addr->address || !length)
154 		return;
155 
156 	if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
157 		res = request_region(addr->address, length, desc);
158 	else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
159 		res = request_mem_region(addr->address, length, desc);
160 }
161 
162 static int __init acpi_reserve_resources(void)
163 {
164 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
165 		"ACPI PM1a_EVT_BLK");
166 
167 	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
168 		"ACPI PM1b_EVT_BLK");
169 
170 	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
171 		"ACPI PM1a_CNT_BLK");
172 
173 	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
174 		"ACPI PM1b_CNT_BLK");
175 
176 	if (acpi_gbl_FADT.pm_timer_length == 4)
177 		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");
178 
179 	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
180 		"ACPI PM2_CNT_BLK");
181 
182 	/* Length of GPE blocks must be a non-negative multiple of 2 */
183 
184 	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
185 		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
186 			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");
187 
188 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
189 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
190 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
191 
192 	return 0;
193 }
194 device_initcall(acpi_reserve_resources);
195 
196 acpi_status __init acpi_os_initialize(void)
197 {
198 	return AE_OK;
199 }
200 
201 acpi_status acpi_os_initialize1(void)
202 {
203 	kacpid_wq = create_singlethread_workqueue("kacpid");
204 	kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
205 	BUG_ON(!kacpid_wq);
206 	BUG_ON(!kacpi_notify_wq);
207 	return AE_OK;
208 }
209 
210 acpi_status acpi_os_terminate(void)
211 {
212 	if (acpi_irq_handler) {
213 		acpi_os_remove_interrupt_handler(acpi_irq_irq,
214 						 acpi_irq_handler);
215 	}
216 
217 	destroy_workqueue(kacpid_wq);
218 	destroy_workqueue(kacpi_notify_wq);
219 
220 	return AE_OK;
221 }
222 
223 void acpi_os_printf(const char *fmt, ...)
224 {
225 	va_list args;
226 	va_start(args, fmt);
227 	acpi_os_vprintf(fmt, args);
228 	va_end(args);
229 }
230 
231 void acpi_os_vprintf(const char *fmt, va_list args)
232 {
233 	static char buffer[512];
234 
235 	vsprintf(buffer, fmt, args);
236 
237 #ifdef ENABLE_DEBUGGER
238 	if (acpi_in_debugger) {
239 		kdb_printf("%s", buffer);
240 	} else {
241 		printk("%s", buffer);
242 	}
243 #else
244 	printk("%s", buffer);
245 #endif
246 }
247 
248 acpi_physical_address __init acpi_os_get_root_pointer(void)
249 {
250 	if (efi_enabled) {
251 		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
252 			return efi.acpi20;
253 		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
254 			return efi.acpi;
255 		else {
256 			printk(KERN_ERR PREFIX
257 			       "System description tables not found\n");
258 			return 0;
259 		}
260 	} else {
261 		acpi_physical_address pa = 0;
262 
263 		acpi_find_root_pointer(&pa);
264 		return pa;
265 	}
266 }
267 
268 void __iomem *__init_refok
269 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
270 {
271 	if (phys > ULONG_MAX) {
272 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
273 		return NULL;
274 	}
275 	if (acpi_gbl_permanent_mmap)
276 		/*
277 		* ioremap checks to ensure this is in reserved space
278 		*/
279 		return ioremap((unsigned long)phys, size);
280 	else
281 		return __acpi_map_table((unsigned long)phys, size);
282 }
283 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
284 
285 void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
286 {
287 	if (acpi_gbl_permanent_mmap) {
288 		iounmap(virt);
289 	}
290 }
291 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
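
/*
 * Illustrative sketch (editor addition, not part of the original source):
 * callers are expected to pair the two helpers above.  "table_pa" and
 * "table_len" are hypothetical names for a physical address and length.
 *
 *	void __iomem *virt = acpi_os_map_memory(table_pa, table_len);
 *
 *	if (virt) {
 *		u8 first = readb(virt);	(access through MMIO accessors)
 *		acpi_os_unmap_memory(virt, table_len);
 *	}
 *
 * Once acpi_gbl_permanent_mmap is set this is a plain ioremap()/iounmap()
 * pair; before that, __acpi_map_table() is used and acpi_os_unmap_memory()
 * intentionally does nothing.
 */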
292 
293 #ifdef ACPI_FUTURE_USAGE
294 acpi_status
295 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
296 {
297 	if (!phys || !virt)
298 		return AE_BAD_PARAMETER;
299 
300 	*phys = virt_to_phys(virt);
301 
302 	return AE_OK;
303 }
304 #endif
305 
306 #define ACPI_MAX_OVERRIDE_LEN 100
307 
308 static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
309 
310 acpi_status
311 acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
312 			    acpi_string * new_val)
313 {
314 	if (!init_val || !new_val)
315 		return AE_BAD_PARAMETER;
316 
317 	*new_val = NULL;
318 	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
319 		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
320 		       acpi_os_name);
321 		*new_val = acpi_os_name;
322 	}
323 
324 	return AE_OK;
325 }
326 
327 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
328 struct acpi_table_header *acpi_find_dsdt_initrd(void)
329 {
330 	struct file *firmware_file;
331 	mm_segment_t oldfs;
332 	unsigned long len, len2;
333 	struct acpi_table_header *dsdt_buffer, *ret = NULL;
334 	struct kstat stat;
335 	char *ramfs_dsdt_name = "/DSDT.aml";
336 
337 	printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT\n");
338 
339 	/*
340 	 * Never do this at home: normally only user space is supposed to open files.
341 	 * The clean way would be to use the firmware loader.
342 	 * But this code must be run before there is any userspace available.
343 	 * A static/init firmware infrastructure doesn't exist yet...
344 	 */
345 	if (vfs_stat(ramfs_dsdt_name, &stat) < 0)
346 		return ret;
347 
348 	len = stat.size;
349 	/* check especially against empty files */
350 	if (len <= 4) {
351 		printk(KERN_ERR PREFIX "Failed: DSDT only %lu bytes.\n", len);
352 		return ret;
353 	}
354 
355 	firmware_file = filp_open(ramfs_dsdt_name, O_RDONLY, 0);
356 	if (IS_ERR(firmware_file)) {
357 		printk(KERN_ERR PREFIX "Failed to open %s.\n", ramfs_dsdt_name);
358 		return ret;
359 	}
360 
361 	dsdt_buffer = kmalloc(len, GFP_ATOMIC);
362 	if (!dsdt_buffer) {
363 		printk(KERN_ERR PREFIX "Failed to allocate %lu bytes.\n", len);
364 		goto err;
365 	}
366 
367 	oldfs = get_fs();
368 	set_fs(KERNEL_DS);
369 	len2 = vfs_read(firmware_file, (char __user *)dsdt_buffer, len,
370 		&firmware_file->f_pos);
371 	set_fs(oldfs);
372 	if (len2 < len) {
373 		printk(KERN_ERR PREFIX "Failed to read %lu bytes from %s.\n",
374 			len, ramfs_dsdt_name);
375 		ACPI_FREE(dsdt_buffer);
376 		goto err;
377 	}
378 
379 	printk(KERN_INFO PREFIX "Found %lu byte DSDT in %s.\n",
380 			len, ramfs_dsdt_name);
381 	ret = dsdt_buffer;
382 err:
383 	filp_close(firmware_file, NULL);
384 	return ret;
385 }
386 #endif
387 
388 acpi_status
389 acpi_os_table_override(struct acpi_table_header * existing_table,
390 		       struct acpi_table_header ** new_table)
391 {
392 	if (!existing_table || !new_table)
393 		return AE_BAD_PARAMETER;
394 
395 	*new_table = NULL;
396 
397 #ifdef CONFIG_ACPI_CUSTOM_DSDT
398 	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
399 		*new_table = (struct acpi_table_header *)AmlCode;
400 #endif
401 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
402 	if ((strncmp(existing_table->signature, "DSDT", 4) == 0) &&
403 	    !acpi_no_initrd_override) {
404 		struct acpi_table_header *initrd_table;
405 
406 		initrd_table = acpi_find_dsdt_initrd();
407 		if (initrd_table)
408 			*new_table = initrd_table;
409 	}
410 #endif
411 	if (*new_table != NULL) {
412 		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
413 			   "this is unsafe: tainting kernel\n",
414 		       existing_table->signature,
415 		       existing_table->oem_table_id);
416 		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
417 	}
418 	return AE_OK;
419 }
420 
421 #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
422 int __init acpi_no_initrd_override_setup(char *s)
423 {
424 	acpi_no_initrd_override = 1;
425 	return 1;
426 }
427 __setup("acpi_no_initrd_override", acpi_no_initrd_override_setup);
428 #endif
429 
430 static irqreturn_t acpi_irq(int irq, void *dev_id)
431 {
432 	u32 handled;
433 
434 	handled = (*acpi_irq_handler) (acpi_irq_context);
435 
436 	if (handled) {
437 		acpi_irq_handled++;
438 		return IRQ_HANDLED;
439 	} else
440 		return IRQ_NONE;
441 }
442 
443 acpi_status
444 acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
445 				  void *context)
446 {
447 	unsigned int irq;
448 
449 	acpi_irq_stats_init();
450 
451 	/*
452 	 * Ignore the GSI from the core, and use the value in our copy of the
453 	 * FADT. It may not be the same if an interrupt source override exists
454 	 * for the SCI.
455 	 */
456 	gsi = acpi_gbl_FADT.sci_interrupt;
457 	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
458 		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
459 		       gsi);
460 		return AE_OK;
461 	}
462 
463 	acpi_irq_handler = handler;
464 	acpi_irq_context = context;
465 	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
466 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
467 		return AE_NOT_ACQUIRED;
468 	}
469 	acpi_irq_irq = irq;
470 
471 	return AE_OK;
472 }
473 
474 acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
475 {
476 	if (irq) {
477 		free_irq(irq, acpi_irq);
478 		acpi_irq_handler = NULL;
479 		acpi_irq_irq = 0;
480 	}
481 
482 	return AE_OK;
483 }
484 
485 /*
486  * Running in interpreter thread context, safe to sleep
487  */
488 
489 void acpi_os_sleep(acpi_integer ms)
490 {
491 	schedule_timeout_interruptible(msecs_to_jiffies(ms));
492 }
493 
494 void acpi_os_stall(u32 us)
495 {
496 	while (us) {
497 		u32 delay = 1000;
498 
499 		if (delay > us)
500 			delay = us;
501 		udelay(delay);
502 		touch_nmi_watchdog();
503 		us -= delay;
504 	}
505 }
506 
507 /*
508  * Support ACPI 3.0 AML Timer operand
509  * Returns 64-bit free-running, monotonically increasing timer
510  * with 100ns granularity
511  */
512 u64 acpi_os_get_timer(void)
513 {
514 	static u64 t;
515 
516 #ifdef	CONFIG_HPET
517 	/* TBD: use HPET if available */
518 #endif
519 
520 #ifdef	CONFIG_X86_PM_TIMER
521 	/* TBD: default to PM timer if HPET was not available */
522 #endif
523 	if (!t)
524 		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
525 
526 	return ++t;
527 }
528 
529 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
530 {
531 	u32 dummy;
532 
533 	if (!value)
534 		value = &dummy;
535 
536 	*value = 0;
537 	if (width <= 8) {
538 		*(u8 *) value = inb(port);
539 	} else if (width <= 16) {
540 		*(u16 *) value = inw(port);
541 	} else if (width <= 32) {
542 		*(u32 *) value = inl(port);
543 	} else {
544 		BUG();
545 	}
546 
547 	return AE_OK;
548 }
549 
550 EXPORT_SYMBOL(acpi_os_read_port);
551 
552 acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
553 {
554 	if (width <= 8) {
555 		outb(value, port);
556 	} else if (width <= 16) {
557 		outw(value, port);
558 	} else if (width <= 32) {
559 		outl(value, port);
560 	} else {
561 		BUG();
562 	}
563 
564 	return AE_OK;
565 }
566 
567 EXPORT_SYMBOL(acpi_os_write_port);
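
/*
 * Illustrative sketch (editor addition): reading and writing an I/O port
 * through the helpers above.  Port 0x80 (the POST diagnostic port) is only
 * an example; the last argument is the access width in bits (8, 16 or 32).
 *
 *	u32 val;
 *
 *	acpi_os_read_port(0x80, &val, 8);
 *	acpi_os_write_port(0x80, val, 8);
 *
 * Any other width hits the BUG() above, and a NULL value pointer on a read
 * is silently redirected to a local dummy variable.
 */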
568 
569 acpi_status
570 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
571 {
572 	u32 dummy;
573 	void __iomem *virt_addr;
574 
575 	virt_addr = ioremap(phys_addr, width);
576 	if (!value)
577 		value = &dummy;
578 
579 	switch (width) {
580 	case 8:
581 		*(u8 *) value = readb(virt_addr);
582 		break;
583 	case 16:
584 		*(u16 *) value = readw(virt_addr);
585 		break;
586 	case 32:
587 		*(u32 *) value = readl(virt_addr);
588 		break;
589 	default:
590 		BUG();
591 	}
592 
593 	iounmap(virt_addr);
594 
595 	return AE_OK;
596 }
597 
598 acpi_status
599 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
600 {
601 	void __iomem *virt_addr;
602 
603 	virt_addr = ioremap(phys_addr, width);
604 
605 	switch (width) {
606 	case 8:
607 		writeb(value, virt_addr);
608 		break;
609 	case 16:
610 		writew(value, virt_addr);
611 		break;
612 	case 32:
613 		writel(value, virt_addr);
614 		break;
615 	default:
616 		BUG();
617 	}
618 
619 	iounmap(virt_addr);
620 
621 	return AE_OK;
622 }
623 
624 acpi_status
625 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
626 			       void *value, u32 width)
627 {
628 	int result, size;
629 
630 	if (!value)
631 		return AE_BAD_PARAMETER;
632 
633 	switch (width) {
634 	case 8:
635 		size = 1;
636 		break;
637 	case 16:
638 		size = 2;
639 		break;
640 	case 32:
641 		size = 4;
642 		break;
643 	default:
644 		return AE_ERROR;
645 	}
646 
647 	result = raw_pci_read(pci_id->segment, pci_id->bus,
648 				PCI_DEVFN(pci_id->device, pci_id->function),
649 				reg, size, value);
650 
651 	return (result ? AE_ERROR : AE_OK);
652 }
653 
654 acpi_status
655 acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
656 				acpi_integer value, u32 width)
657 {
658 	int result, size;
659 
660 	switch (width) {
661 	case 8:
662 		size = 1;
663 		break;
664 	case 16:
665 		size = 2;
666 		break;
667 	case 32:
668 		size = 4;
669 		break;
670 	default:
671 		return AE_ERROR;
672 	}
673 
674 	result = raw_pci_write(pci_id->segment, pci_id->bus,
675 				PCI_DEVFN(pci_id->device, pci_id->function),
676 				reg, size, value);
677 
678 	return (result ? AE_ERROR : AE_OK);
679 }
680 
681 /* TODO: Change code to take advantage of driver model more */
682 static void acpi_os_derive_pci_id_2(acpi_handle rhandle,	/* upper bound  */
683 				    acpi_handle chandle,	/* current node */
684 				    struct acpi_pci_id **id,
685 				    int *is_bridge, u8 * bus_number)
686 {
687 	acpi_handle handle;
688 	struct acpi_pci_id *pci_id = *id;
689 	acpi_status status;
690 	unsigned long temp;
691 	acpi_object_type type;
692 	u8 tu8;
693 
694 	acpi_get_parent(chandle, &handle);
695 	if (handle != rhandle) {
696 		acpi_os_derive_pci_id_2(rhandle, handle, &pci_id, is_bridge,
697 					bus_number);
698 
699 		status = acpi_get_type(handle, &type);
700 		if ((ACPI_FAILURE(status)) || (type != ACPI_TYPE_DEVICE))
701 			return;
702 
703 		status =
704 		    acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL,
705 					  &temp);
706 		if (ACPI_SUCCESS(status)) {
707 			pci_id->device = ACPI_HIWORD(ACPI_LODWORD(temp));
708 			pci_id->function = ACPI_LOWORD(ACPI_LODWORD(temp));
709 
710 			if (*is_bridge)
711 				pci_id->bus = *bus_number;
712 
713 			/* any nicer way to get bus number of bridge ? */
714 			status =
715 			    acpi_os_read_pci_configuration(pci_id, 0x0e, &tu8,
716 							   8);
717 			if (ACPI_SUCCESS(status)
718 			    && ((tu8 & 0x7f) == 1 || (tu8 & 0x7f) == 2)) {
719 				status =
720 				    acpi_os_read_pci_configuration(pci_id, 0x18,
721 								   &tu8, 8);
722 				if (!ACPI_SUCCESS(status)) {
723 					/* Certainly broken...  FIX ME */
724 					return;
725 				}
726 				*is_bridge = 1;
727 				pci_id->bus = tu8;
728 				status =
729 				    acpi_os_read_pci_configuration(pci_id, 0x19,
730 								   &tu8, 8);
731 				if (ACPI_SUCCESS(status)) {
732 					*bus_number = tu8;
733 				}
734 			} else
735 				*is_bridge = 0;
736 		}
737 	}
738 }
739 
740 void acpi_os_derive_pci_id(acpi_handle rhandle,	/* upper bound  */
741 			   acpi_handle chandle,	/* current node */
742 			   struct acpi_pci_id **id)
743 {
744 	int is_bridge = 1;
745 	u8 bus_number = (*id)->bus;
746 
747 	acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number);
748 }
749 
750 static void acpi_os_execute_deferred(struct work_struct *work)
751 {
752 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
753 	if (!dpc) {
754 		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
755 		return;
756 	}
757 
758 	dpc->function(dpc->context);
759 	kfree(dpc);
760 
761 	return;
762 }
763 
764 /*******************************************************************************
765  *
766  * FUNCTION:    acpi_os_execute
767  *
768  * PARAMETERS:  Type               - Type of the callback
769  *              Function           - Function to be executed
770  *              Context            - Function parameters
771  *
772  * RETURN:      Status
773  *
774  * DESCRIPTION: Depending on type, either queues function for deferred execution or
775  *              immediately executes function on a separate thread.
776  *
777  ******************************************************************************/
778 
779 acpi_status acpi_os_execute(acpi_execute_type type,
780 			    acpi_osd_exec_callback function, void *context)
781 {
782 	acpi_status status = AE_OK;
783 	struct acpi_os_dpc *dpc;
784 	struct workqueue_struct *queue;
785 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
786 			  "Scheduling function [%p(%p)] for deferred execution.\n",
787 			  function, context));
788 
789 	if (!function)
790 		return AE_BAD_PARAMETER;
791 
792 	/*
793 	 * Allocate/initialize DPC structure.  Note that this memory will be
794 	 * freed by the callee.  The kernel handles the work_struct list  in a
795 	 * way that allows us to also free its memory inside the callee.
796 	 * Because we may want to schedule several tasks with different
797 	 * parameters we can't use the approach some kernel code uses of
798 	 * having a static work_struct.
799 	 */
800 
801 	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
802 	if (!dpc)
803 		return_ACPI_STATUS(AE_NO_MEMORY);
804 
805 	dpc->function = function;
806 	dpc->context = context;
807 
808 	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
809 	queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
810 	if (!queue_work(queue, &dpc->work)) {
811 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
812 			  "Call to queue_work() failed.\n"));
813 		status = AE_ERROR;
814 		kfree(dpc);
815 	}
816 	return_ACPI_STATUS(status);
817 }
818 
819 EXPORT_SYMBOL(acpi_os_execute);
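
/*
 * Illustrative sketch (editor addition): scheduling a deferred callback
 * through acpi_os_execute().  The handler and context names are
 * hypothetical; the callback runs later on one of the workqueues created
 * in acpi_os_initialize1().
 *
 *	static void my_notify_handler(void *context)
 *	{
 *		... runs in process context on kacpi_notify_wq ...
 *	}
 *
 *	acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_handler, my_context);
 *
 * OSL_NOTIFY_HANDLER work is queued on kacpi_notify_wq, everything else on
 * kacpid_wq, and the acpi_os_dpc allocation is freed by
 * acpi_os_execute_deferred() after the callback returns.
 */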
820 
821 void acpi_os_wait_events_complete(void *context)
822 {
823 	flush_workqueue(kacpid_wq);
824 }
825 
826 EXPORT_SYMBOL(acpi_os_wait_events_complete);
827 
828 /*
829  * Allocate the memory for a spinlock and initialize it.
830  */
831 acpi_status acpi_os_create_lock(acpi_spinlock * handle)
832 {
833 	spin_lock_init(*handle);
834 
835 	return AE_OK;
836 }
837 
838 /*
839  * Deallocate the memory for a spinlock.
840  */
841 void acpi_os_delete_lock(acpi_spinlock handle)
842 {
843 	return;
844 }
845 
846 acpi_status
847 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
848 {
849 	struct semaphore *sem = NULL;
850 
851 
852 	sem = acpi_os_allocate(sizeof(struct semaphore));
853 	if (!sem)
854 		return AE_NO_MEMORY;
855 	memset(sem, 0, sizeof(struct semaphore));
856 
857 	sema_init(sem, initial_units);
858 
859 	*handle = (acpi_handle *) sem;
860 
861 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
862 			  *handle, initial_units));
863 
864 	return AE_OK;
865 }
866 
867 /*
868  * TODO: A better way to delete semaphores?  Linux doesn't have a
869  * 'delete_semaphore()' function -- may result in an invalid
870  * pointer dereference for non-synchronized consumers.	Should
871  * we at least check for blocked threads and signal/cancel them?
872  */
873 
874 acpi_status acpi_os_delete_semaphore(acpi_handle handle)
875 {
876 	struct semaphore *sem = (struct semaphore *)handle;
877 
878 
879 	if (!sem)
880 		return AE_BAD_PARAMETER;
881 
882 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
883 
884 	kfree(sem);
885 	sem = NULL;
886 
887 	return AE_OK;
888 }
889 
890 /*
891  * TODO: The kernel doesn't have a 'down_timeout' function -- had to
892  * improvise.  The process is to sleep for one scheduler quantum
893  * until the semaphore becomes available.  Downside is that this
894  * may result in starvation for timeout-based waits when there's
895  * lots of semaphore activity.
896  *
897  * TODO: Support for units > 1?
898  */
899 acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
900 {
901 	acpi_status status = AE_OK;
902 	struct semaphore *sem = (struct semaphore *)handle;
903 	int ret = 0;
904 
905 
906 	if (!sem || (units < 1))
907 		return AE_BAD_PARAMETER;
908 
909 	if (units > 1)
910 		return AE_SUPPORT;
911 
912 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
913 			  handle, units, timeout));
914 
915 	/*
916 	 * This can be called during resume with interrupts off.
917 	 * Like boot-time, we should be single threaded and will
918 	 * always get the lock if we try -- timeout or not.
919 	 * If this doesn't succeed, then we will oops courtesy of
920 	 * might_sleep() in down().
921 	 */
922 	if (!down_trylock(sem))
923 		return AE_OK;
924 
925 	switch (timeout) {
926 		/*
927 		 * No Wait:
928 		 * --------
929 		 * A zero timeout value indicates that we shouldn't wait - just
930 		 * acquire the semaphore if available otherwise return AE_TIME
931 		 * (a.k.a. 'would block').
932 		 */
933 	case 0:
934 		if (down_trylock(sem))
935 			status = AE_TIME;
936 		break;
937 
938 		/*
939 		 * Wait Indefinitely:
940 		 * ------------------
941 		 */
942 	case ACPI_WAIT_FOREVER:
943 		down(sem);
944 		break;
945 
946 		/*
947 		 * Wait w/ Timeout:
948 		 * ----------------
949 		 */
950 	default:
951 		// TODO: A better timeout algorithm?
952 		{
953 			int i = 0;
954 			static const int quantum_ms = 1000 / HZ;
955 
956 			ret = down_trylock(sem);
957 			for (i = timeout; (i > 0 && ret != 0); i -= quantum_ms) {
958 				schedule_timeout_interruptible(1);
959 				ret = down_trylock(sem);
960 			}
961 
962 			if (ret != 0)
963 				status = AE_TIME;
964 		}
965 		break;
966 	}
967 
968 	if (ACPI_FAILURE(status)) {
969 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
970 				  "Failed to acquire semaphore[%p|%d|%d], %s",
971 				  handle, units, timeout,
972 				  acpi_format_exception(status)));
973 	} else {
974 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
975 				  "Acquired semaphore[%p|%d|%d]", handle,
976 				  units, timeout));
977 	}
978 
979 	return status;
980 }
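
/*
 * Worked example (editor addition) for the timeout loop above, assuming
 * HZ = 250: quantum_ms = 1000 / HZ = 4, so a 100 ms timeout starts with
 * i = 100 and decrements by 4 per pass, i.e. down_trylock() is retried up
 * to 25 times with a one-jiffy (~4 ms) sleep between attempts.  The
 * effective timeout is therefore only accurate to the scheduler quantum.
 */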
981 
982 /*
983  * TODO: Support for units > 1?
984  */
985 acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
986 {
987 	struct semaphore *sem = (struct semaphore *)handle;
988 
989 
990 	if (!sem || (units < 1))
991 		return AE_BAD_PARAMETER;
992 
993 	if (units > 1)
994 		return AE_SUPPORT;
995 
996 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
997 			  units));
998 
999 	up(sem);
1000 
1001 	return AE_OK;
1002 }
1003 
1004 #ifdef ACPI_FUTURE_USAGE
1005 u32 acpi_os_get_line(char *buffer)
1006 {
1007 
1008 #ifdef ENABLE_DEBUGGER
1009 	if (acpi_in_debugger) {
1010 		u32 chars;
1011 
1012 		kdb_read(buffer, sizeof(line_buf));
1013 
1014 		/* remove the CR kdb includes */
1015 		chars = strlen(buffer) - 1;
1016 		buffer[chars] = '\0';
1017 	}
1018 #endif
1019 
1020 	return 0;
1021 }
1022 #endif				/*  ACPI_FUTURE_USAGE  */
1023 
1024 acpi_status acpi_os_signal(u32 function, void *info)
1025 {
1026 	switch (function) {
1027 	case ACPI_SIGNAL_FATAL:
1028 		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
1029 		break;
1030 	case ACPI_SIGNAL_BREAKPOINT:
1031 		/*
1032 		 * AML Breakpoint
1033 		 * ACPI spec. says to treat it as a NOP unless
1034 		 * you are debugging.  So if/when we integrate the
1035 		 * AML debugger into the kernel debugger, its
1036 		 * hook will go here.  But until then it is
1037 		 * not useful to print anything on breakpoints.
1038 		 */
1039 		break;
1040 	default:
1041 		break;
1042 	}
1043 
1044 	return AE_OK;
1045 }
1046 
1047 static int __init acpi_os_name_setup(char *str)
1048 {
1049 	char *p = acpi_os_name;
1050 	int count = ACPI_MAX_OVERRIDE_LEN - 1;
1051 
1052 	if (!str || !*str)
1053 		return 0;
1054 
1055 	for (; count-- && str && *str; str++) {
1056 		if (isalnum(*str) || *str == ' ' || *str == ':')
1057 			*p++ = *str;
1058 		else if (*str == '\'' || *str == '"')
1059 			continue;
1060 		else
1061 			break;
1062 	}
1063 	*p = 0;
1064 
1065 	return 1;
1066 
1067 }
1068 
1069 __setup("acpi_os_name=", acpi_os_name_setup);
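
/*
 * Illustrative usage (editor addition): the override is typically used to
 * report a Windows-like _OS_ string to a BIOS that keys off it, e.g. on
 * the kernel command line (the exact string is only an example):
 *
 *	acpi_os_name="Microsoft Windows NT"
 *
 * The parser above strips quotes and stops at the first character that is
 * not alphanumeric, a space or a colon.
 */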
1070 
1071 static void __init set_osi_linux(unsigned int enable)
1072 {
1073 	if (osi_linux.enable != enable) {
1074 		osi_linux.enable = enable;
1075 		printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n",
1076 			enable ? "Add": "Delet");
1077 	}
1078 	return;
1079 }
1080 
1081 static void __init acpi_cmdline_osi_linux(unsigned int enable)
1082 {
1083 	osi_linux.cmdline = 1;	/* cmdline set the default */
1084 	set_osi_linux(enable);
1085 
1086 	return;
1087 }
1088 
1089 void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
1090 {
1091 	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
1092 
1093 	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
1094 
1095 	if (enable == -1)
1096 		return;
1097 
1098 	osi_linux.known = 1;	/* DMI knows which OSI(Linux) default needed */
1099 
1100 	set_osi_linux(enable);
1101 
1102 	return;
1103 }
1104 
1105 /*
1106  * Modify the list of "OS Interfaces" reported to BIOS via _OSI
1107  *
1108  * empty string disables _OSI
1109  * string starting with '!' disables that string
1110  * otherwise string is added to list, augmenting built-in strings
1111  */
1112 static int __init acpi_osi_setup(char *str)
1113 {
1114 	if (str == NULL || *str == '\0') {
1115 		printk(KERN_INFO PREFIX "_OSI method disabled\n");
1116 		acpi_gbl_create_osi_method = FALSE;
1117 	} else if (!strcmp("!Linux", str)) {
1118 		acpi_cmdline_osi_linux(0);	/* !enable */
1119 	} else if (*str == '!') {
1120 		if (acpi_osi_invalidate(++str) == AE_OK)
1121 			printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
1122 	} else if (!strcmp("Linux", str)) {
1123 		acpi_cmdline_osi_linux(1);	/* enable */
1124 	} else if (*osi_additional_string == '\0') {
1125 		strncpy(osi_additional_string, str, OSI_STRING_LENGTH_MAX);
1126 		printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
1127 	}
1128 
1129 	return 1;
1130 }
1131 
1132 __setup("acpi_osi=", acpi_osi_setup);
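
/*
 * Illustrative command-line usage (editor addition), matching the parser
 * above ("FooBar" stands for any hypothetical interface string):
 *
 *	acpi_osi=		disable the _OSI method entirely
 *	acpi_osi=Linux		answer "yes" to _OSI(Linux)
 *	acpi_osi=!Linux		answer "no" to _OSI(Linux)
 *	acpi_osi=!FooBar	delete "FooBar" from the interface list
 *	acpi_osi=FooBar		add "FooBar" (only one addition is kept)
 */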
1133 
1134 /* enable serialization to combat AE_ALREADY_EXISTS errors */
1135 static int __init acpi_serialize_setup(char *str)
1136 {
1137 	printk(KERN_INFO PREFIX "serialize enabled\n");
1138 
1139 	acpi_gbl_all_methods_serialized = TRUE;
1140 
1141 	return 1;
1142 }
1143 
1144 __setup("acpi_serialize", acpi_serialize_setup);
1145 
1146 /*
1147  * Wake and Run-Time GPEs are expected to be separate.
1148  * We disable wake-GPEs at run-time to prevent spurious
1149  * interrupts.
1150  *
1151  * However, if a system exists that shares Wake and
1152  * Run-time events on the same GPE this flag is available
1153  * to tell Linux to keep the wake-time GPEs enabled at run-time.
1154  */
1155 static int __init acpi_wake_gpes_always_on_setup(char *str)
1156 {
1157 	printk(KERN_INFO PREFIX "wake GPEs not disabled\n");
1158 
1159 	acpi_gbl_leave_wake_gpes_disabled = FALSE;
1160 
1161 	return 1;
1162 }
1163 
1164 __setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
1165 
1166 /* Check for resource interference between native drivers and ACPI
1167  * OperationRegions (SystemIO and System Memory only).
1168  * IO ports and memory declared in ACPI might be used by the ACPI subsystem
1169  * in arbitrary AML code and can interfere with legacy drivers.
1170  * acpi_enforce_resources= can be set to:
1171  *
1172  *   - strict           (2)
1173  *     -> a driver that later tries to access the resources will not load
1174  *   - lax (default)    (1)
1175  *     -> a driver that later tries to access the resources will load, but
1176  *        you get a system message that something might go wrong...
1177  *
1178  *   - no               (0)
1179  *     -> ACPI Operation Region resources will not be registered
1180  *
1181  */
1182 #define ENFORCE_RESOURCES_STRICT 2
1183 #define ENFORCE_RESOURCES_LAX    1
1184 #define ENFORCE_RESOURCES_NO     0
1185 
1186 static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1187 
1188 static int __init acpi_enforce_resources_setup(char *str)
1189 {
1190 	if (str == NULL || *str == '\0')
1191 		return 0;
1192 
1193 	if (!strcmp("strict", str))
1194 		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
1195 	else if (!strcmp("lax", str))
1196 		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
1197 	else if (!strcmp("no", str))
1198 		acpi_enforce_resources = ENFORCE_RESOURCES_NO;
1199 
1200 	return 1;
1201 }
1202 
1203 __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
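
/*
 * Illustrative command-line usage (editor addition):
 *
 *	acpi_enforce_resources=strict	conflicting native drivers are refused
 *	acpi_enforce_resources=lax	conflicts are only warned about (default)
 *	acpi_enforce_resources=no	Operation Region resources are not tracked
 */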
1204 
1205 /* Check for resource conflicts between ACPI OperationRegions and native
1206  * drivers */
1207 int acpi_check_resource_conflict(struct resource *res)
1208 {
1209 	struct acpi_res_list *res_list_elem;
1210 	int ioport;
1211 	int clash = 0;
1212 
1213 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1214 		return 0;
1215 	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
1216 		return 0;
1217 
1218 	ioport = res->flags & IORESOURCE_IO;
1219 
1220 	spin_lock(&acpi_res_lock);
1221 	list_for_each_entry(res_list_elem, &resource_list_head,
1222 			    resource_list) {
1223 		if (ioport && (res_list_elem->resource_type
1224 			       != ACPI_ADR_SPACE_SYSTEM_IO))
1225 			continue;
1226 		if (!ioport && (res_list_elem->resource_type
1227 				!= ACPI_ADR_SPACE_SYSTEM_MEMORY))
1228 			continue;
1229 
1230 		if (res->end < res_list_elem->start
1231 		    || res_list_elem->end < res->start)
1232 			continue;
1233 		clash = 1;
1234 		break;
1235 	}
1236 	spin_unlock(&acpi_res_lock);
1237 
1238 	if (clash) {
1239 		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
1240 			printk(KERN_INFO "%sACPI: %s resource %s [0x%llx-0x%llx]"
1241 			       " conflicts with ACPI region %s"
1242 			       " [0x%llx-0x%llx]\n",
1243 			       acpi_enforce_resources == ENFORCE_RESOURCES_LAX
1244 			       ? KERN_WARNING : KERN_ERR,
1245 			       ioport ? "I/O" : "Memory", res->name,
1246 			       (long long) res->start, (long long) res->end,
1247 			       res_list_elem->name,
1248 			       (long long) res_list_elem->start,
1249 			       (long long) res_list_elem->end);
1250 			printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
1251 		}
1252 		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1253 			return -EBUSY;
1254 	}
1255 	return 0;
1256 }
1257 EXPORT_SYMBOL(acpi_check_resource_conflict);
1258 
1259 int acpi_check_region(resource_size_t start, resource_size_t n,
1260 		      const char *name)
1261 {
1262 	struct resource res = {
1263 		.start = start,
1264 		.end   = start + n - 1,
1265 		.name  = name,
1266 		.flags = IORESOURCE_IO,
1267 	};
1268 
1269 	return acpi_check_resource_conflict(&res);
1270 }
1271 EXPORT_SYMBOL(acpi_check_region);
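
/*
 * Illustrative sketch (editor addition) of the intended call site in a
 * native driver; "base", "len" and "mydrv" are hypothetical:
 *
 *	if (acpi_check_region(base, len, "mydrv"))
 *		return -EBUSY;		(fails only in "strict" mode)
 *	if (!request_region(base, len, "mydrv"))
 *		return -EBUSY;
 *
 * acpi_check_mem_region() below is the equivalent for memory-mapped
 * resources.
 */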
1272 
1273 int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1274 		      const char *name)
1275 {
1276 	struct resource res = {
1277 		.start = start,
1278 		.end   = start + n - 1,
1279 		.name  = name,
1280 		.flags = IORESOURCE_MEM,
1281 	};
1282 
1283 	return acpi_check_resource_conflict(&res);
1284 
1285 }
1286 EXPORT_SYMBOL(acpi_check_mem_region);
1287 
1288 /*
1289  * Acquire a spinlock.
1290  *
1291  * handle is a pointer to the spinlock_t.
1292  */
1293 
1294 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
1295 {
1296 	acpi_cpu_flags flags;
1297 	spin_lock_irqsave(lockp, flags);
1298 	return flags;
1299 }
1300 
1301 /*
1302  * Release a spinlock. See above.
1303  */
1304 
1305 void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
1306 {
1307 	spin_unlock_irqrestore(lockp, flags);
1308 }
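
/*
 * Illustrative sketch (editor addition): the interpreter always pairs the
 * two calls above, with interrupts disabled in between:
 *
 *	acpi_cpu_flags flags = acpi_os_acquire_lock(lock);
 *	... critical section ...
 *	acpi_os_release_lock(lock, flags);
 */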
1309 
1310 #ifndef ACPI_USE_LOCAL_CACHE
1311 
1312 /*******************************************************************************
1313  *
1314  * FUNCTION:    acpi_os_create_cache
1315  *
1316  * PARAMETERS:  name      - Ascii name for the cache
1317  *              size      - Size of each cached object
1318  *              depth     - Maximum depth of the cache (in objects) <ignored>
1319  *              cache     - Where the new cache object is returned
1320  *
1321  * RETURN:      status
1322  *
1323  * DESCRIPTION: Create a cache object
1324  *
1325  ******************************************************************************/
1326 
1327 acpi_status
1328 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
1329 {
1330 	*cache = kmem_cache_create(name, size, 0, 0, NULL);
1331 	if (*cache == NULL)
1332 		return AE_ERROR;
1333 	else
1334 		return AE_OK;
1335 }
1336 
1337 /*******************************************************************************
1338  *
1339  * FUNCTION:    acpi_os_purge_cache
1340  *
1341  * PARAMETERS:  Cache           - Handle to cache object
1342  *
1343  * RETURN:      Status
1344  *
1345  * DESCRIPTION: Free all objects within the requested cache.
1346  *
1347  ******************************************************************************/
1348 
1349 acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
1350 {
1351 	kmem_cache_shrink(cache);
1352 	return (AE_OK);
1353 }
1354 
1355 /*******************************************************************************
1356  *
1357  * FUNCTION:    acpi_os_delete_cache
1358  *
1359  * PARAMETERS:  Cache           - Handle to cache object
1360  *
1361  * RETURN:      Status
1362  *
1363  * DESCRIPTION: Free all objects within the requested cache and delete the
1364  *              cache object.
1365  *
1366  ******************************************************************************/
1367 
1368 acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
1369 {
1370 	kmem_cache_destroy(cache);
1371 	return (AE_OK);
1372 }
1373 
1374 /*******************************************************************************
1375  *
1376  * FUNCTION:    acpi_os_release_object
1377  *
1378  * PARAMETERS:  Cache       - Handle to cache object
1379  *              Object      - The object to be released
1380  *
1381  * RETURN:      None
1382  *
1383  * DESCRIPTION: Release an object to the specified cache.  If cache is full,
1384  *              the object is deleted.
1385  *
1386  ******************************************************************************/
1387 
1388 acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
1389 {
1390 	kmem_cache_free(cache, object);
1391 	return (AE_OK);
1392 }
1393 
1394 /**
1395  *	acpi_dmi_dump - dump DMI slots needed for blacklist entry
1396  *
1397  *	Returns 0 on success
1398  */
1399 static int acpi_dmi_dump(void)
1400 {
1401 
1402 	if (!dmi_available)
1403 		return -1;
1404 
1405 	printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
1406 		dmi_get_system_info(DMI_SYS_VENDOR));
1407 	printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
1408 		dmi_get_system_info(DMI_PRODUCT_NAME));
1409 	printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
1410 		dmi_get_system_info(DMI_PRODUCT_VERSION));
1411 	printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
1412 		dmi_get_system_info(DMI_BOARD_NAME));
1413 	printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
1414 		dmi_get_system_info(DMI_BIOS_VENDOR));
1415 	printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
1416 		dmi_get_system_info(DMI_BIOS_DATE));
1417 
1418 	return 0;
1419 }
1420 
1421 
1422 /******************************************************************************
1423  *
1424  * FUNCTION:    acpi_os_validate_interface
1425  *
1426  * PARAMETERS:  interface           - Requested interface to be validated
1427  *
1428  * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
1429  *
1430  * DESCRIPTION: Match an interface string to the interfaces supported by the
1431  *              host. Strings originate from an AML call to the _OSI method.
1432  *
1433  *****************************************************************************/
1434 
1435 acpi_status
1436 acpi_os_validate_interface (char *interface)
1437 {
1438 	if (!strncmp(osi_additional_string, interface, OSI_STRING_LENGTH_MAX))
1439 		return AE_OK;
1440 	if (!strcmp("Linux", interface)) {
1441 
1442 		printk(KERN_NOTICE PREFIX
1443 			"BIOS _OSI(Linux) query %s%s\n",
1444 			osi_linux.enable ? "honored" : "ignored",
1445 			osi_linux.cmdline ? " via cmdline" :
1446 			osi_linux.dmi ? " via DMI" : "");
1447 
1448 		if (!osi_linux.dmi) {
1449 			if (acpi_dmi_dump())
1450 				printk(KERN_NOTICE PREFIX
1451 					"[please extract dmidecode output]\n");
1452 			printk(KERN_NOTICE PREFIX
1453 				"Please send DMI info above to "
1454 				"linux-acpi@vger.kernel.org\n");
1455 		}
1456 		if (!osi_linux.known && !osi_linux.cmdline) {
1457 			printk(KERN_NOTICE PREFIX
1458 				"If \"acpi_osi=%sLinux\" works better, "
1459 				"please notify linux-acpi@vger.kernel.org\n",
1460 				osi_linux.enable ? "!" : "");
1461 		}
1462 
1463 		if (osi_linux.enable)
1464 			return AE_OK;
1465 	}
1466 	return AE_SUPPORT;
1467 }
1468 
1469 /******************************************************************************
1470  *
1471  * FUNCTION:    acpi_os_validate_address
1472  *
1473  * PARAMETERS:  space_id             - ACPI space ID
1474  *              address             - Physical address
1475  *              length              - Address length
1476  *
1477  * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
1478  *              should return AE_AML_ILLEGAL_ADDRESS.
1479  *
1480  * DESCRIPTION: Validate a system address via the host OS. Used to validate
1481  *              the addresses accessed by AML operation regions.
1482  *
1483  *****************************************************************************/
1484 
1485 acpi_status
1486 acpi_os_validate_address (
1487     u8                   space_id,
1488     acpi_physical_address   address,
1489     acpi_size               length,
1490     char *name)
1491 {
1492 	struct acpi_res_list *res;
1493 	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
1494 		return AE_OK;
1495 
1496 	switch (space_id) {
1497 	case ACPI_ADR_SPACE_SYSTEM_IO:
1498 	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1499 		/* Only interference checks against SystemIO and SystemMemory
1500 		   are needed */
1501 		res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1502 		if (!res)
1503 			return AE_OK;
1504 		/* ACPI names are fixed at 4 bytes; use strlcpy to guarantee termination */
1505 		strlcpy(res->name, name, 5);
1506 		res->start = address;
1507 		res->end = address + length - 1;
1508 		res->resource_type = space_id;
1509 		spin_lock(&acpi_res_lock);
1510 		list_add(&res->resource_list, &resource_list_head);
1511 		spin_unlock(&acpi_res_lock);
1512 		pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
1513 			 "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
1514 			 ? "SystemIO" : "System Memory",
1515 			 (unsigned long long)res->start,
1516 			 (unsigned long long)res->end,
1517 			 res->name);
1518 		break;
1519 	case ACPI_ADR_SPACE_PCI_CONFIG:
1520 	case ACPI_ADR_SPACE_EC:
1521 	case ACPI_ADR_SPACE_SMBUS:
1522 	case ACPI_ADR_SPACE_CMOS:
1523 	case ACPI_ADR_SPACE_PCI_BAR_TARGET:
1524 	case ACPI_ADR_SPACE_DATA_TABLE:
1525 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
1526 		break;
1527 	}
1528 	return AE_OK;
1529 }
1530 
1531 #endif
1532