1 /*
2  * processor_core.c - ACPI Processor Driver ($Revision: 71 $)
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *  TBD:
28  *	1. Make # power states dynamic.
29  *	2. Support duty_cycle values that span bit 4.
30  *	3. Optimize by having scheduler determine busyness instead of
31  *	   having us try to calculate it here.
32  *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/dmi.h>
46 #include <linux/moduleparam.h>
47 #include <linux/cpuidle.h>
48 
49 #include <asm/io.h>
50 #include <asm/system.h>
51 #include <asm/cpu.h>
52 #include <asm/delay.h>
53 #include <asm/uaccess.h>
54 #include <asm/processor.h>
55 #include <asm/smp.h>
56 #include <asm/acpi.h>
57 
58 #include <acpi/acpi_bus.h>
59 #include <acpi/acpi_drivers.h>
60 #include <acpi/processor.h>
61 
62 #define ACPI_PROCESSOR_CLASS		"processor"
63 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
64 #define ACPI_PROCESSOR_FILE_INFO	"info"
65 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
66 #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
67 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
68 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
69 #define ACPI_PROCESSOR_NOTIFY_THROTTLING	0x82
70 
71 #define ACPI_PROCESSOR_LIMIT_USER	0
72 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
73 
74 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
75 ACPI_MODULE_NAME("processor_core");
76 
77 MODULE_AUTHOR("Paul Diefenbaugh");
78 MODULE_DESCRIPTION("ACPI Processor Driver");
79 MODULE_LICENSE("GPL");
80 
81 static int acpi_processor_add(struct acpi_device *device);
82 static int acpi_processor_start(struct acpi_device *device);
83 static int acpi_processor_remove(struct acpi_device *device, int type);
84 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
85 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
86 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
87 static int acpi_processor_handle_eject(struct acpi_processor *pr);
88 
89 
90 static const struct acpi_device_id processor_device_ids[] = {
91 	{ACPI_PROCESSOR_OBJECT_HID, 0},
92 	{ACPI_PROCESSOR_HID, 0},
93 	{"", 0},
94 };
95 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
96 
97 static struct acpi_driver acpi_processor_driver = {
98 	.name = "processor",
99 	.class = ACPI_PROCESSOR_CLASS,
100 	.ids = processor_device_ids,
101 	.ops = {
102 		.add = acpi_processor_add,
103 		.remove = acpi_processor_remove,
104 		.start = acpi_processor_start,
105 		.suspend = acpi_processor_suspend,
106 		.resume = acpi_processor_resume,
107 		},
108 };
109 
110 #define INSTALL_NOTIFY_HANDLER		1
111 #define UNINSTALL_NOTIFY_HANDLER	2
112 
113 static const struct file_operations acpi_processor_info_fops = {
114 	.owner = THIS_MODULE,
115 	.open = acpi_processor_info_open_fs,
116 	.read = seq_read,
117 	.llseek = seq_lseek,
118 	.release = single_release,
119 };
120 
121 DEFINE_PER_CPU(struct acpi_processor *, processors);
122 struct acpi_processor_errata errata __read_mostly;
123 static int set_no_mwait(const struct dmi_system_id *id)
124 {
125 	printk(KERN_NOTICE PREFIX "%s detected - "
126 		"disabling mwait for CPU C-states\n", id->ident);
127 	idle_nomwait = 1;
128 	return 0;
129 }
130 
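/*
 * Boards whose BIOS is known to mishandle MWAIT-based C-state entry.
 * A match sets idle_nomwait (see set_no_mwait() above), which in turn
 * masks the FFH access-mode bits in the _PDC capabilities passed to the
 * firmware in acpi_processor_set_pdc().
 */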
131 static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
132 	{
133 	set_no_mwait, "IFL91 board", {
134 	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
135 	DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
136 	DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
137 	DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
138 	{
139 	set_no_mwait, "Extensa 5220", {
140 	DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
141 	DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
142 	DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
143 	DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
144 	{},
145 };
146 
147 /* --------------------------------------------------------------------------
148                                 Errata Handling
149    -------------------------------------------------------------------------- */
150 
151 static int acpi_processor_errata_piix4(struct pci_dev *dev)
152 {
153 	u8 value1 = 0;
154 	u8 value2 = 0;
155 
156 
157 	if (!dev)
158 		return -EINVAL;
159 
160 	/*
161 	 * Note that 'dev' references the PIIX4 ACPI Controller.
162 	 */
163 
164 	switch (dev->revision) {
165 	case 0:
166 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
167 		break;
168 	case 1:
169 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
170 		break;
171 	case 2:
172 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
173 		break;
174 	case 3:
175 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
176 		break;
177 	default:
178 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
179 		break;
180 	}
181 
182 	switch (dev->revision) {
183 
184 	case 0:		/* PIIX4 A-step */
185 	case 1:		/* PIIX4 B-step */
186 		/*
187 		 * See specification changes #13 ("Manual Throttle Duty Cycle")
188 		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
189 		 * erratum #5 ("STPCLK# Deassertion Time") from the January
190 		 * 2002 PIIX4 specification update.  Applies only to older
191 		 * PIIX4 models.
192 		 */
193 		errata.piix4.throttle = 1;
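		/*
		 * Fall through - the BM-IDE and Type-F DMA checks below
		 * apply to all PIIX4 models.
		 */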
194 
195 	case 2:		/* PIIX4E */
196 	case 3:		/* PIIX4M */
197 		/*
198 		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
199 		 * Livelock") from the January 2002 PIIX4 specification update.
200 		 * Applies to all PIIX4 models.
201 		 */
202 
203 		/*
204 		 * BM-IDE
205 		 * ------
206 		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
207 		 * Status register address.  We'll use this later to read
208 		 * each IDE controller's DMA status to make sure we catch all
209 		 * DMA activity.
210 		 */
211 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
212 				     PCI_DEVICE_ID_INTEL_82371AB,
213 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
214 		if (dev) {
215 			errata.piix4.bmisx = pci_resource_start(dev, 4);
216 			pci_dev_put(dev);
217 		}
218 
219 		/*
220 		 * Type-F DMA
221 		 * ----------
222 		 * Find the PIIX4 ISA Controller and read the Motherboard
223 		 * DMA controller's status to see if Type-F (Fast) DMA mode
224 		 * is enabled (bit 7) on either channel.  Note that we'll
225 		 * disable C3 support if this is enabled, as some legacy
226 		 * devices won't operate well if fast DMA is disabled.
227 		 */
228 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
229 				     PCI_DEVICE_ID_INTEL_82371AB_0,
230 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
231 		if (dev) {
232 			pci_read_config_byte(dev, 0x76, &value1);
233 			pci_read_config_byte(dev, 0x77, &value2);
234 			if ((value1 & 0x80) || (value2 & 0x80))
235 				errata.piix4.fdma = 1;
236 			pci_dev_put(dev);
237 		}
238 
239 		break;
240 	}
241 
242 	if (errata.piix4.bmisx)
243 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
244 				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
245 	if (errata.piix4.fdma)
246 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
247 				  "Type-F DMA livelock erratum (C3 disabled)\n"));
248 
249 	return 0;
250 }
251 
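/*
 * Look for the PIIX4 ACPI controller (82371AB function 3); if it is
 * present, apply the chip-specific throttling/C3 workarounds above.
 */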
252 static int acpi_processor_errata(struct acpi_processor *pr)
253 {
254 	int result = 0;
255 	struct pci_dev *dev = NULL;
256 
257 
258 	if (!pr)
259 		return -EINVAL;
260 
261 	/*
262 	 * PIIX4
263 	 */
264 	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
265 			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
266 			     PCI_ANY_ID, NULL);
267 	if (dev) {
268 		result = acpi_processor_errata_piix4(dev);
269 		pci_dev_put(dev);
270 	}
271 
272 	return result;
273 }
274 
275 /* --------------------------------------------------------------------------
276                               Common ACPI processor functions
277    -------------------------------------------------------------------------- */
278 
279 /*
280  * _PDC is required for a BIOS-OS handshake for most of the newer
281  * ACPI processor features.
282  */
283 static int acpi_processor_set_pdc(struct acpi_processor *pr)
284 {
285 	struct acpi_object_list *pdc_in = pr->pdc;
286 	acpi_status status = AE_OK;
287 
288 
289 	if (!pdc_in)
290 		return status;
291 	if (idle_nomwait) {
292 		/*
293 		 * If mwait is disabled for CPU C-states, clear the C2C3_FFH
294 		 * access mode bit in the _PDC capability buffer; the C1_FFH
295 		 * access mode bit is cleared as well.
296 		 */
297 		union acpi_object *obj;
298 		u32 *buffer = NULL;
299 
300 		obj = pdc_in->pointer;
301 		buffer = (u32 *)(obj->buffer.pointer);
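		/*
		 * The _PDC input buffer is laid out as { revision, count,
		 * capabilities... }, so buffer[2] below is the first
		 * capabilities DWORD.
		 */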
302 		buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
303 
304 	}
305 	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
306 
307 	if (ACPI_FAILURE(status))
308 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
309 		    "Could not evaluate _PDC, using legacy perf. control...\n"));
310 
311 	return status;
312 }
313 
314 /* --------------------------------------------------------------------------
315                               FS Interface (/proc)
316    -------------------------------------------------------------------------- */
317 
318 static struct proc_dir_entry *acpi_processor_dir = NULL;
319 
320 static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
321 {
322 	struct acpi_processor *pr = seq->private;
323 
324 
325 	if (!pr)
326 		goto end;
327 
328 	seq_printf(seq, "processor id:            %d\n"
329 		   "acpi id:                 %d\n"
330 		   "bus mastering control:   %s\n"
331 		   "power management:        %s\n"
332 		   "throttling control:      %s\n"
333 		   "limit interface:         %s\n",
334 		   pr->id,
335 		   pr->acpi_id,
336 		   pr->flags.bm_control ? "yes" : "no",
337 		   pr->flags.power ? "yes" : "no",
338 		   pr->flags.throttling ? "yes" : "no",
339 		   pr->flags.limit ? "yes" : "no");
340 
341       end:
342 	return 0;
343 }
344 
345 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
346 {
347 	return single_open(file, acpi_processor_info_seq_show,
348 			   PDE(inode)->data);
349 }
350 
351 static int acpi_processor_add_fs(struct acpi_device *device)
352 {
353 	struct proc_dir_entry *entry = NULL;
354 
355 
356 	if (!acpi_device_dir(device)) {
357 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
358 						     acpi_processor_dir);
359 		if (!acpi_device_dir(device))
360 			return -ENODEV;
361 	}
362 	acpi_device_dir(device)->owner = THIS_MODULE;
363 
364 	/* 'info' [R] */
365 	entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
366 				 S_IRUGO, acpi_device_dir(device),
367 				 &acpi_processor_info_fops,
368 				 acpi_driver_data(device));
369 	if (!entry)
370 		return -EIO;
371 
372 	/* 'throttling' [R/W] */
373 	entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
374 				 S_IFREG | S_IRUGO | S_IWUSR,
375 				 acpi_device_dir(device),
376 				 &acpi_processor_throttling_fops,
377 				 acpi_driver_data(device));
378 	if (!entry)
379 		return -EIO;
380 
381 	/* 'limit' [R/W] */
382 	entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
383 				 S_IFREG | S_IRUGO | S_IWUSR,
384 				 acpi_device_dir(device),
385 				 &acpi_processor_limit_fops,
386 				 acpi_driver_data(device));
387 	if (!entry)
388 		return -EIO;
389 	return 0;
390 }
391 
392 static int acpi_processor_remove_fs(struct acpi_device *device)
393 {
394 
395 	if (acpi_device_dir(device)) {
396 		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
397 				  acpi_device_dir(device));
398 		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
399 				  acpi_device_dir(device));
400 		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
401 				  acpi_device_dir(device));
402 		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
403 		acpi_device_dir(device) = NULL;
404 	}
405 
406 	return 0;
407 }
408 
409 /* Use the ACPI id from the MADT to map processor objects to CPUs on SMP */
410 
411 #ifndef CONFIG_SMP
412 static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; }
413 #else
414 
415 static struct acpi_table_madt *madt;
416 
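/*
 * Match an enabled LOCAL_APIC subtable whose ACPI processor id equals
 * the given acpi_id; on success the APIC id is returned through
 * *apic_id.
 */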
417 static int map_lapic_id(struct acpi_subtable_header *entry,
418 		 u32 acpi_id, int *apic_id)
419 {
420 	struct acpi_madt_local_apic *lapic =
421 		(struct acpi_madt_local_apic *)entry;
422 	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
423 	    lapic->processor_id == acpi_id) {
424 		*apic_id = lapic->id;
425 		return 1;
426 	}
427 	return 0;
428 }
429 
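/*
 * Match an enabled LOCAL_SAPIC subtable.  Processors declared with a
 * Device statement are matched on _UID, those declared with a Processor
 * statement on the ACPI processor id; the packed (id << 8 | eid) value
 * is returned through *apic_id.
 */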
430 static int map_lsapic_id(struct acpi_subtable_header *entry,
431 		int device_declaration, u32 acpi_id, int *apic_id)
432 {
433 	struct acpi_madt_local_sapic *lsapic =
434 		(struct acpi_madt_local_sapic *)entry;
435 	u32 tmp = (lsapic->id << 8) | lsapic->eid;
436 
437 	/* Only check enabled APICs */
438 	if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
439 		return 0;
440 
441 	/* Device statement declaration type */
442 	if (device_declaration) {
443 		if (entry->length < 16)
444 			printk(KERN_ERR PREFIX
445 			    "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n",
446 			    tmp);
447 		else if (lsapic->uid == acpi_id)
448 			goto found;
449 	/* Processor statement declaration type */
450 	} else if (lsapic->processor_id == acpi_id)
451 		goto found;
452 
453 	return 0;
454 found:
455 	*apic_id = tmp;
456 	return 1;
457 }
458 
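/*
 * Walk the cached MADT and return the physical APIC/SAPIC id for the
 * given ACPI id, or -1 if no enabled entry matches.
 */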
459 static int map_madt_entry(int type, u32 acpi_id)
460 {
461 	unsigned long madt_end, entry;
462 	int apic_id = -1;
463 
464 	if (!madt)
465 		return apic_id;
466 
467 	entry = (unsigned long)madt;
468 	madt_end = entry + madt->header.length;
469 
470 	/* Parse all entries looking for a match. */
471 
472 	entry += sizeof(struct acpi_table_madt);
473 	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
474 		struct acpi_subtable_header *header =
475 			(struct acpi_subtable_header *)entry;
476 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
477 			if (map_lapic_id(header, acpi_id, &apic_id))
478 				break;
479 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
480 			if (map_lsapic_id(header, type, acpi_id, &apic_id))
481 				break;
482 		}
483 		entry += header->length;
484 	}
485 	return apic_id;
486 }
487 
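/*
 * _MAT returns a buffer holding a single MADT-style entry for this
 * processor (typically provided on hot-plug capable systems);
 * get_cpu_id() tries this before scanning the static MADT.
 */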
488 static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
489 {
490 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
491 	union acpi_object *obj;
492 	struct acpi_subtable_header *header;
493 	int apic_id = -1;
494 
495 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
496 		goto exit;
497 
498 	if (!buffer.length || !buffer.pointer)
499 		goto exit;
500 
501 	obj = buffer.pointer;
502 	if (obj->type != ACPI_TYPE_BUFFER ||
503 	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
504 		goto exit;
505 	}
506 
507 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
508 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
509 		map_lapic_id(header, acpi_id, &apic_id);
510 	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
511 		map_lsapic_id(header, type, acpi_id, &apic_id);
512 	}
513 
514 exit:
515 	if (buffer.pointer)
516 		kfree(buffer.pointer);
517 	return apic_id;
518 }
519 
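/*
 * Translate an ACPI processor id to a logical CPU number: resolve the
 * physical APIC/SAPIC id via _MAT or the MADT, then find the possible
 * CPU whose cpu_physical_id() matches it.
 */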
520 static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id)
521 {
522 	int i;
523 	int apic_id = -1;
524 
525 	apic_id = map_mat_entry(handle, type, acpi_id);
526 	if (apic_id == -1)
527 		apic_id = map_madt_entry(type, acpi_id);
528 	if (apic_id == -1)
529 		return apic_id;
530 
531 	for_each_possible_cpu(i) {
532 		if (cpu_physical_id(i) == apic_id)
533 			return i;
534 	}
535 	return -1;
536 }
537 #endif
538 
539 /* --------------------------------------------------------------------------
540                                  Driver Interface
541    -------------------------------------------------------------------------- */
542 
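/*
 * Gather the static properties of a processor object: apply chipset
 * errata, determine whether it was declared via a Processor statement
 * or a Device with the processor HID, fetch the corresponding ACPI id,
 * map it to a logical CPU, and record the P_BLK address used for
 * throttling.
 */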
543 static int acpi_processor_get_info(struct acpi_device *device)
544 {
545 	acpi_status status = 0;
546 	union acpi_object object = { 0 };
547 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
548 	struct acpi_processor *pr;
549 	int cpu_index, device_declaration = 0;
550 	static int cpu0_initialized;
551 
552 	pr = acpi_driver_data(device);
553 	if (!pr)
554 		return -EINVAL;
555 
556 	if (num_online_cpus() > 1)
557 		errata.smp = TRUE;
558 
559 	acpi_processor_errata(pr);
560 
561 	/*
562 	 * Check to see if we have bus mastering arbitration control.  This
563 	 * is required for proper C3 usage (to maintain cache coherency).
564 	 */
565 	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
566 		pr->flags.bm_control = 1;
567 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
568 				  "Bus mastering arbitration control present\n"));
569 	} else
570 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
571 				  "No bus mastering arbitration control\n"));
572 
573 	if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_HID)) {
574 		/*
575 		 * Declared with "Device" statement; match _UID.
576 		 * Note that we don't handle string _UIDs yet.
577 		 */
578 		unsigned long long value;
579 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
580 						NULL, &value);
581 		if (ACPI_FAILURE(status)) {
582 			printk(KERN_ERR PREFIX
583 			    "Evaluating processor _UID [%#x]\n", status);
584 			return -ENODEV;
585 		}
586 		device_declaration = 1;
587 		pr->acpi_id = value;
588 	} else {
589 		/* Declared with "Processor" statement; match ProcessorID */
590 		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
591 		if (ACPI_FAILURE(status)) {
592 			printk(KERN_ERR PREFIX "Evaluating processor object\n");
593 			return -ENODEV;
594 		}
595 
596 		/*
597 		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
598 		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
599 		 *      arch/xxx/acpi.c
600 		 */
601 		pr->acpi_id = object.processor.proc_id;
602 	}
603 	cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id);
604 
605 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
606 	if (!cpu0_initialized && (cpu_index == -1) &&
607 	    (num_online_cpus() == 1)) {
608 		cpu_index = 0;
609 	}
610 
611 	cpu0_initialized = 1;
612 
613 	pr->id = cpu_index;
614 
615 	/*
616 	 *  Extra Processor objects may be enumerated on MP systems with
617 	 *  fewer than the maximum number of CPUs.  They should be ignored
618 	 *  if, and only if, they are physically not present.
619 	 */
620 	if (pr->id == -1) {
621 		if (ACPI_FAILURE
622 		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
623 			return -ENODEV;
624 		}
625 	}
626 
627 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
628 			  pr->acpi_id));
629 
630 	if (!object.processor.pblk_address)
631 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
632 	else if (object.processor.pblk_length != 6)
633 		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
634 			    object.processor.pblk_length);
635 	else {
636 		pr->throttling.address = object.processor.pblk_address;
637 		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
638 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
639 
640 		pr->pblk = object.processor.pblk_address;
641 
642 		/*
643 		 * We don't care about error returns - we just try to mark
644 		 * these reserved so that nobody else is confused into thinking
645 		 * that this region might be unused.
646 		 *
647 		 * (In particular, allocating the IO range for Cardbus)
648 		 */
649 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
650 	}
651 
652 	/*
653 	 * If ACPI describes a slot number for this CPU, we can use it
654 	 * to ensure we get the right value in the "physical id" field
655 	 * of /proc/cpuinfo
656 	 */
657 	status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
658 	if (ACPI_SUCCESS(status))
659 		arch_fix_phys_package_id(pr->id, object.integer.value);
660 
661 	return 0;
662 }
663 
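/*
 * Per-CPU record of which acpi_device owns each logical CPU; used by
 * the buggy-BIOS check in acpi_processor_start() to detect duplicate
 * ACPI ids.
 */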
664 static DEFINE_PER_CPU(void *, processor_device_array);
665 
666 static int __cpuinit acpi_processor_start(struct acpi_device *device)
667 {
668 	int result = 0;
669 	acpi_status status = AE_OK;
670 	struct acpi_processor *pr;
671 	struct sys_device *sysdev;
672 
673 	pr = acpi_driver_data(device);
674 
675 	result = acpi_processor_get_info(device);
676 	if (result) {
677 		/* Processor is physically not present */
678 		return 0;
679 	}
680 
681 	BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
682 
683 	/*
684 	 * Buggy BIOS check
685 	 * ACPI id of processors can be reported wrongly by the BIOS.
686 	 * Don't trust it blindly
687 	 */
688 	if (per_cpu(processor_device_array, pr->id) != NULL &&
689 	    per_cpu(processor_device_array, pr->id) != device) {
690 		printk(KERN_WARNING "BIOS reported wrong ACPI id "
691 			"for the processor\n");
692 		return -ENODEV;
693 	}
694 	per_cpu(processor_device_array, pr->id) = device;
695 
696 	per_cpu(processors, pr->id) = pr;
697 
698 	result = acpi_processor_add_fs(device);
699 	if (result)
700 		goto end;
701 
702 	sysdev = get_cpu_sysdev(pr->id);
703 	if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev"))
704 		return -EFAULT;
705 
706 	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
707 					     acpi_processor_notify, pr);
708 
709 	/* The _PDC call should be made before doing anything else (if required). */
710 	arch_acpi_processor_init_pdc(pr);
711 	acpi_processor_set_pdc(pr);
712 #ifdef CONFIG_CPU_FREQ
713 	acpi_processor_ppc_has_changed(pr);
714 #endif
715 	acpi_processor_get_throttling_info(pr);
716 	acpi_processor_get_limit_info(pr);
717 
718 
719 	acpi_processor_power_init(pr, device);
720 
721 	pr->cdev = thermal_cooling_device_register("Processor", device,
722 						&processor_cooling_ops);
723 	if (IS_ERR(pr->cdev)) {
724 		result = PTR_ERR(pr->cdev);
725 		goto end;
726 	}
727 
728 	dev_info(&device->dev, "registered as cooling_device%d\n",
729 		 pr->cdev->id);
730 
731 	result = sysfs_create_link(&device->dev.kobj,
732 				   &pr->cdev->device.kobj,
733 				   "thermal_cooling");
734 	if (result)
735 		printk(KERN_ERR PREFIX "Create sysfs link\n");
736 	result = sysfs_create_link(&pr->cdev->device.kobj,
737 				   &device->dev.kobj,
738 				   "device");
739 	if (result)
740 		printk(KERN_ERR PREFIX "Create sysfs link\n");
741 
742 	if (pr->flags.throttling) {
743 		printk(KERN_INFO PREFIX "%s [%s] (supports",
744 		       acpi_device_name(device), acpi_device_bid(device));
745 		printk(" %d throttling states", pr->throttling.state_count);
746 		printk(")\n");
747 	}
748 
749       end:
750 
751 	return result;
752 }
753 
754 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
755 {
756 	struct acpi_processor *pr = data;
757 	struct acpi_device *device = NULL;
758 	int saved;
759 
760 	if (!pr)
761 		return;
762 
763 	if (acpi_bus_get_device(pr->handle, &device))
764 		return;
765 
766 	switch (event) {
767 	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
768 		saved = pr->performance_platform_limit;
769 		acpi_processor_ppc_has_changed(pr);
770 		if (saved == pr->performance_platform_limit)
771 			break;
772 		acpi_bus_generate_proc_event(device, event,
773 					pr->performance_platform_limit);
774 		acpi_bus_generate_netlink_event(device->pnp.device_class,
775 						  dev_name(&device->dev), event,
776 						  pr->performance_platform_limit);
777 		break;
778 	case ACPI_PROCESSOR_NOTIFY_POWER:
779 		acpi_processor_cst_has_changed(pr);
780 		acpi_bus_generate_proc_event(device, event, 0);
781 		acpi_bus_generate_netlink_event(device->pnp.device_class,
782 						  dev_name(&device->dev), event, 0);
783 		break;
784 	case ACPI_PROCESSOR_NOTIFY_THROTTLING:
785 		acpi_processor_tstate_has_changed(pr);
786 		acpi_bus_generate_proc_event(device, event, 0);
787 		acpi_bus_generate_netlink_event(device->pnp.device_class,
788 						  dev_name(&device->dev), event, 0);
		break;
789 	default:
790 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
791 				  "Unsupported event [0x%x]\n", event));
792 		break;
793 	}
794 
795 	return;
796 }
797 
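/*
 * When a CPU comes back online, re-evaluate its performance limit,
 * C-states and T-states, in case the platform changed them while the
 * CPU was offline.
 */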
798 static int acpi_cpu_soft_notify(struct notifier_block *nfb,
799 		unsigned long action, void *hcpu)
800 {
801 	unsigned int cpu = (unsigned long)hcpu;
802 	struct acpi_processor *pr = per_cpu(processors, cpu);
803 
804 	if (action == CPU_ONLINE && pr) {
805 		acpi_processor_ppc_has_changed(pr);
806 		acpi_processor_cst_has_changed(pr);
807 		acpi_processor_tstate_has_changed(pr);
808 	}
809 	return NOTIFY_OK;
810 }
811 
812 static struct notifier_block acpi_cpu_notifier =
813 {
814 	    .notifier_call = acpi_cpu_soft_notify,
815 };
816 
817 static int acpi_processor_add(struct acpi_device *device)
818 {
819 	struct acpi_processor *pr = NULL;
820 
821 
822 	if (!device)
823 		return -EINVAL;
824 
825 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
826 	if (!pr)
827 		return -ENOMEM;
828 
829 	pr->handle = device->handle;
830 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
831 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
832 	device->driver_data = pr;
833 
834 	return 0;
835 }
836 
837 static int acpi_processor_remove(struct acpi_device *device, int type)
838 {
839 	acpi_status status = AE_OK;
840 	struct acpi_processor *pr = NULL;
841 
842 
843 	if (!device || !acpi_driver_data(device))
844 		return -EINVAL;
845 
846 	pr = acpi_driver_data(device);
847 
848 	if (pr->id >= nr_cpu_ids) {
849 		kfree(pr);
850 		return 0;
851 	}
852 
853 	if (type == ACPI_BUS_REMOVAL_EJECT) {
854 		if (acpi_processor_handle_eject(pr))
855 			return -EINVAL;
856 	}
857 
858 	acpi_processor_power_exit(pr, device);
859 
860 	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
861 					    acpi_processor_notify);
862 
863 	sysfs_remove_link(&device->dev.kobj, "sysdev");
864 
865 	acpi_processor_remove_fs(device);
866 
867 	if (pr->cdev) {
868 		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
869 		sysfs_remove_link(&pr->cdev->device.kobj, "device");
870 		thermal_cooling_device_unregister(pr->cdev);
871 		pr->cdev = NULL;
872 	}
873 
874 	per_cpu(processors, pr->id) = NULL;
875 	per_cpu(processor_device_array, pr->id) = NULL;
876 	kfree(pr);
877 
878 	return 0;
879 }
880 
881 #ifdef CONFIG_ACPI_HOTPLUG_CPU
882 /****************************************************************************
883  *	ACPI processor hotplug support					    *
884  ****************************************************************************/
885 
886 static int is_processor_present(acpi_handle handle)
887 {
888 	acpi_status status;
889 	unsigned long long sta = 0;
890 
891 
892 	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
893 
894 	if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
895 		return 1;
896 
897 	/*
898 	 * _STA is mandatory for a processor that supports hot plug
899 	 */
900 	if (status == AE_NOT_FOUND)
901 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
902 				"Processor does not support hot plug\n"));
903 	else
904 		ACPI_EXCEPTION((AE_INFO, status,
905 				"Processor Device is not present"));
906 	return 0;
907 }
908 
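/*
 * Create and start the acpi_device for a hot-added processor object,
 * then emit KOBJ_ONLINE if it maps to a valid logical CPU.
 */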
909 static
910 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
911 {
912 	acpi_handle phandle;
913 	struct acpi_device *pdev;
914 	struct acpi_processor *pr;
915 
916 
917 	if (acpi_get_parent(handle, &phandle)) {
918 		return -ENODEV;
919 	}
920 
921 	if (acpi_bus_get_device(phandle, &pdev)) {
922 		return -ENODEV;
923 	}
924 
925 	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
926 		return -ENODEV;
927 	}
928 
929 	acpi_bus_start(*device);
930 
931 	pr = acpi_driver_data(*device);
932 	if (!pr)
933 		return -ENODEV;
934 
935 	if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
936 		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
937 	}
938 	return 0;
939 }
940 
941 static void __ref acpi_processor_hotplug_notify(acpi_handle handle,
942 						u32 event, void *data)
943 {
944 	struct acpi_processor *pr;
945 	struct acpi_device *device = NULL;
946 	int result;
947 
948 
949 	switch (event) {
950 	case ACPI_NOTIFY_BUS_CHECK:
951 	case ACPI_NOTIFY_DEVICE_CHECK:
952 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
953 		"Processor driver received %s event\n",
954 		       (event == ACPI_NOTIFY_BUS_CHECK) ?
955 		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"));
956 
957 		if (!is_processor_present(handle))
958 			break;
959 
960 		if (acpi_bus_get_device(handle, &device)) {
961 			result = acpi_processor_device_add(handle, &device);
962 			if (result)
963 				printk(KERN_ERR PREFIX
964 					    "Unable to add the device\n");
965 			break;
966 		}
967 
968 		pr = acpi_driver_data(device);
969 		if (!pr) {
970 			printk(KERN_ERR PREFIX "Driver data is NULL\n");
971 			break;
972 		}
973 
974 		if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
975 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
976 			break;
977 		}
978 
979 		result = acpi_processor_start(device);
980 		if ((!result) && ((pr->id >= 0) && (pr->id < nr_cpu_ids))) {
981 			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
982 		} else {
983 			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
984 				    acpi_device_bid(device));
985 		}
986 		break;
987 	case ACPI_NOTIFY_EJECT_REQUEST:
988 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
989 				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));
990 
991 		if (acpi_bus_get_device(handle, &device)) {
992 			printk(KERN_ERR PREFIX
993 				    "Device doesn't exist, dropping EJECT\n");
994 			break;
995 		}
996 		pr = acpi_driver_data(device);
997 		if (!pr) {
998 			printk(KERN_ERR PREFIX
999 				    "Driver data is NULL, dropping EJECT\n");
1000 			return;
1001 		}
1002 
1003 		if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
1004 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
1005 		break;
1006 	default:
1007 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1008 				  "Unsupported event [0x%x]\n", event));
1009 		break;
1010 	}
1011 
1012 	return;
1013 }
1014 
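/*
 * Namespace-walk callback: install or remove the hotplug notify handler
 * on every Processor object, depending on the action passed in via
 * *context.
 */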
1015 static acpi_status
1016 processor_walk_namespace_cb(acpi_handle handle,
1017 			    u32 lvl, void *context, void **rv)
1018 {
1019 	acpi_status status;
1020 	int *action = context;
1021 	acpi_object_type type = 0;
1022 
1023 	status = acpi_get_type(handle, &type);
1024 	if (ACPI_FAILURE(status))
1025 		return (AE_OK);
1026 
1027 	if (type != ACPI_TYPE_PROCESSOR)
1028 		return (AE_OK);
1029 
1030 	switch (*action) {
1031 	case INSTALL_NOTIFY_HANDLER:
1032 		acpi_install_notify_handler(handle,
1033 					    ACPI_SYSTEM_NOTIFY,
1034 					    acpi_processor_hotplug_notify,
1035 					    NULL);
1036 		break;
1037 	case UNINSTALL_NOTIFY_HANDLER:
1038 		acpi_remove_notify_handler(handle,
1039 					   ACPI_SYSTEM_NOTIFY,
1040 					   acpi_processor_hotplug_notify);
1041 		break;
1042 	default:
1043 		break;
1044 	}
1045 
1046 	return (AE_OK);
1047 }
1048 
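/*
 * Bring a hot-added processor under kernel control: map it to a new
 * logical CPU id via acpi_map_lsapic() and register that CPU with the
 * architecture code.
 */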
1049 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1050 {
1051 
1052 	if (!is_processor_present(handle)) {
1053 		return AE_ERROR;
1054 	}
1055 
1056 	if (acpi_map_lsapic(handle, p_cpu))
1057 		return AE_ERROR;
1058 
1059 	if (arch_register_cpu(*p_cpu)) {
1060 		acpi_unmap_lsapic(*p_cpu);
1061 		return AE_ERROR;
1062 	}
1063 
1064 	return AE_OK;
1065 }
1066 
1067 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1068 {
1069 	if (cpu_online(pr->id))
1070 		cpu_down(pr->id);
1071 
1072 	arch_unregister_cpu(pr->id);
1073 	acpi_unmap_lsapic(pr->id);
1074 	return (0);
1075 }
1076 #else
1077 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
1078 {
1079 	return AE_ERROR;
1080 }
1081 static int acpi_processor_handle_eject(struct acpi_processor *pr)
1082 {
1083 	return (-EINVAL);
1084 }
1085 #endif
1086 
1087 static
1088 void acpi_processor_install_hotplug_notify(void)
1089 {
1090 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1091 	int action = INSTALL_NOTIFY_HANDLER;
1092 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1093 			    ACPI_ROOT_OBJECT,
1094 			    ACPI_UINT32_MAX,
1095 			    processor_walk_namespace_cb, &action, NULL);
1096 #endif
1097 	register_hotcpu_notifier(&acpi_cpu_notifier);
1098 }
1099 
1100 static
1101 void acpi_processor_uninstall_hotplug_notify(void)
1102 {
1103 #ifdef CONFIG_ACPI_HOTPLUG_CPU
1104 	int action = UNINSTALL_NOTIFY_HANDLER;
1105 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
1106 			    ACPI_ROOT_OBJECT,
1107 			    ACPI_UINT32_MAX,
1108 			    processor_walk_namespace_cb, &action, NULL);
1109 #endif
1110 	unregister_hotcpu_notifier(&acpi_cpu_notifier);
1111 }
1112 
1113 /*
1114  * We keep the driver loaded even when ACPI is not running.
1115  * This is needed for the powernow-k8 driver, which works even without
1116  * ACPI but needs symbols from this driver.
1117  */
1118 
1119 static int __init acpi_processor_init(void)
1120 {
1121 	int result = 0;
1122 
1123 	memset(&errata, 0, sizeof(errata));
1124 
1125 #ifdef CONFIG_SMP
1126 	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1127 				(struct acpi_table_header **)&madt)))
1128 		madt = NULL;
1129 #endif
1130 
1131 	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1132 	if (!acpi_processor_dir)
1133 		return -ENOMEM;
1134 	acpi_processor_dir->owner = THIS_MODULE;
1135 
1136 	/*
1137 	 * Check whether the system matches an entry in the DMI table
1138 	 * above.  If so, OSPM should not use mwait for CPU C-states.
1139 	 */
1140 	dmi_check_system(processor_idle_dmi_table);
1141 	result = cpuidle_register_driver(&acpi_idle_driver);
1142 	if (result < 0)
1143 		goto out_proc;
1144 
1145 	result = acpi_bus_register_driver(&acpi_processor_driver);
1146 	if (result < 0)
1147 		goto out_cpuidle;
1148 
1149 	acpi_processor_install_hotplug_notify();
1150 
1151 	acpi_thermal_cpufreq_init();
1152 
1153 	acpi_processor_ppc_init();
1154 
1155 	acpi_processor_throttling_init();
1156 
1157 	return 0;
1158 
1159 out_cpuidle:
1160 	cpuidle_unregister_driver(&acpi_idle_driver);
1161 
1162 out_proc:
1163 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1164 
1165 	return result;
1166 }
1167 
1168 static void __exit acpi_processor_exit(void)
1169 {
1170 	acpi_processor_ppc_exit();
1171 
1172 	acpi_thermal_cpufreq_exit();
1173 
1174 	acpi_processor_uninstall_hotplug_notify();
1175 
1176 	acpi_bus_unregister_driver(&acpi_processor_driver);
1177 
1178 	cpuidle_unregister_driver(&acpi_idle_driver);
1179 
1180 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1181 
1182 	return;
1183 }
1184 
1185 module_init(acpi_processor_init);
1186 module_exit(acpi_processor_exit);
1187 
1188 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1189 
1190 MODULE_ALIAS("processor");
1191