xref: /linux/drivers/acpi/processor_core.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *	- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *  TBD:
28  *	1. Make # power states dynamic.
29  *	2. Support duty_cycle values that span bit 4.
30  *	3. Optimize by having the scheduler determine busyness instead of
31  *	   having us try to calculate it here.
32  *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/dmi.h>
46 #include <linux/moduleparam.h>
47 
48 #include <asm/io.h>
49 #include <asm/system.h>
50 #include <asm/cpu.h>
51 #include <asm/delay.h>
52 #include <asm/uaccess.h>
53 #include <asm/processor.h>
54 #include <asm/smp.h>
55 #include <asm/acpi.h>
56 
57 #include <acpi/acpi_bus.h>
58 #include <acpi/acpi_drivers.h>
59 #include <acpi/processor.h>
60 
61 #define ACPI_PROCESSOR_COMPONENT	0x01000000
62 #define ACPI_PROCESSOR_CLASS		"processor"
63 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
64 #define ACPI_PROCESSOR_FILE_INFO	"info"
65 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
66 #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
67 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
68 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
69 
70 #define ACPI_PROCESSOR_LIMIT_USER	0
71 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
72 
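/*
 * _STA bit 0: the device is physically present.  (Bits 1-3 -- enabled,
 * shown in UI, functioning -- are not needed here.)
 */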
73 #define ACPI_STA_PRESENT 0x00000001
74 
75 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
76 ACPI_MODULE_NAME("processor_core");
77 
78 MODULE_AUTHOR("Paul Diefenbaugh");
79 MODULE_DESCRIPTION("ACPI Processor Driver");
80 MODULE_LICENSE("GPL");
81 
82 static int acpi_processor_add(struct acpi_device *device);
83 static int acpi_processor_start(struct acpi_device *device);
84 static int acpi_processor_remove(struct acpi_device *device, int type);
85 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
86 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
87 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
88 static int acpi_processor_handle_eject(struct acpi_processor *pr);
89 
90 static struct acpi_driver acpi_processor_driver = {
91 	.name = "processor",
92 	.class = ACPI_PROCESSOR_CLASS,
93 	.ids = ACPI_PROCESSOR_HID,
94 	.ops = {
95 		.add = acpi_processor_add,
96 		.remove = acpi_processor_remove,
97 		.start = acpi_processor_start,
98 		},
99 };
100 
101 #define INSTALL_NOTIFY_HANDLER		1
102 #define UNINSTALL_NOTIFY_HANDLER	2
103 
104 static const struct file_operations acpi_processor_info_fops = {
105 	.open = acpi_processor_info_open_fs,
106 	.read = seq_read,
107 	.llseek = seq_lseek,
108 	.release = single_release,
109 };
110 
111 struct acpi_processor *processors[NR_CPUS];
112 struct acpi_processor_errata errata __read_mostly;
113 
114 /* --------------------------------------------------------------------------
115                                 Errata Handling
116    -------------------------------------------------------------------------- */
117 
118 static int acpi_processor_errata_piix4(struct pci_dev *dev)
119 {
120 	u8 rev = 0;
121 	u8 value1 = 0;
122 	u8 value2 = 0;
123 
124 
125 	if (!dev)
126 		return -EINVAL;
127 
128 	/*
129 	 * Note that 'dev' references the PIIX4 ACPI Controller.
130 	 */
131 
132 	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
133 
134 	switch (rev) {
135 	case 0:
136 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
137 		break;
138 	case 1:
139 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
140 		break;
141 	case 2:
142 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
143 		break;
144 	case 3:
145 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
146 		break;
147 	default:
148 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
149 		break;
150 	}
151 
152 	switch (rev) {
153 
154 	case 0:		/* PIIX4 A-step */
155 	case 1:		/* PIIX4 B-step */
156 		/*
157 		 * See specification changes #13 ("Manual Throttle Duty Cycle")
158 		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
159 		 * erratum #5 ("STPCLK# Deassertion Time") from the January
160 		 * 2002 PIIX4 specification update.  Applies to only older
161 		 * PIIX4 models.
162 		 */
163 		errata.piix4.throttle = 1;
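		/*
		 * Fall through: the BM-IDE and Type-F DMA errata below
		 * apply to the A/B steps as well.
		 */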
164 
165 	case 2:		/* PIIX4E */
166 	case 3:		/* PIIX4M */
167 		/*
168 		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
169 		 * Livelock") from the January 2002 PIIX4 specification update.
170 		 * Applies to all PIIX4 models.
171 		 */
172 
173 		/*
174 		 * BM-IDE
175 		 * ------
176 		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
177 		 * Status register address.  We'll use this later to read
178 		 * each IDE controller's DMA status to make sure we catch all
179 		 * DMA activity.
180 		 */
181 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
182 				     PCI_DEVICE_ID_INTEL_82371AB,
183 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
184 		if (dev) {
185 			errata.piix4.bmisx = pci_resource_start(dev, 4);
186 			pci_dev_put(dev);
187 		}
188 
189 		/*
190 		 * Type-F DMA
191 		 * ----------
192 		 * Find the PIIX4 ISA Controller and read the Motherboard
193 		 * DMA controller's status to see if Type-F (Fast) DMA mode
194 		 * is enabled (bit 7) on either channel.  If it is, we disable
195 		 * C3 support rather than Type-F DMA itself, since some legacy
196 		 * devices won't operate well with fast DMA turned off.
197 		 */
198 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
199 				     PCI_DEVICE_ID_INTEL_82371AB_0,
200 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
201 		if (dev) {
202 			pci_read_config_byte(dev, 0x76, &value1);
203 			pci_read_config_byte(dev, 0x77, &value2);
204 			if ((value1 & 0x80) || (value2 & 0x80))
205 				errata.piix4.fdma = 1;
206 			pci_dev_put(dev);
207 		}
208 
209 		break;
210 	}
211 
212 	if (errata.piix4.bmisx)
213 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
214 				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
215 	if (errata.piix4.fdma)
216 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
217 				  "Type-F DMA livelock erratum (C3 disabled)\n"));
218 
219 	return 0;
220 }
221 
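/*
 * For reference, a minimal sketch of how the bmisx address recorded above
 * can be polled for bus-master activity (the real check lives in the
 * C-state/idle code).  Offsets 0x02 and 0x0A are assumed to be the primary
 * and secondary Bus Master IDE Status registers, with bit 0 as the
 * "active" bit:
 *
 *	if (errata.piix4.bmisx &&
 *	    ((inb_p(errata.piix4.bmisx + 0x02) & 0x01) ||
 *	     (inb_p(errata.piix4.bmisx + 0x0A) & 0x01)))
 *		bm_activity = 1;	(hypothetical flag)
 */
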
222 static int acpi_processor_errata(struct acpi_processor *pr)
223 {
224 	int result = 0;
225 	struct pci_dev *dev = NULL;
226 
227 
228 	if (!pr)
229 		return -EINVAL;
230 
231 	/*
232 	 * PIIX4
233 	 */
234 	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
235 			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
236 			     PCI_ANY_ID, NULL);
237 	if (dev) {
238 		result = acpi_processor_errata_piix4(dev);
239 		pci_dev_put(dev);
240 	}
241 
242 	return result;
243 }
244 
245 /* --------------------------------------------------------------------------
246                               Common ACPI processor functions
247    -------------------------------------------------------------------------- */
248 
249 /*
250  * _PDC advertises the OS's processor-feature support to the platform; this
251  * BIOS-OS handshake is required for most of the newer ACPI processor features.
252  */
253 static int acpi_processor_set_pdc(struct acpi_processor *pr)
254 {
255 	struct acpi_object_list *pdc_in = pr->pdc;
256 	acpi_status status = AE_OK;
257 
258 
259 	if (!pdc_in)
260 		return status;
261 
262 	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
263 
264 	if (ACPI_FAILURE(status))
265 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
266 		    "Could not evaluate _PDC, using legacy perf. control...\n"));
267 
268 	return status;
269 }
270 
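/*
 * Roughly what arch_acpi_processor_init_pdc() leaves in pr->pdc before the
 * call above: an acpi_object_list wrapping one buffer of three DWORDs
 * (revision, DWORD count, capability bits).  A sketch only -- allocation
 * error handling is omitted and the capability bits shown are illustrative;
 * the real values are chosen by the arch code:
 *
 *	u32 *buf = kmalloc(12, GFP_KERNEL);
 *	union acpi_object *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	struct acpi_object_list *list = kmalloc(sizeof(*list), GFP_KERNEL);
 *
 *	buf[0] = 1;					(_PDC revision)
 *	buf[1] = 1;					(one capability DWORD)
 *	buf[2] = ACPI_PDC_P_FFH | ACPI_PDC_C_C1_HALT;	(e.g., on x86)
 *
 *	obj->type = ACPI_TYPE_BUFFER;
 *	obj->buffer.length = 12;
 *	obj->buffer.pointer = (u8 *)buf;
 *	list->count = 1;
 *	list->pointer = obj;
 *	pr->pdc = list;
 */
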
271 /* --------------------------------------------------------------------------
272                               FS Interface (/proc)
273    -------------------------------------------------------------------------- */
274 
275 static struct proc_dir_entry *acpi_processor_dir = NULL;
276 
277 static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
278 {
279 	struct acpi_processor *pr = seq->private;
280 
281 
282 	if (!pr)
283 		goto end;
284 
285 	seq_printf(seq, "processor id:            %d\n"
286 		   "acpi id:                 %d\n"
287 		   "bus mastering control:   %s\n"
288 		   "power management:        %s\n"
289 		   "throttling control:      %s\n"
290 		   "limit interface:         %s\n",
291 		   pr->id,
292 		   pr->acpi_id,
293 		   pr->flags.bm_control ? "yes" : "no",
294 		   pr->flags.power ? "yes" : "no",
295 		   pr->flags.throttling ? "yes" : "no",
296 		   pr->flags.limit ? "yes" : "no");
297 
298       end:
299 	return 0;
300 }
301 
302 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
303 {
304 	return single_open(file, acpi_processor_info_seq_show,
305 			   PDE(inode)->data);
306 }
307 
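/*
 * Reading the resulting /proc file (typically
 * /proc/acpi/processor/CPU0/info) produces the format built by
 * acpi_processor_info_seq_show() above, e.g. with illustrative values:
 *
 *	processor id:            0
 *	acpi id:                 1
 *	bus mastering control:   yes
 *	power management:        yes
 *	throttling control:      yes
 *	limit interface:         yes
 */
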
308 static int acpi_processor_add_fs(struct acpi_device *device)
309 {
310 	struct proc_dir_entry *entry = NULL;
311 
312 
313 	if (!acpi_device_dir(device)) {
314 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
315 						     acpi_processor_dir);
316 		if (!acpi_device_dir(device))
317 			return -ENODEV;
318 	}
319 	acpi_device_dir(device)->owner = THIS_MODULE;
320 
321 	/* 'info' [R] */
322 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
323 				  S_IRUGO, acpi_device_dir(device));
324 	if (!entry)
325 		return -EIO;
326 	else {
327 		entry->proc_fops = &acpi_processor_info_fops;
328 		entry->data = acpi_driver_data(device);
329 		entry->owner = THIS_MODULE;
330 	}
331 
332 	/* 'throttling' [R/W] */
333 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
334 				  S_IFREG | S_IRUGO | S_IWUSR,
335 				  acpi_device_dir(device));
336 	if (!entry)
337 		return -EIO;
338 	else {
339 		entry->proc_fops = &acpi_processor_throttling_fops;
340 		entry->data = acpi_driver_data(device);
341 		entry->owner = THIS_MODULE;
342 	}
343 
344 	/* 'limit' [R/W] */
345 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
346 				  S_IFREG | S_IRUGO | S_IWUSR,
347 				  acpi_device_dir(device));
348 	if (!entry)
349 		return -EIO;
350 	else {
351 		entry->proc_fops = &acpi_processor_limit_fops;
352 		entry->data = acpi_driver_data(device);
353 		entry->owner = THIS_MODULE;
354 	}
355 
356 	return 0;
357 }
358 
359 static int acpi_processor_remove_fs(struct acpi_device *device)
360 {
361 
362 	if (acpi_device_dir(device)) {
363 		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
364 				  acpi_device_dir(device));
365 		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
366 				  acpi_device_dir(device));
367 		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
368 				  acpi_device_dir(device));
369 		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
370 		acpi_device_dir(device) = NULL;
371 	}
372 
373 	return 0;
374 }
375 
376 /* Use the ACPI id in the MADT to map CPUs in case of SMP */
377 
378 #ifndef CONFIG_SMP
379 static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
380 #else
381 
382 static struct acpi_table_madt *madt;
383 
384 static int map_lapic_id(struct acpi_subtable_header *entry,
385 		 u32 acpi_id, int *apic_id)
386 {
387 	struct acpi_madt_local_apic *lapic =
388 		(struct acpi_madt_local_apic *)entry;
389 	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
390 	    lapic->processor_id == acpi_id) {
391 		*apic_id = lapic->id;
392 		return 1;
393 	}
394 	return 0;
395 }
396 
397 static int map_lsapic_id(struct acpi_subtable_header *entry,
398 		  u32 acpi_id, int *apic_id)
399 {
400 	struct acpi_madt_local_sapic *lsapic =
401 		(struct acpi_madt_local_sapic *)entry;
402 	/* Only check enabled APICs */
403 	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
404 		/* First check against id */
405 		if (lsapic->processor_id == acpi_id) {
406 			*apic_id = (lsapic->id << 8) | lsapic->eid;
407 			return 1;
408 		/* Check against optional uid */
409 		} else if (entry->length >= 16 &&
410 			lsapic->uid == acpi_id) {
411 			*apic_id = lsapic->uid;
412 			return 1;
413 		}
414 	}
415 	return 0;
416 }
417 
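/*
 * The MADT subtables matched above look roughly like this (see ACPICA's
 * actbl1.h); processor_id is compared against the ACPI processor id,
 * while id (plus eid for SAPIC) supplies the (S)APIC id:
 *
 *	struct acpi_madt_local_apic {
 *		struct acpi_subtable_header header;
 *		u8 processor_id;	(ACPI processor id)
 *		u8 id;			(local APIC id)
 *		u32 lapic_flags;	(bit 0: enabled)
 *	};
 *
 *	struct acpi_madt_local_sapic {
 *		struct acpi_subtable_header header;
 *		u8 processor_id;
 *		u8 id;			(SAPIC id)
 *		u8 eid;			(SAPIC eid)
 *		u8 reserved[3];
 *		u32 lapic_flags;
 *		u32 uid;		(optional; only if length >= 16)
 *	};
 */
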
418 #ifdef CONFIG_IA64
419 #define arch_cpu_to_apicid 	ia64_cpu_to_sapicid
420 #else
421 #define arch_cpu_to_apicid 	x86_cpu_to_apicid
422 #endif
423 
424 static int map_madt_entry(u32 acpi_id)
425 {
426 	unsigned long madt_end, entry;
427 	int apic_id = -1;
428 
429 	if (!madt)
430 		return apic_id;
431 
432 	entry = (unsigned long)madt;
433 	madt_end = entry + madt->header.length;
434 
435 	/* Parse all entries looking for a match. */
436 
437 	entry += sizeof(struct acpi_table_madt);
438 	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
439 		struct acpi_subtable_header *header =
440 			(struct acpi_subtable_header *)entry;
441 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
442 			if (map_lapic_id(header, acpi_id, &apic_id))
443 				break;
444 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
445 			if (map_lsapic_id(header, acpi_id, &apic_id))
446 				break;
447 		}
448 		entry += header->length;
449 	}
450 	return apic_id;
451 }
452 
453 static int map_mat_entry(acpi_handle handle, u32 acpi_id)
454 {
455 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
456 	union acpi_object *obj;
457 	struct acpi_subtable_header *header;
458 	int apic_id = -1;
459 
460 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
461 		goto exit;
462 
463 	if (!buffer.length || !buffer.pointer)
464 		goto exit;
465 
466 	obj = buffer.pointer;
467 	if (obj->type != ACPI_TYPE_BUFFER ||
468 	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
469 		goto exit;
470 	}
471 
472 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
473 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
474 		map_lapic_id(header, acpi_id, &apic_id);
475 	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
476 		map_lsapic_id(header, acpi_id, &apic_id);
477 	}
478 
479 exit:
480 	if (buffer.pointer)
481 		kfree(buffer.pointer);
482 	return apic_id;
483 }
484 
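/*
 * Map an ACPI processor id to a logical CPU number: try the per-device
 * _MAT method first, fall back to scanning the static MADT, then look
 * the resulting (S)APIC id up in the arch's cpu-to-apicid table.
 */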
485 static int get_cpu_id(acpi_handle handle, u32 acpi_id)
486 {
487 	int i;
488 	int apic_id = -1;
489 
490 	apic_id = map_mat_entry(handle, acpi_id);
491 	if (apic_id == -1)
492 		apic_id = map_madt_entry(acpi_id);
493 	if (apic_id == -1)
494 		return apic_id;
495 
496 	for (i = 0; i < NR_CPUS; ++i) {
497 		if (arch_cpu_to_apicid[i] == apic_id)
498 			return i;
499 	}
500 	return -1;
501 }
502 #endif
503 
504 /* --------------------------------------------------------------------------
505                                  Driver Interface
506    -------------------------------------------------------------------------- */
507 
508 static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
509 {
510 	acpi_status status = 0;
511 	union acpi_object object = { 0 };
512 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
513 	int cpu_index;
514 	static int cpu0_initialized;
515 
516 
517 	if (!pr)
518 		return -EINVAL;
519 
520 	if (num_online_cpus() > 1)
521 		errata.smp = TRUE;
522 
523 	acpi_processor_errata(pr);
524 
525 	/*
526 	 * Check to see if we have bus mastering arbitration control.  This
527 	 * is required for proper C3 usage (to maintain cache coherency).
528 	 */
529 	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
530 		pr->flags.bm_control = 1;
531 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
532 				  "Bus mastering arbitration control present\n"));
533 	} else
534 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
535 				  "No bus mastering arbitration control\n"));
536 
537 	/* Check if it is a Device with HID and UID */
538 	if (has_uid) {
539 		unsigned long value;
540 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
541 						NULL, &value);
542 		if (ACPI_FAILURE(status)) {
543 			printk(KERN_ERR PREFIX "Error evaluating processor _UID\n");
544 			return -ENODEV;
545 		}
546 		pr->acpi_id = value;
547 	} else {
548 		/*
549 		 * Evaluate the processor object.  Note that it is common on SMP to
550 		 * have the first (boot) processor with a valid PBLK address while
551 		 * all others have a NULL address.
552 		 */
553 		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
554 		if (ACPI_FAILURE(status)) {
555 			printk(KERN_ERR PREFIX "Error evaluating processor object\n");
556 			return -ENODEV;
557 		}
558 
559 		/*
560 		* TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
561 		*      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
562 		*/
563 		pr->acpi_id = object.processor.proc_id;
564 	}
565 	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
566 
567 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
568 	if (!cpu0_initialized && (cpu_index == -1) &&
569 	    (num_online_cpus() == 1)) {
570 		cpu_index = 0;
571 	}
572 
573 	cpu0_initialized = 1;
574 
575 	pr->id = cpu_index;
576 
577 	/*
578 	 *  Extra Processor objects may be enumerated on MP systems with
579  *  less than the max # of CPUs. They should be ignored iff
580 	 *  they are physically not present.
581 	 */
582 	if (pr->id == -1) {
583 		if (ACPI_FAILURE
584 		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
585 			return -ENODEV;
586 		}
587 	}
588 
589 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
590 			  pr->acpi_id));
591 
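	/*
	 * The PBLK (Processor Block) is a 6-byte I/O region: P_CNT (4 bytes,
	 * duty-cycle throttling control) followed by the 1-byte P_LVL2 and
	 * P_LVL3 C-state entry registers -- hence the length check and the
	 * 6-byte request_region() below.
	 */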
592 	if (!object.processor.pblk_address)
593 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
594 	else if (object.processor.pblk_length != 6)
595 		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
596 			    object.processor.pblk_length);
597 	else {
598 		pr->throttling.address = object.processor.pblk_address;
599 		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
600 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
601 
602 		pr->pblk = object.processor.pblk_address;
603 
604 		/*
605 		 * We don't care about error returns - we just try to mark
606 		 * these reserved so that nobody else is confused into thinking
607 		 * that this region might be unused..
608 		 *
609 		 * (In particular, allocating the IO range for Cardbus)
610 		 */
611 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
612 	}
613 
614 #ifdef CONFIG_CPU_FREQ
615 	acpi_processor_ppc_has_changed(pr);
616 #endif
617 	acpi_processor_get_throttling_info(pr);
618 	acpi_processor_get_limit_info(pr);
619 
620 	return 0;
621 }
622 
623 static void *processor_device_array[NR_CPUS];
624 
625 static int __cpuinit acpi_processor_start(struct acpi_device *device)
626 {
627 	int result = 0;
628 	acpi_status status = AE_OK;
629 	struct acpi_processor *pr;
630 
631 
632 	pr = acpi_driver_data(device);
633 
634 	result = acpi_processor_get_info(pr, device->flags.unique_id);
635 	if (result) {
636 		/* Processor is physically not present */
637 		return 0;
638 	}
639 
640 	BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));
641 
642 	/*
643 	 * Buggy BIOS check
644 	 * ACPI id of processors can be reported wrongly by the BIOS.
645 	 * Don't trust it blindly
646 	 */
647 	if (processor_device_array[pr->id] != NULL &&
648 	    processor_device_array[pr->id] != device) {
649 		printk(KERN_WARNING "BIOS reported wrong ACPI id "
650 			"for the processor\n");
651 		return -ENODEV;
652 	}
653 	processor_device_array[pr->id] = device;
654 
655 	processors[pr->id] = pr;
656 
657 	result = acpi_processor_add_fs(device);
658 	if (result)
659 		goto end;
660 
661 	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
662 					     acpi_processor_notify, pr);
663 
664 	/* _PDC call should be done before doing anything else (if reqd.). */
665 	arch_acpi_processor_init_pdc(pr);
666 	acpi_processor_set_pdc(pr);
667 
668 	acpi_processor_power_init(pr, device);
669 
670 	if (pr->flags.throttling) {
671 		printk(KERN_INFO PREFIX "%s [%s] (supports",
672 		       acpi_device_name(device), acpi_device_bid(device));
673 		printk(" %d throttling states", pr->throttling.state_count);
674 		printk(")\n");
675 	}
676 
677       end:
678 
679 	return result;
680 }
681 
682 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
683 {
684 	struct acpi_processor *pr = data;
685 	struct acpi_device *device = NULL;
686 
687 
688 	if (!pr)
689 		return;
690 
691 	if (acpi_bus_get_device(pr->handle, &device))
692 		return;
693 
694 	switch (event) {
695 	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
696 		acpi_processor_ppc_has_changed(pr);
697 		acpi_bus_generate_event(device, event,
698 					pr->performance_platform_limit);
699 		break;
700 	case ACPI_PROCESSOR_NOTIFY_POWER:
701 		acpi_processor_cst_has_changed(pr);
702 		acpi_bus_generate_event(device, event, 0);
703 		break;
704 	default:
705 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
706 				  "Unsupported event [0x%x]\n", event));
707 		break;
708 	}
709 
710 	return;
711 }
712 
713 static int acpi_processor_add(struct acpi_device *device)
714 {
715 	struct acpi_processor *pr = NULL;
716 
717 
718 	if (!device)
719 		return -EINVAL;
720 
721 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
722 	if (!pr)
723 		return -ENOMEM;
724 
725 	pr->handle = device->handle;
726 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
727 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
728 	acpi_driver_data(device) = pr;
729 
730 	return 0;
731 }
732 
733 static int acpi_processor_remove(struct acpi_device *device, int type)
734 {
735 	acpi_status status = AE_OK;
736 	struct acpi_processor *pr = NULL;
737 
738 
739 	if (!device || !acpi_driver_data(device))
740 		return -EINVAL;
741 
742 	pr = acpi_driver_data(device);
743 
744 	if (pr->id >= NR_CPUS) {
745 		kfree(pr);
746 		return 0;
747 	}
748 
749 	if (type == ACPI_BUS_REMOVAL_EJECT) {
750 		if (acpi_processor_handle_eject(pr))
751 			return -EINVAL;
752 	}
753 
754 	acpi_processor_power_exit(pr, device);
755 
756 	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
757 					    acpi_processor_notify);
758 
759 	acpi_processor_remove_fs(device);
760 
761 	processors[pr->id] = NULL;
762 
763 	kfree(pr);
764 
765 	return 0;
766 }
767 
768 #ifdef CONFIG_ACPI_HOTPLUG_CPU
769 /****************************************************************************
770  * 	ACPI processor hotplug support				       	    *
771  ****************************************************************************/
772 
773 static int is_processor_present(acpi_handle handle);
774 
775 static int is_processor_present(acpi_handle handle)
776 {
777 	acpi_status status;
778 	unsigned long sta = 0;
779 
780 
781 	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
782 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_PRESENT)) {
783 		ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
784 		return 0;
785 	}
786 	return 1;
787 }
788 
789 static
790 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
791 {
792 	acpi_handle phandle;
793 	struct acpi_device *pdev;
794 	struct acpi_processor *pr;
795 
796 
797 	if (acpi_get_parent(handle, &phandle)) {
798 		return -ENODEV;
799 	}
800 
801 	if (acpi_bus_get_device(phandle, &pdev)) {
802 		return -ENODEV;
803 	}
804 
805 	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
806 		return -ENODEV;
807 	}
808 
809 	acpi_bus_start(*device);
810 
811 	pr = acpi_driver_data(*device);
812 	if (!pr)
813 		return -ENODEV;
814 
815 	if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
816 		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
817 	}
818 	return 0;
819 }
820 
821 static void
822 acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
823 {
824 	struct acpi_processor *pr;
825 	struct acpi_device *device = NULL;
826 	int result;
827 
828 
829 	switch (event) {
830 	case ACPI_NOTIFY_BUS_CHECK:
831 	case ACPI_NOTIFY_DEVICE_CHECK:
832 		printk(KERN_INFO "Processor driver received %s event\n",
833 		       (event == ACPI_NOTIFY_BUS_CHECK) ?
834 		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
835 
836 		if (!is_processor_present(handle))
837 			break;
838 
839 		if (acpi_bus_get_device(handle, &device)) {
840 			result = acpi_processor_device_add(handle, &device);
841 			if (result)
842 				printk(KERN_ERR PREFIX
843 					    "Unable to add the device\n");
844 			break;
845 		}
846 
847 		pr = acpi_driver_data(device);
848 		if (!pr) {
849 			printk(KERN_ERR PREFIX "Driver data is NULL\n");
850 			break;
851 		}
852 
853 		if (pr->id >= 0 && (pr->id < NR_CPUS)) {
854 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
855 			break;
856 		}
857 
858 		result = acpi_processor_start(device);
859 		if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
860 			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
861 		} else {
862 			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
863 				    acpi_device_bid(device));
864 		}
865 		break;
866 	case ACPI_NOTIFY_EJECT_REQUEST:
867 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
868 				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));
869 
870 		if (acpi_bus_get_device(handle, &device)) {
871 			printk(KERN_ERR PREFIX
872 				    "Device don't exist, dropping EJECT\n");
873 			break;
874 		}
875 		pr = acpi_driver_data(device);
876 		if (!pr) {
877 			printk(KERN_ERR PREFIX
878 				    "Driver data is NULL, dropping EJECT\n");
879 			return;
880 		}
881 
882 		if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
883 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
884 		break;
885 	default:
886 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
887 				  "Unsupported event [0x%x]\n", event));
888 		break;
889 	}
890 
891 	return;
892 }
893 
894 static acpi_status
895 processor_walk_namespace_cb(acpi_handle handle,
896 			    u32 lvl, void *context, void **rv)
897 {
898 	acpi_status status;
899 	int *action = context;
900 	acpi_object_type type = 0;
901 
902 	status = acpi_get_type(handle, &type);
903 	if (ACPI_FAILURE(status))
904 		return (AE_OK);
905 
906 	if (type != ACPI_TYPE_PROCESSOR)
907 		return (AE_OK);
908 
909 	switch (*action) {
910 	case INSTALL_NOTIFY_HANDLER:
911 		acpi_install_notify_handler(handle,
912 					    ACPI_SYSTEM_NOTIFY,
913 					    acpi_processor_hotplug_notify,
914 					    NULL);
915 		break;
916 	case UNINSTALL_NOTIFY_HANDLER:
917 		acpi_remove_notify_handler(handle,
918 					   ACPI_SYSTEM_NOTIFY,
919 					   acpi_processor_hotplug_notify);
920 		break;
921 	default:
922 		break;
923 	}
924 
925 	return (AE_OK);
926 }
927 
928 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
929 {
930 
931 	if (!is_processor_present(handle)) {
932 		return AE_ERROR;
933 	}
934 
935 	if (acpi_map_lsapic(handle, p_cpu))
936 		return AE_ERROR;
937 
938 	if (arch_register_cpu(*p_cpu)) {
939 		acpi_unmap_lsapic(*p_cpu);
940 		return AE_ERROR;
941 	}
942 
943 	return AE_OK;
944 }
945 
946 static int acpi_processor_handle_eject(struct acpi_processor *pr)
947 {
948 	if (cpu_online(pr->id)) {
949 		return (-EINVAL);
950 	}
951 	arch_unregister_cpu(pr->id);
952 	acpi_unmap_lsapic(pr->id);
953 	return (0);
954 }
955 #else
956 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
957 {
958 	return AE_ERROR;
959 }
960 static int acpi_processor_handle_eject(struct acpi_processor *pr)
961 {
962 	return (-EINVAL);
963 }
964 #endif
965 
966 static
967 void acpi_processor_install_hotplug_notify(void)
968 {
969 #ifdef CONFIG_ACPI_HOTPLUG_CPU
970 	int action = INSTALL_NOTIFY_HANDLER;
971 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
972 			    ACPI_ROOT_OBJECT,
973 			    ACPI_UINT32_MAX,
974 			    processor_walk_namespace_cb, &action, NULL);
975 #endif
976 }
977 
978 static
979 void acpi_processor_uninstall_hotplug_notify(void)
980 {
981 #ifdef CONFIG_ACPI_HOTPLUG_CPU
982 	int action = UNINSTALL_NOTIFY_HANDLER;
983 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
984 			    ACPI_ROOT_OBJECT,
985 			    ACPI_UINT32_MAX,
986 			    processor_walk_namespace_cb, &action, NULL);
987 #endif
988 }
989 
990 /*
991  * We keep the driver loaded even when ACPI is not running.
992  * This is needed for the powernow-k8 driver, which works even without
993  * ACPI but needs symbols from this driver.
994  */
995 
996 static int __init acpi_processor_init(void)
997 {
998 	int result = 0;
999 
1000 
1001 	memset(&processors, 0, sizeof(processors));
1002 	memset(&errata, 0, sizeof(errata));
1003 
1004 #ifdef CONFIG_SMP
1005 	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1006 				(struct acpi_table_header **)&madt)))
1007 		madt = NULL;
1008 #endif
1009 
1010 	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1011 	if (!acpi_processor_dir)
1012 		return -ENOMEM;
1013 	acpi_processor_dir->owner = THIS_MODULE;
1014 
1015 	result = acpi_bus_register_driver(&acpi_processor_driver);
1016 	if (result < 0) {
1017 		remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1018 		return result;
1019 	}
1020 
1021 	acpi_processor_install_hotplug_notify();
1022 
1023 	acpi_thermal_cpufreq_init();
1024 
1025 	acpi_processor_ppc_init();
1026 
1027 	return 0;
1028 }
1029 
1030 static void __exit acpi_processor_exit(void)
1031 {
1032 
1033 	acpi_processor_ppc_exit();
1034 
1035 	acpi_thermal_cpufreq_exit();
1036 
1037 	acpi_processor_uninstall_hotplug_notify();
1038 
1039 	acpi_bus_unregister_driver(&acpi_processor_driver);
1040 
1041 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1042 
1043 	return;
1044 }
1045 
1046 module_init(acpi_processor_init);
1047 module_exit(acpi_processor_exit);
1048 
1049 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1050 
1051 MODULE_ALIAS("processor");
1052