xref: /linux/drivers/acpi/processor_core.c (revision b454cc6636d254fbf6049b73e9560aee76fb04a3)
1 /*
2  * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $)
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *  			- Added processor hotplug support
9  *
10  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11  *
12  *  This program is free software; you can redistribute it and/or modify
13  *  it under the terms of the GNU General Public License as published by
14  *  the Free Software Foundation; either version 2 of the License, or (at
15  *  your option) any later version.
16  *
17  *  This program is distributed in the hope that it will be useful, but
18  *  WITHOUT ANY WARRANTY; without even the implied warranty of
19  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
20  *  General Public License for more details.
21  *
22  *  You should have received a copy of the GNU General Public License along
23  *  with this program; if not, write to the Free Software Foundation, Inc.,
24  *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
25  *
26  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
27  *  TBD:
28  *	1. Make # power states dynamic.
29  *	2. Support duty_cycle values that span bit 4.
30  *	3. Optimize by having the scheduler determine busyness instead of
31  *	   having us try to calculate it here.
32  *	4. Need C1 timing -- must modify kernel (IRQ handler) to get this.
33  */
34 
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/types.h>
39 #include <linux/pci.h>
40 #include <linux/pm.h>
41 #include <linux/cpufreq.h>
42 #include <linux/cpu.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/dmi.h>
46 #include <linux/moduleparam.h>
47 
48 #include <asm/io.h>
49 #include <asm/system.h>
50 #include <asm/cpu.h>
51 #include <asm/delay.h>
52 #include <asm/uaccess.h>
53 #include <asm/processor.h>
54 #include <asm/smp.h>
55 #include <asm/acpi.h>
56 
57 #include <acpi/acpi_bus.h>
58 #include <acpi/acpi_drivers.h>
59 #include <acpi/processor.h>
60 
61 #define ACPI_PROCESSOR_COMPONENT	0x01000000
62 #define ACPI_PROCESSOR_CLASS		"processor"
63 #define ACPI_PROCESSOR_DRIVER_NAME	"ACPI Processor Driver"
64 #define ACPI_PROCESSOR_DEVICE_NAME	"Processor"
65 #define ACPI_PROCESSOR_FILE_INFO	"info"
66 #define ACPI_PROCESSOR_FILE_THROTTLING	"throttling"
67 #define ACPI_PROCESSOR_FILE_LIMIT	"limit"
68 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
69 #define ACPI_PROCESSOR_NOTIFY_POWER	0x81
70 
71 #define ACPI_PROCESSOR_LIMIT_USER	0
72 #define ACPI_PROCESSOR_LIMIT_THERMAL	1
73 
74 #define ACPI_STA_PRESENT 0x00000001
75 
76 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
77 ACPI_MODULE_NAME("acpi_processor")
78 
79 MODULE_AUTHOR("Paul Diefenbaugh");
80 MODULE_DESCRIPTION(ACPI_PROCESSOR_DRIVER_NAME);
81 MODULE_LICENSE("GPL");
82 
83 static int acpi_processor_add(struct acpi_device *device);
84 static int acpi_processor_start(struct acpi_device *device);
85 static int acpi_processor_remove(struct acpi_device *device, int type);
86 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file);
87 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data);
88 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
89 static int acpi_processor_handle_eject(struct acpi_processor *pr);
90 
91 static struct acpi_driver acpi_processor_driver = {
92 	.name = ACPI_PROCESSOR_DRIVER_NAME,
93 	.class = ACPI_PROCESSOR_CLASS,
94 	.ids = ACPI_PROCESSOR_HID,
95 	.ops = {
96 		.add = acpi_processor_add,
97 		.remove = acpi_processor_remove,
98 		.start = acpi_processor_start,
99 		},
100 };
101 
102 #define INSTALL_NOTIFY_HANDLER		1
103 #define UNINSTALL_NOTIFY_HANDLER	2
104 
105 static const struct file_operations acpi_processor_info_fops = {
106 	.open = acpi_processor_info_open_fs,
107 	.read = seq_read,
108 	.llseek = seq_lseek,
109 	.release = single_release,
110 };
111 
112 struct acpi_processor *processors[NR_CPUS];
113 struct acpi_processor_errata errata __read_mostly;
114 
115 /* --------------------------------------------------------------------------
116                                 Errata Handling
117    -------------------------------------------------------------------------- */
118 
119 static int acpi_processor_errata_piix4(struct pci_dev *dev)
120 {
121 	u8 rev = 0;
122 	u8 value1 = 0;
123 	u8 value2 = 0;
124 
125 
126 	if (!dev)
127 		return -EINVAL;
128 
129 	/*
130 	 * Note that 'dev' references the PIIX4 ACPI Controller.
131 	 */
132 
133 	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
134 
135 	switch (rev) {
136 	case 0:
137 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
138 		break;
139 	case 1:
140 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
141 		break;
142 	case 2:
143 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
144 		break;
145 	case 3:
146 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
147 		break;
148 	default:
149 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
150 		break;
151 	}
152 
153 	switch (rev) {
154 
155 	case 0:		/* PIIX4 A-step */
156 	case 1:		/* PIIX4 B-step */
157 		/*
158 		 * See specification changes #13 ("Manual Throttle Duty Cycle")
159 		 * and #14 ("Enabling and Disabling Manual Throttle"), plus
160 		 * erratum #5 ("STPCLK# Deassertion Time") from the January
161 		 * 2002 PIIX4 specification update.  Applies only to older
162 		 * PIIX4 models.
163 		 */
164 		errata.piix4.throttle = 1;
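		/* fall through: the BM-IDE and Type-F DMA errata below apply
		 * to the A- and B-step parts as well */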
165 
166 	case 2:		/* PIIX4E */
167 	case 3:		/* PIIX4M */
168 		/*
169 		 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
170 		 * Livelock") from the January 2002 PIIX4 specification update.
171 		 * Applies to all PIIX4 models.
172 		 */
173 
174 		/*
175 		 * BM-IDE
176 		 * ------
177 		 * Find the PIIX4 IDE Controller and get the Bus Master IDE
178 		 * Status register address.  We'll use this later to read
179 		 * each IDE controller's DMA status to make sure we catch all
180 		 * DMA activity.
181 		 */
182 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
183 				     PCI_DEVICE_ID_INTEL_82371AB,
184 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
185 		if (dev) {
186 			errata.piix4.bmisx = pci_resource_start(dev, 4);
187 			pci_dev_put(dev);
188 		}
189 
190 		/*
191 		 * Type-F DMA
192 		 * ----------
193 		 * Find the PIIX4 ISA Controller and read the Motherboard
194 		 * DMA controller's status to see if Type-F (Fast) DMA mode
195 		 * is enabled (bit 7) on either channel.  Note that we'll
196 		 * disable C3 support if this is enabled, as some legacy
197 		 * devices won't operate well if fast DMA is disabled.
198 		 */
199 		dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
200 				     PCI_DEVICE_ID_INTEL_82371AB_0,
201 				     PCI_ANY_ID, PCI_ANY_ID, NULL);
202 		if (dev) {
203 			pci_read_config_byte(dev, 0x76, &value1);
204 			pci_read_config_byte(dev, 0x77, &value2);
205 			if ((value1 & 0x80) || (value2 & 0x80))
206 				errata.piix4.fdma = 1;
207 			pci_dev_put(dev);
208 		}
209 
210 		break;
211 	}
212 
213 	if (errata.piix4.bmisx)
214 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
215 				  "Bus master activity detection (BM-IDE) erratum enabled\n"));
216 	if (errata.piix4.fdma)
217 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
218 				  "Type-F DMA livelock erratum (C3 disabled)\n"));
219 
220 	return 0;
221 }
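
/*
 * The flags recorded above are only detected here; they are consumed by the
 * C-state code (processor_idle.c), which polls the BM-IDE status ports at
 * errata.piix4.bmisx to catch bus-master activity and refuses to enter C3
 * when errata.piix4.fdma is set, as the two debug messages above indicate.
 */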
222 
223 static int acpi_processor_errata(struct acpi_processor *pr)
224 {
225 	int result = 0;
226 	struct pci_dev *dev = NULL;
227 
228 
229 	if (!pr)
230 		return -EINVAL;
231 
232 	/*
233 	 * PIIX4
234 	 */
235 	dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
236 			     PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
237 			     PCI_ANY_ID, NULL);
238 	if (dev) {
239 		result = acpi_processor_errata_piix4(dev);
240 		pci_dev_put(dev);
241 	}
242 
243 	return result;
244 }
245 
246 /* --------------------------------------------------------------------------
247                               Common ACPI processor functions
248    -------------------------------------------------------------------------- */
249 
250 /*
251  * _PDC is required for a BIOS-OS handshake for most of the newer
252  * ACPI processor features.
253  */
254 static int acpi_processor_set_pdc(struct acpi_processor *pr)
255 {
256 	struct acpi_object_list *pdc_in = pr->pdc;
257 	acpi_status status = AE_OK;
258 
259 
260 	if (!pdc_in)
261 		return status;
262 
263 	status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
264 
265 	if (ACPI_FAILURE(status))
266 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
267 		    "Could not evaluate _PDC, using legacy perf. control...\n"));
268 
269 	return status;
270 }
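
/*
 * Illustrative sketch of the _PDC argument: pr->pdc is filled in by the
 * architecture code (arch_acpi_processor_init_pdc(), invoked from
 * acpi_processor_start() below) and is an object list wrapping a single
 * buffer of DWORDs: a revision, a capability-DWORD count, and the capability
 * bits themselves.  Roughly (capability bits shown as 0 for brevity):
 *
 *	u32 caps[3] = { 1, 1, 0 };	// revision, count, capability bits
 *	union acpi_object obj = {
 *		.buffer = {
 *			.type	 = ACPI_TYPE_BUFFER,
 *			.length	 = sizeof(caps),
 *			.pointer = (u8 *)caps,
 *		},
 *	};
 *	struct acpi_object_list pdc = { .count = 1, .pointer = &obj };
 *
 * The real capability bits are architecture and feature specific; see the
 * arch implementation for the values actually advertised to the BIOS.
 */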
271 
272 /* --------------------------------------------------------------------------
273                               FS Interface (/proc)
274    -------------------------------------------------------------------------- */
275 
276 static struct proc_dir_entry *acpi_processor_dir = NULL;
277 
278 static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset)
279 {
280 	struct acpi_processor *pr = seq->private;
281 
282 
283 	if (!pr)
284 		goto end;
285 
286 	seq_printf(seq, "processor id:            %d\n"
287 		   "acpi id:                 %d\n"
288 		   "bus mastering control:   %s\n"
289 		   "power management:        %s\n"
290 		   "throttling control:      %s\n"
291 		   "limit interface:         %s\n",
292 		   pr->id,
293 		   pr->acpi_id,
294 		   pr->flags.bm_control ? "yes" : "no",
295 		   pr->flags.power ? "yes" : "no",
296 		   pr->flags.throttling ? "yes" : "no",
297 		   pr->flags.limit ? "yes" : "no");
298 
299       end:
300 	return 0;
301 }
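
/*
 * Example of the resulting output (illustrative values) as read from
 * /proc/acpi/processor/CPU0/info on a system where all four features are
 * available:
 *
 *	processor id:            0
 *	acpi id:                 1
 *	bus mastering control:   yes
 *	power management:        yes
 *	throttling control:      yes
 *	limit interface:         yes
 */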
302 
303 static int acpi_processor_info_open_fs(struct inode *inode, struct file *file)
304 {
305 	return single_open(file, acpi_processor_info_seq_show,
306 			   PDE(inode)->data);
307 }
308 
309 static int acpi_processor_add_fs(struct acpi_device *device)
310 {
311 	struct proc_dir_entry *entry = NULL;
312 
313 
314 	if (!acpi_device_dir(device)) {
315 		acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
316 						     acpi_processor_dir);
317 		if (!acpi_device_dir(device))
318 			return -ENODEV;
319 	}
320 	acpi_device_dir(device)->owner = THIS_MODULE;
321 
322 	/* 'info' [R] */
323 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
324 				  S_IRUGO, acpi_device_dir(device));
325 	if (!entry)
326 		return -EIO;
327 	else {
328 		entry->proc_fops = &acpi_processor_info_fops;
329 		entry->data = acpi_driver_data(device);
330 		entry->owner = THIS_MODULE;
331 	}
332 
333 	/* 'throttling' [R/W] */
334 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
335 				  S_IFREG | S_IRUGO | S_IWUSR,
336 				  acpi_device_dir(device));
337 	if (!entry)
338 		return -EIO;
339 	else {
340 		entry->proc_fops = &acpi_processor_throttling_fops;
341 		entry->data = acpi_driver_data(device);
342 		entry->owner = THIS_MODULE;
343 	}
344 
345 	/* 'limit' [R/W] */
346 	entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
347 				  S_IFREG | S_IRUGO | S_IWUSR,
348 				  acpi_device_dir(device));
349 	if (!entry)
350 		return -EIO;
351 	else {
352 		entry->proc_fops = &acpi_processor_limit_fops;
353 		entry->data = acpi_driver_data(device);
354 		entry->owner = THIS_MODULE;
355 	}
356 
357 	return 0;
358 }
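
/*
 * The entries created above appear as
 * /proc/acpi/processor/<bid>/{info,throttling,limit}, where <bid> is the
 * processor device's bus id (typically "CPU0", "CPU1", ...).
 */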
359 
360 static int acpi_processor_remove_fs(struct acpi_device *device)
361 {
362 
363 	if (acpi_device_dir(device)) {
364 		remove_proc_entry(ACPI_PROCESSOR_FILE_INFO,
365 				  acpi_device_dir(device));
366 		remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
367 				  acpi_device_dir(device));
368 		remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
369 				  acpi_device_dir(device));
370 		remove_proc_entry(acpi_device_bid(device), acpi_processor_dir);
371 		acpi_device_dir(device) = NULL;
372 	}
373 
374 	return 0;
375 }
376 
377 /* Use the ACPI id in the MADT to map CPUs in the SMP case */
378 
379 #ifndef CONFIG_SMP
380 static int get_cpu_id(acpi_handle handle, u32 acpi_id) {return -1;}
381 #else
382 
383 static struct acpi_table_madt *madt;
384 
385 static int map_lapic_id(struct acpi_subtable_header *entry,
386 		 u32 acpi_id, int *apic_id)
387 {
388 	struct acpi_madt_local_apic *lapic =
389 		(struct acpi_madt_local_apic *)entry;
390 	if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
391 	    lapic->processor_id == acpi_id) {
392 		*apic_id = lapic->id;
393 		return 1;
394 	}
395 	return 0;
396 }
397 
398 static int map_lsapic_id(struct acpi_subtable_header *entry,
399 		  u32 acpi_id, int *apic_id)
400 {
401 	struct acpi_madt_local_sapic *lsapic =
402 		(struct acpi_madt_local_sapic *)entry;
403 	/* Only check enabled APICs */
404 	if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
405 		/* First check against id */
406 		if (lsapic->processor_id == acpi_id) {
407 			*apic_id = lsapic->id;
408 			return 1;
409 		/* Check against optional uid */
410 		} else if (entry->length >= 16 &&
411 			lsapic->uid == acpi_id) {
412 			*apic_id = lsapic->uid;
413 			return 1;
414 		}
415 	}
416 	return 0;
417 }
418 
419 #ifdef CONFIG_IA64
420 #define arch_cpu_to_apicid 	ia64_cpu_to_sapicid
421 #else
422 #define arch_cpu_to_apicid 	x86_cpu_to_apicid
423 #endif
424 
425 static int map_madt_entry(u32 acpi_id)
426 {
427 	unsigned long madt_end, entry;
428 	int apic_id = -1;
429 
430 	if (!madt)
431 		return apic_id;
432 
433 	entry = (unsigned long)madt;
434 	madt_end = entry + madt->header.length;
435 
436 	/* Parse all entries looking for a match. */
437 
438 	entry += sizeof(struct acpi_table_madt);
439 	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
440 		struct acpi_subtable_header *header =
441 			(struct acpi_subtable_header *)entry;
442 		if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
443 			if (map_lapic_id(header, acpi_id, &apic_id))
444 				break;
445 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
446 			if (map_lsapic_id(header, acpi_id, &apic_id))
447 				break;
448 		}
449 		entry += header->length;
450 	}
451 	return apic_id;
452 }
453 
454 static int map_mat_entry(acpi_handle handle, u32 acpi_id)
455 {
456 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
457 	union acpi_object *obj;
458 	struct acpi_subtable_header *header;
459 	int apic_id = -1;
460 
461 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
462 		goto exit;
463 
464 	if (!buffer.length || !buffer.pointer)
465 		goto exit;
466 
467 	obj = buffer.pointer;
468 	if (obj->type != ACPI_TYPE_BUFFER ||
469 	    obj->buffer.length < sizeof(struct acpi_subtable_header)) {
470 		goto exit;
471 	}
472 
473 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
474 	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
475 		map_lapic_id(header, acpi_id, &apic_id);
476 	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
477 		map_lsapic_id(header, acpi_id, &apic_id);
478 	}
479 
480 exit:
481 	if (buffer.pointer)
482 		kfree(buffer.pointer);
483 	return apic_id;
484 }
485 
486 static int get_cpu_id(acpi_handle handle, u32 acpi_id)
487 {
488 	int i;
489 	int apic_id = -1;
490 
491 	apic_id = map_mat_entry(handle, acpi_id);
492 	if (apic_id == -1)
493 		apic_id = map_madt_entry(acpi_id);
494 	if (apic_id == -1)
495 		return apic_id;
496 
497 	for (i = 0; i < NR_CPUS; ++i) {
498 		if (arch_cpu_to_apicid[i] == apic_id)
499 			return i;
500 	}
501 	return -1;
502 }
503 #endif
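
/*
 * Worked example with hypothetical values: a namespace object declared as
 * Processor(CPU1, 2, ...) gives acpi_id == 2.  Its _MAT method (or, failing
 * that, the MADT) supplies a Local APIC entry with processor_id == 2 and
 * id == 3, so apic_id becomes 3.  get_cpu_id() then scans
 * arch_cpu_to_apicid[] for the entry equal to 3 and returns its index as the
 * logical CPU number.
 */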
504 
505 /* --------------------------------------------------------------------------
506                                  Driver Interface
507    -------------------------------------------------------------------------- */
508 
509 static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
510 {
511 	acpi_status status = 0;
512 	union acpi_object object = { 0 };
513 	struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
514 	int cpu_index;
515 	static int cpu0_initialized;
516 
517 
518 	if (!pr)
519 		return -EINVAL;
520 
521 	if (num_online_cpus() > 1)
522 		errata.smp = TRUE;
523 
524 	acpi_processor_errata(pr);
525 
526 	/*
527 	 * Check to see if we have bus mastering arbitration control.  This
528 	 * is required for proper C3 usage (to maintain cache coherency).
529 	 */
530 	if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
531 		pr->flags.bm_control = 1;
532 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
533 				  "Bus mastering arbitration control present\n"));
534 	} else
535 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
536 				  "No bus mastering arbitration control\n"));
537 
538 	/* Check if it is a Device with HID and UID */
539 	if (has_uid) {
540 		unsigned long value;
541 		status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
542 						NULL, &value);
543 		if (ACPI_FAILURE(status)) {
544 			printk(KERN_ERR PREFIX "Evaluating processor _UID\n");
545 			return -ENODEV;
546 		}
547 		pr->acpi_id = value;
548 	} else {
549 		/*
550 		 * Evaluate the processor object.  Note that it is common on SMP to
551 		 * have the first (boot) processor with a valid PBLK address while
552 		 * all others have a NULL address.
553 		 */
554 		status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
555 		if (ACPI_FAILURE(status)) {
556 			printk(KERN_ERR PREFIX "Evaluating processor object\n");
557 			return -ENODEV;
558 		}
559 
560 		/*
561 		 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
562 		 *      >>> 'acpi_get_processor_id(acpi_id, &id)' in arch/xxx/acpi.c
563 		 */
564 		pr->acpi_id = object.processor.proc_id;
565 	}
566 	cpu_index = get_cpu_id(pr->handle, pr->acpi_id);
567 
568 	/* Handle UP system running SMP kernel, with no LAPIC in MADT */
569 	if (!cpu0_initialized && (cpu_index == -1) &&
570 	    (num_online_cpus() == 1)) {
571 		cpu_index = 0;
572 	}
573 
574 	cpu0_initialized = 1;
575 
576 	pr->id = cpu_index;
577 
578 	/*
579 	 *  Extra Processor objects may be enumerated on MP systems with
580 	 *  less than the max # of CPUs. They should be ignored _iff
581 	 *  they are physically not present.
582 	 */
583 	if (pr->id == -1) {
584 		if (ACPI_FAILURE
585 		    (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
586 			return -ENODEV;
587 		}
588 	}
589 
590 	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
591 			  pr->acpi_id));
592 
593 	if (!object.processor.pblk_address)
594 		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
595 	else if (object.processor.pblk_length != 6)
596 		printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n",
597 			    object.processor.pblk_length);
598 	else {
599 		pr->throttling.address = object.processor.pblk_address;
600 		pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
601 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
602 
603 		pr->pblk = object.processor.pblk_address;
604 
605 		/*
606 		 * We don't care about error returns - we just try to mark
607 		 * these reserved so that nobody else is confused into thinking
608 		 * that this region might be unused..
609 		 *
610 		 * (In particular, allocating the IO range for Cardbus)
611 		 */
612 		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
613 	}
614 
615 #ifdef CONFIG_CPU_FREQ
616 	acpi_processor_ppc_has_changed(pr);
617 #endif
618 	acpi_processor_get_throttling_info(pr);
619 	acpi_processor_get_limit_info(pr);
620 
621 	return 0;
622 }
623 
624 static void *processor_device_array[NR_CPUS];
625 
626 static int __cpuinit acpi_processor_start(struct acpi_device *device)
627 {
628 	int result = 0;
629 	acpi_status status = AE_OK;
630 	struct acpi_processor *pr;
631 
632 
633 	pr = acpi_driver_data(device);
634 
635 	result = acpi_processor_get_info(pr, device->flags.unique_id);
636 	if (result) {
637 		/* Processor is physically not present */
638 		return 0;
639 	}
640 
641 	BUG_ON((pr->id >= NR_CPUS) || (pr->id < 0));
642 
643 	/*
644 	 * Buggy BIOS check
645 	 * ACPI id of processors can be reported wrongly by the BIOS.
646 	 * Don't trust it blindly
647 	 */
648 	if (processor_device_array[pr->id] != NULL &&
649 	    processor_device_array[pr->id] != device) {
650 		printk(KERN_WARNING "BIOS reported wrong ACPI id "
651 			"for the processor\n");
652 		return -ENODEV;
653 	}
654 	processor_device_array[pr->id] = device;
655 
656 	processors[pr->id] = pr;
657 
658 	result = acpi_processor_add_fs(device);
659 	if (result)
660 		goto end;
661 
662 	status = acpi_install_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
663 					     acpi_processor_notify, pr);
664 
665 	/* _PDC call should be done before doing anything else (if required). */
666 	arch_acpi_processor_init_pdc(pr);
667 	acpi_processor_set_pdc(pr);
668 
669 	acpi_processor_power_init(pr, device);
670 
671 	if (pr->flags.throttling) {
672 		printk(KERN_INFO PREFIX "%s [%s] (supports",
673 		       acpi_device_name(device), acpi_device_bid(device));
674 		printk(" %d throttling states", pr->throttling.state_count);
675 		printk(")\n");
676 	}
677 
678       end:
679 
680 	return result;
681 }
682 
683 static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
684 {
685 	struct acpi_processor *pr = data;
686 	struct acpi_device *device = NULL;
687 
688 
689 	if (!pr)
690 		return;
691 
692 	if (acpi_bus_get_device(pr->handle, &device))
693 		return;
694 
695 	switch (event) {
696 	case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
697 		acpi_processor_ppc_has_changed(pr);
698 		acpi_bus_generate_event(device, event,
699 					pr->performance_platform_limit);
700 		break;
701 	case ACPI_PROCESSOR_NOTIFY_POWER:
702 		acpi_processor_cst_has_changed(pr);
703 		acpi_bus_generate_event(device, event, 0);
704 		break;
705 	default:
706 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
707 				  "Unsupported event [0x%x]\n", event));
708 		break;
709 	}
710 
711 	return;
712 }
713 
714 static int acpi_processor_add(struct acpi_device *device)
715 {
716 	struct acpi_processor *pr = NULL;
717 
718 
719 	if (!device)
720 		return -EINVAL;
721 
722 	pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
723 	if (!pr)
724 		return -ENOMEM;
725 
726 	pr->handle = device->handle;
727 	strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
728 	strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
729 	acpi_driver_data(device) = pr;
730 
731 	return 0;
732 }
733 
734 static int acpi_processor_remove(struct acpi_device *device, int type)
735 {
736 	acpi_status status = AE_OK;
737 	struct acpi_processor *pr = NULL;
738 
739 
740 	if (!device || !acpi_driver_data(device))
741 		return -EINVAL;
742 
743 	pr = acpi_driver_data(device);
744 
745 	if (pr->id >= NR_CPUS) {
746 		kfree(pr);
747 		return 0;
748 	}
749 
750 	if (type == ACPI_BUS_REMOVAL_EJECT) {
751 		if (acpi_processor_handle_eject(pr))
752 			return -EINVAL;
753 	}
754 
755 	acpi_processor_power_exit(pr, device);
756 
757 	status = acpi_remove_notify_handler(pr->handle, ACPI_DEVICE_NOTIFY,
758 					    acpi_processor_notify);
759 
760 	acpi_processor_remove_fs(device);
761 
762 	processors[pr->id] = NULL;
763 
764 	kfree(pr);
765 
766 	return 0;
767 }
768 
769 #ifdef CONFIG_ACPI_HOTPLUG_CPU
770 /****************************************************************************
771  * 	ACPI processor hotplug support 				       	    *
772  ****************************************************************************/
773 
774 static int is_processor_present(acpi_handle handle);
775 
776 static int is_processor_present(acpi_handle handle)
777 {
778 	acpi_status status;
779 	unsigned long sta = 0;
780 
781 
782 	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
783 	if (ACPI_FAILURE(status) || !(sta & ACPI_STA_PRESENT)) {
784 		ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
785 		return 0;
786 	}
787 	return 1;
788 }
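
/*
 * _STA returns a bit mask; only bit 0 (ACPI_STA_PRESENT, "device physically
 * present") matters for the hot-plug checks in this file.  The remaining
 * bits (enabled, shown in UI, functioning) are not examined here.
 */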
789 
790 static
791 int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device)
792 {
793 	acpi_handle phandle;
794 	struct acpi_device *pdev;
795 	struct acpi_processor *pr;
796 
797 
798 	if (acpi_get_parent(handle, &phandle)) {
799 		return -ENODEV;
800 	}
801 
802 	if (acpi_bus_get_device(phandle, &pdev)) {
803 		return -ENODEV;
804 	}
805 
806 	if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) {
807 		return -ENODEV;
808 	}
809 
810 	acpi_bus_start(*device);
811 
812 	pr = acpi_driver_data(*device);
813 	if (!pr)
814 		return -ENODEV;
815 
816 	if ((pr->id >= 0) && (pr->id < NR_CPUS)) {
817 		kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
818 	}
819 	return 0;
820 }
821 
822 static void
823 acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data)
824 {
825 	struct acpi_processor *pr;
826 	struct acpi_device *device = NULL;
827 	int result;
828 
829 
830 	switch (event) {
831 	case ACPI_NOTIFY_BUS_CHECK:
832 	case ACPI_NOTIFY_DEVICE_CHECK:
833 		printk(KERN_INFO "Processor driver received %s event\n",
834 		       (event == ACPI_NOTIFY_BUS_CHECK) ?
835 		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
836 
837 		if (!is_processor_present(handle))
838 			break;
839 
840 		if (acpi_bus_get_device(handle, &device)) {
841 			result = acpi_processor_device_add(handle, &device);
842 			if (result)
843 				printk(KERN_ERR PREFIX
844 					    "Unable to add the device\n");
845 			break;
846 		}
847 
848 		pr = acpi_driver_data(device);
849 		if (!pr) {
850 			printk(KERN_ERR PREFIX "Driver data is NULL\n");
851 			break;
852 		}
853 
854 		if (pr->id >= 0 && (pr->id < NR_CPUS)) {
855 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
856 			break;
857 		}
858 
859 		result = acpi_processor_start(device);
860 		if ((!result) && ((pr->id >= 0) && (pr->id < NR_CPUS))) {
861 			kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
862 		} else {
863 			printk(KERN_ERR PREFIX "Device [%s] failed to start\n",
864 				    acpi_device_bid(device));
865 		}
866 		break;
867 	case ACPI_NOTIFY_EJECT_REQUEST:
868 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
869 				  "received ACPI_NOTIFY_EJECT_REQUEST\n"));
870 
871 		if (acpi_bus_get_device(handle, &device)) {
872 			printk(KERN_ERR PREFIX
873 				    "Device doesn't exist, dropping EJECT\n");
874 			break;
875 		}
876 		pr = acpi_driver_data(device);
877 		if (!pr) {
878 			printk(KERN_ERR PREFIX
879 				    "Driver data is NULL, dropping EJECT\n");
880 			return;
881 		}
882 
883 		if ((pr->id < NR_CPUS) && (cpu_present(pr->id)))
884 			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
885 		break;
886 	default:
887 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
888 				  "Unsupported event [0x%x]\n", event));
889 		break;
890 	}
891 
892 	return;
893 }
894 
895 static acpi_status
896 processor_walk_namespace_cb(acpi_handle handle,
897 			    u32 lvl, void *context, void **rv)
898 {
899 	acpi_status status;
900 	int *action = context;
901 	acpi_object_type type = 0;
902 
903 	status = acpi_get_type(handle, &type);
904 	if (ACPI_FAILURE(status))
905 		return (AE_OK);
906 
907 	if (type != ACPI_TYPE_PROCESSOR)
908 		return (AE_OK);
909 
910 	switch (*action) {
911 	case INSTALL_NOTIFY_HANDLER:
912 		acpi_install_notify_handler(handle,
913 					    ACPI_SYSTEM_NOTIFY,
914 					    acpi_processor_hotplug_notify,
915 					    NULL);
916 		break;
917 	case UNINSTALL_NOTIFY_HANDLER:
918 		acpi_remove_notify_handler(handle,
919 					   ACPI_SYSTEM_NOTIFY,
920 					   acpi_processor_hotplug_notify);
921 		break;
922 	default:
923 		break;
924 	}
925 
926 	return (AE_OK);
927 }
928 
929 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
930 {
931 
932 	if (!is_processor_present(handle)) {
933 		return AE_ERROR;
934 	}
935 
936 	if (acpi_map_lsapic(handle, p_cpu))
937 		return AE_ERROR;
938 
939 	if (arch_register_cpu(*p_cpu)) {
940 		acpi_unmap_lsapic(*p_cpu);
941 		return AE_ERROR;
942 	}
943 
944 	return AE_OK;
945 }
946 
947 static int acpi_processor_handle_eject(struct acpi_processor *pr)
948 {
949 	if (cpu_online(pr->id)) {
950 		return (-EINVAL);
951 	}
952 	arch_unregister_cpu(pr->id);
953 	acpi_unmap_lsapic(pr->id);
954 	return (0);
955 }
956 #else
957 static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
958 {
959 	return AE_ERROR;
960 }
961 static int acpi_processor_handle_eject(struct acpi_processor *pr)
962 {
963 	return (-EINVAL);
964 }
965 #endif
966 
967 static
968 void acpi_processor_install_hotplug_notify(void)
969 {
970 #ifdef CONFIG_ACPI_HOTPLUG_CPU
971 	int action = INSTALL_NOTIFY_HANDLER;
972 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
973 			    ACPI_ROOT_OBJECT,
974 			    ACPI_UINT32_MAX,
975 			    processor_walk_namespace_cb, &action, NULL);
976 #endif
977 }
978 
979 static
980 void acpi_processor_uninstall_hotplug_notify(void)
981 {
982 #ifdef CONFIG_ACPI_HOTPLUG_CPU
983 	int action = UNINSTALL_NOTIFY_HANDLER;
984 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR,
985 			    ACPI_ROOT_OBJECT,
986 			    ACPI_UINT32_MAX,
987 			    processor_walk_namespace_cb, &action, NULL);
988 #endif
989 }
990 
991 /*
992  * We keep the driver loaded even when ACPI is not running.
993  * This is needed for the powernow-k8 driver, that works even without
994  * ACPI, but needs symbols from this driver
995  */
996 
997 static int __init acpi_processor_init(void)
998 {
999 	int result = 0;
1000 
1001 
1002 	memset(&processors, 0, sizeof(processors));
1003 	memset(&errata, 0, sizeof(errata));
1004 
1005 #ifdef CONFIG_SMP
1006 	if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
1007 				(struct acpi_table_header **)&madt)))
1008 		madt = NULL;
1009 #endif
1010 
1011 	acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1012 	if (!acpi_processor_dir)
1013 		return -ENOMEM;
1014 	acpi_processor_dir->owner = THIS_MODULE;
1015 
1016 	result = acpi_bus_register_driver(&acpi_processor_driver);
1017 	if (result < 0) {
1018 		remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1019 		return result;
1020 	}
1021 
1022 	acpi_processor_install_hotplug_notify();
1023 
1024 	acpi_thermal_cpufreq_init();
1025 
1026 	acpi_processor_ppc_init();
1027 
1028 	return 0;
1029 }
1030 
1031 static void __exit acpi_processor_exit(void)
1032 {
1033 
1034 	acpi_processor_ppc_exit();
1035 
1036 	acpi_thermal_cpufreq_exit();
1037 
1038 	acpi_processor_uninstall_hotplug_notify();
1039 
1040 	acpi_bus_unregister_driver(&acpi_processor_driver);
1041 
1042 	remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
1043 
1044 	return;
1045 }
1046 
1047 module_init(acpi_processor_init);
1048 module_exit(acpi_processor_exit);
1049 
1050 EXPORT_SYMBOL(acpi_processor_set_thermal_limit);
1051 
1052 MODULE_ALIAS("processor");
1053