/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *      Alex Chiang <achiang@hp.com>
 *      - Unified x86/ia64 implementations
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *      - Added _PDC for platforms with Intel CPUs
 */
#include <linux/dmi.h>
#include <linux/slab.h>

#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#include "internal.h"

#define PREFIX          "ACPI: "
#define _COMPONENT      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

static int set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - "
                "disabling mwait for CPU C-states\n", id->ident);
        idle_nomwait = 1;
        return 0;
}

static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
        {
        set_no_mwait, "IFL91 board", {
        DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
        DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
        DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
        DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
        {},
};

#ifdef CONFIG_SMP
static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;

        if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (lapic->processor_id != acpi_id)
                return 0;

        *apic_id = lapic->id;
        return 1;
}

static int map_x2apic_id(struct acpi_subtable_header *entry,
                         int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                (struct acpi_madt_local_x2apic *)entry;

        if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration && (apic->uid == acpi_id)) {
                *apic_id = apic->local_apic_id;
                return 1;
        }

        return 0;
}

static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;

        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration) {
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return 0;
        } else if (lsapic->processor_id != acpi_id)
                return 0;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 1;
}

static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
        int apic_id = -1;

        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                        (struct acpi_table_header **)&madt)))
                        madt = NULL;
                read_madt++;
        }

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */

        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (map_x2apic_id(header, type, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        if (buffer.pointer)
                kfree(buffer.pointer);
        return apic_id;
}

int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
        int i;
        int apic_id = -1;

        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
        if (apic_id == -1)
                return apic_id;

        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == apic_id)
                        return i;
        }
        return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
#endif
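
/*
 * Example (illustrative only; "handle" and "acpi_id" are assumed to come
 * from the caller's own namespace walk): resolving a logical CPU number
 * with acpi_get_cpuid().  A type argument of 1 means the processor was
 * declared with Device(), 0 means it was declared with Processor().
 *
 *      int cpu = acpi_get_cpuid(handle, 1, acpi_id);
 *
 *      if (cpu < 0)
 *              pr_info("no possible CPU matches ACPI ID %u\n", acpi_id);
 *      else
 *              pr_info("ACPI ID %u maps to logical CPU %d\n", acpi_id, cpu);
 */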

static bool processor_physically_present(acpi_handle handle)
{
        int cpuid, type;
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return false;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = object.processor.proc_id;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = tmp;
                break;
        default:
                return false;
        }

        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);

        if ((cpuid == -1) && (num_possible_cpus() > 1))
                return false;

        return true;
}

static void acpi_set_pdc_bits(u32 *buf)
{
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;

        /* Enable coordination with firmware's _TSD info */
        buf[2] = ACPI_PDC_SMP_T_SWCOORD;

        /* Twiddle arch-specific bits needed for _PDC */
        arch_acpi_set_pdc_bits(buf);
}

static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
        struct acpi_object_list *obj_list;
        union acpi_object *obj;
        u32 *buf;

        /* allocate and initialize pdc. It will be used later. */
        obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
        if (!obj_list) {
                printk(KERN_ERR "Memory allocation error\n");
                return NULL;
        }

        obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
        if (!obj) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj_list);
                return NULL;
        }

        buf = kmalloc(12, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj);
                kfree(obj_list);
                return NULL;
        }

        acpi_set_pdc_bits(buf);

        obj->type = ACPI_TYPE_BUFFER;
        obj->buffer.length = 12;
        obj->buffer.pointer = (u8 *) buf;
        obj_list->count = 1;
        obj_list->pointer = obj;

        return obj_list;
}
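
/*
 * For reference: the 12-byte buffer built above is three u32 words,
 * matching the layout acpi_set_pdc_bits() fills in:
 *
 *      buf[0]  ACPI_PDC_REVISION_ID    revision of the capability buffer
 *      buf[1]  1                       number of capability DWORDs that follow
 *      buf[2]  capability bits         ACPI_PDC_SMP_T_SWCOORD plus whatever
 *                                      arch_acpi_set_pdc_bits() ORs in
 *
 * acpi_processor_eval_pdc() below may additionally clear the C1_FFH and
 * C2C3_FFH bits in buf[2] before the object is handed to the firmware.
 */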

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
        acpi_status status = AE_OK;

        if (idle_nomwait) {
                /*
                 * If MWAIT is disabled for CPU C-states, clear the C2C3_FFH
                 * access-mode bit in the _PDC capability buffer; the C1_FFH
                 * access-mode bit is cleared as well.
                 */
                union acpi_object *obj;
                u32 *buffer = NULL;

                obj = pdc_in->pointer;
                buffer = (u32 *)(obj->buffer.pointer);
                buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);

        }
        status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);

        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control.\n"));

        return status;
}

void acpi_processor_set_pdc(acpi_handle handle)
{
        struct acpi_object_list *obj_list;

        if (arch_has_acpi_pdc() == false)
                return;

        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;

        acpi_processor_eval_pdc(handle, obj_list);

        kfree(obj_list->pointer->buffer.pointer);
        kfree(obj_list->pointer);
        kfree(obj_list);
}
EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);

static acpi_status
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        if (processor_physically_present(handle) == false)
                return AE_OK;

        acpi_processor_set_pdc(handle);
        return AE_OK;
}

void __init acpi_early_processor_set_pdc(void)
{
        /*
         * Check whether the system matches an entry in the DMI table.
         * If it does, OSPM should not use MWAIT for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
}
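
/*
 * Usage sketch (assumed call sites, not defined in this file): the early
 * hook above is intended to run once while the ACPI subsystem is brought
 * up, before the processor driver binds to individual CPU objects, e.g.:
 *
 *      acpi_early_processor_set_pdc();         // during ACPI bus init
 *
 * A processor driver that later enumerates a single CPU object can repeat
 * the handshake for just that device:
 *
 *      acpi_processor_set_pdc(pr->handle);     // pr: struct acpi_processor
 *
 * _PDC only reports OSPM capabilities to the firmware, so both paths build
 * the same capability buffer via acpi_processor_alloc_pdc().
 */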