/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/pci_cfgreg.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

#ifdef INTRNG
#include "acpi_bus_if.h"
#endif

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_BUS
ACPI_MODULE_NAME("RESOURCE")

struct lookup_irq_request {
    ACPI_RESOURCE *acpi_res;
    u_int irq;
    int counter;
    int rid;
    int found;
    int checkrid;
    int trig;
    int pol;
};

static char *pcilink_ids[] = { "PNP0C0F", NULL };

static ACPI_STATUS
acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
{
    struct lookup_irq_request *req;
    size_t len;
    u_int irqnum, irq, trig, pol;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        irqnum = res->Data.Irq.InterruptCount;
        irq = res->Data.Irq.Interrupts[0];
        len = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        irqnum = res->Data.ExtendedIrq.InterruptCount;
        irq = res->Data.ExtendedIrq.Interrupts[0];
        len = ACPI_RS_SIZE(ACPI_RESOURCE_EXTENDED_IRQ);
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        return (AE_OK);
    }
    if (irqnum != 1)
        return (AE_OK);
    req = (struct lookup_irq_request *)context;
    if (req->checkrid) {
        if (req->counter != req->rid) {
            req->counter++;
            return (AE_OK);
        }
        KASSERT(irq == req->irq, ("IRQ resources do not match"));
    } else {
        if (req->irq != irq)
            return (AE_OK);
    }
    req->found = 1;
    req->pol = pol;
    req->trig = trig;
    if (req->acpi_res != NULL)
        bcopy(res, req->acpi_res, len);
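    /*
     * A matching IRQ resource has been recorded; returning
     * AE_CTRL_TERMINATE stops the AcpiWalkResources() walk here.
     */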
    return (AE_CTRL_TERMINATE);
}

ACPI_STATUS
acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res,
    ACPI_RESOURCE *acpi_res)
{
    struct lookup_irq_request req;
    ACPI_STATUS status;

    req.acpi_res = acpi_res;
    req.irq = rman_get_start(res);
    req.counter = 0;
    req.rid = rid;
    req.found = 0;
    req.checkrid = 1;
    status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
        acpi_lookup_irq_handler, &req);
    if (ACPI_SUCCESS(status) && req.found == 0)
        status = AE_NOT_FOUND;
    return (status);
}

void
acpi_config_intr(device_t dev, ACPI_RESOURCE *res)
{
    u_int irq;
    int pol, trig;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        KASSERT(res->Data.Irq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.Irq.Interrupts[0];
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        KASSERT(res->Data.ExtendedIrq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.ExtendedIrq.Interrupts[0];
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        panic("%s: bad resource type %u", __func__, res->Type);
    }

#if defined(__amd64__) || defined(__i386__)
    /*
     * XXX: Certain BIOSes have buggy AML that specifies an IRQ that is
     * edge-sensitive and active-lo.  However, edge-sensitive IRQs
     * should be active-hi.  Force IRQs with an ISA IRQ value to be
     * active-hi instead.
     */
    if (irq < 16 && trig == ACPI_EDGE_SENSITIVE && pol == ACPI_ACTIVE_LOW)
        pol = ACPI_ACTIVE_HIGH;
#endif
    BUS_CONFIG_INTR(dev, irq, (trig == ACPI_EDGE_SENSITIVE) ?
        INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ?
        INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}

#ifdef INTRNG
int
acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle)
{
    struct lookup_irq_request req;
    int trig, pol;

    trig = ACPI_LEVEL_SENSITIVE;
    pol = ACPI_ACTIVE_HIGH;
    if (handle != NULL) {
        req.found = 0;
        req.acpi_res = NULL;
        req.irq = irq;
        req.counter = 0;
        req.rid = 0;
        req.checkrid = 0;
        AcpiWalkResources(handle, "_CRS", acpi_lookup_irq_handler, &req);
        if (req.found != 0) {
            trig = req.trig;
            pol = req.pol;
        }
    }
    return ACPI_BUS_MAP_INTR(device_get_parent(dev), dev, irq,
        (trig == ACPI_EDGE_SENSITIVE) ? INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL,
        (pol == ACPI_ACTIVE_HIGH) ?
        INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}
#endif

struct acpi_resource_context {
    struct acpi_parse_resource_set *set;
    device_t dev;
    void *context;
    bool ignore_producer_flag;
};

#ifdef ACPI_DEBUG_OUTPUT
static const char *
acpi_address_range_name(UINT8 ResourceType)
{
    static char buf[16];

    switch (ResourceType) {
    case ACPI_MEMORY_RANGE:
        return ("Memory");
    case ACPI_IO_RANGE:
        return ("IO");
    case ACPI_BUS_NUMBER_RANGE:
        return ("Bus Number");
    default:
        snprintf(buf, sizeof(buf), "type %u", ResourceType);
        return (buf);
    }
}
#endif

static ACPI_STATUS
acpi_parse_resource(ACPI_RESOURCE *res, void *context)
{
    struct acpi_parse_resource_set *set;
    struct acpi_resource_context *arc;
    UINT64 min, max, length, gran;
#ifdef ACPI_DEBUG
    const char *name;
#endif
    device_t dev;

    arc = context;
    dev = arc->dev;
    set = arc->set;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_END_TAG:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n"));
        break;
    case ACPI_RESOURCE_TYPE_FIXED_IO:
        if (res->Data.FixedIo.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n",
            res->Data.FixedIo.Address, res->Data.FixedIo.AddressLength));
        set->set_ioport(dev, arc->context, res->Data.FixedIo.Address,
            res->Data.FixedIo.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_IO:
        if (res->Data.Io.AddressLength <= 0)
            break;
        if (res->Data.Io.Minimum == res->Data.Io.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.AddressLength));
            set->set_ioport(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.Maximum,
                res->Data.Io.AddressLength));
            set->set_iorange(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.Maximum, res->Data.Io.AddressLength,
                res->Data.Io.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
        if (res->Data.FixedMemory32.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n",
            res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength));
        set->set_memory(dev, arc->context, res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_MEMORY32:
        if (res->Data.Memory32.AddressLength <= 0)
            break;
        if (res->Data.Memory32.Minimum == res->Data.Memory32.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n",
                res->Data.Memory32.Minimum,
                res->Data.Memory32.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory32.Minimum,
                res->Data.Memory32.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n",
                res->Data.Memory32.Minimum, res->Data.Memory32.Maximum,
                res->Data.Memory32.AddressLength));
            set->set_memoryrange(dev, arc->context,
                res->Data.Memory32.Minimum, res->Data.Memory32.Maximum,
                res->Data.Memory32.AddressLength,
                res->Data.Memory32.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_MEMORY24:
        if (res->Data.Memory24.AddressLength <= 0)
            break;
        if (res->Data.Memory24.Minimum == res->Data.Memory24.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n",
                res->Data.Memory24.Minimum,
                res->Data.Memory24.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory24.Minimum,
                res->Data.Memory24.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n",
                res->Data.Memory24.Minimum, res->Data.Memory24.Maximum,
                res->Data.Memory24.AddressLength));
            set->set_memoryrange(dev, arc->context,
                res->Data.Memory24.Minimum, res->Data.Memory24.Maximum,
                res->Data.Memory24.AddressLength,
                res->Data.Memory24.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_IRQ:
        /*
         * from 1.0b 6.4.2
         * "This structure is repeated for each separate interrupt
         * required"
         */
        set->set_irq(dev, arc->context, res->Data.Irq.Interrupts,
            res->Data.Irq.InterruptCount, res->Data.Irq.Triggering,
            res->Data.Irq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_DMA:
        /*
         * from 1.0b 6.4.3
         * "This structure is repeated for each separate DMA channel
         * required"
         */
        set->set_drq(dev, arc->context, res->Data.Dma.Channels,
            res->Data.Dma.ChannelCount);
        break;
    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependent functions\n"));
        set->set_start_dependent(dev, arc->context,
            res->Data.StartDpf.CompatibilityPriority);
        break;
    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependent functions\n"));
        set->set_end_dependent(dev, arc->context);
        break;
    case ACPI_RESOURCE_TYPE_ADDRESS16:
    case ACPI_RESOURCE_TYPE_ADDRESS32:
    case ACPI_RESOURCE_TYPE_ADDRESS64:
    case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
        switch (res->Type) {
        case ACPI_RESOURCE_TYPE_ADDRESS16:
            gran = res->Data.Address16.Address.Granularity;
            min = res->Data.Address16.Address.Minimum;
            max = res->Data.Address16.Address.Maximum;
            length = res->Data.Address16.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address16";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS32:
            gran = res->Data.Address32.Address.Granularity;
            min = res->Data.Address32.Address.Minimum;
            max = res->Data.Address32.Address.Maximum;
            length = res->Data.Address32.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address32";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS64:
            gran = res->Data.Address64.Address.Granularity;
            min = res->Data.Address64.Address.Minimum;
            max = res->Data.Address64.Address.Maximum;
            length = res->Data.Address64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address64";
#endif
            break;
        default:
            KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
                ("should never happen"));
            gran = res->Data.ExtAddress64.Address.Granularity;
            min = res->Data.ExtAddress64.Address.Minimum;
            max = res->Data.ExtAddress64.Address.Maximum;
            length = res->Data.ExtAddress64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "ExtAddress64";
#endif
            break;
        }
        if (length <= 0)
            break;
        if (!arc->ignore_producer_flag &&
            res->Data.Address.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s %s producer\n", name,
                acpi_address_range_name(res->Data.Address.ResourceType)));
            break;
        }
        if (res->Data.Address.ResourceType != ACPI_MEMORY_RANGE &&
            res->Data.Address.ResourceType != ACPI_IO_RANGE) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s for non-memory, non-I/O\n", name));
            break;
        }

#ifdef __i386__
        if (min > ULONG_MAX ||
            (res->Data.Address.MaxAddressFixed && max > ULONG_MAX)) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored %s above 4G\n",
                name));
            break;
        }
        if (max > ULONG_MAX)
            max = ULONG_MAX;
#endif
        if (res->Data.Address.MinAddressFixed == ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed == ACPI_ADDRESS_FIXED) {
            if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/Memory 0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)length));
                set->set_memory(dev, arc->context, min, length);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)length));
                set->set_ioport(dev, arc->context, min, length);
            }
        } else if (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED) {
            /* Fixed size, variable location resource descriptor */
            min = roundup(min, gran + 1);
            if ((min + length - 1) > max) {
                device_printf(dev,
                    "invalid memory range: start: %jx end: %jx max: %jx\n",
                    (uintmax_t)min, (uintmax_t)(min + length - 1),
                    (uintmax_t)max);
            } else {
                if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                        "%s/Memory 0x%jx/%ju\n", name, (uintmax_t)min,
                        (uintmax_t)length));
                    set->set_memory(dev, arc->context, min, length);
                } else {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n",
                        name, (uintmax_t)min, (uintmax_t)length));
                    set->set_ioport(dev, arc->context, min, length);
                }
            }
        } else {
            if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                    "%s/Memory 0x%jx-0x%jx/%ju\n", name, (uintmax_t)min,
                    (uintmax_t)max, (uintmax_t)length));
                set->set_memoryrange(dev, arc->context, min, max, length,
                    gran);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                    "%s/IO 0x%jx-0x%jx/%ju\n", name, (uintmax_t)min,
                    (uintmax_t)max, (uintmax_t)length));
                set->set_iorange(dev, arc->context, min, max, length, gran);
            }
        }
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        if (res->Data.ExtendedIrq.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored ExtIRQ producer\n"));
            break;
        }
        set->set_ext_irq(dev, arc->context, res->Data.ExtendedIrq.Interrupts,
            res->Data.ExtendedIrq.InterruptCount,
            res->Data.ExtendedIrq.Triggering, res->Data.ExtendedIrq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_VENDOR:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
            "unimplemented VendorSpecific resource\n"));
        break;
    default:
        break;
    }
    return (AE_OK);
}

/*
 * Fetch a device's resources and associate them with the device.
 *
 * Note that it might be nice to also locate ACPI-specific resource items,
 * such as GPE bits.
 *
 * We really need to split the resource-fetching code out from the
 * resource-parsing code, since we may want to use the parsing
 * code for _PRS someday.
 */
ACPI_STATUS
acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
    struct acpi_parse_resource_set *set, void *arg)
{
    struct acpi_resource_context arc;
    ACPI_STATUS status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    set->set_init(dev, arg, &arc.context);
    arc.set = set;
    arc.dev = dev;
    arc.ignore_producer_flag = false;

    /*
     * UARTs on ThunderX2 with 7.2 firmware set ResourceProducer on
     * memory resources.
     */
    if (acpi_MatchHid(handle, "ARMH0011") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * ARM Coresight on N1SDP sets ResourceProducer on memory resources.
     * Coresight devices: ETM, STM, TPIU, ETF/ETR, REP, FUN.
     */
    if (acpi_MatchHid(handle, "ARMHC500") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC502") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC600") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC979") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC97C") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC98D") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC9FF") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHD620") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * The DesignWare I2C Controller on Ampere Altra sets ResourceProducer
     * on memory resources.
     */
    if (acpi_MatchHid(handle, "APMC0D0F") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    status = AcpiWalkResources(handle, "_CRS", acpi_parse_resource, &arc);
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
        printf("can't fetch resources for %s - %s\n",
            acpi_name(handle), AcpiFormatException(status));
        return_ACPI_STATUS (status);
    }
    set->set_done(dev, arc.context);
    return_ACPI_STATUS (AE_OK);
}

/*
 * Resource-set vectors used to attach _CRS-derived resources
 * to an ACPI device.
 */
static void acpi_res_set_init(device_t dev, void *arg, void **context);
static void acpi_res_set_done(device_t dev, void *context);
static void acpi_res_set_ioport(device_t dev, void *context,
    uint64_t base, uint64_t length);
static void acpi_res_set_iorange(device_t dev, void *context,
    uint64_t low, uint64_t high, uint64_t length, uint64_t align);
static void acpi_res_set_memory(device_t dev, void *context,
    uint64_t base, uint64_t length);
static void acpi_res_set_memoryrange(device_t dev, void *context,
    uint64_t low, uint64_t high, uint64_t length, uint64_t align);
static void acpi_res_set_irq(device_t dev, void *context, uint8_t *irq,
    int count, int trig, int pol);
static void acpi_res_set_ext_irq(device_t dev, void *context,
    uint32_t *irq, int count, int trig, int pol);
static void acpi_res_set_drq(device_t dev, void *context, uint8_t *drq,
    int count);
static void acpi_res_set_start_dependent(device_t dev, void *context,
    int preference);
static void acpi_res_set_end_dependent(device_t dev, void *context);

struct acpi_parse_resource_set acpi_res_parse_set = {
    acpi_res_set_init,
    acpi_res_set_done,
    acpi_res_set_ioport,
    acpi_res_set_iorange,
    acpi_res_set_memory,
    acpi_res_set_memoryrange,
    acpi_res_set_irq,
    acpi_res_set_ext_irq,
    acpi_res_set_drq,
    acpi_res_set_start_dependent,
    acpi_res_set_end_dependent
};

struct acpi_res_context {
    int ar_nio;
    int ar_nmem;
    int ar_nirq;
    int ar_ndrq;
    void *ar_parent;
};

/*
 * Some resources reported via _CRS should not be added as bus
 * resources.  This function returns true if a resource reported via
 * _CRS should be ignored.
 */
static bool
acpi_res_ignore(device_t dev, int type, rman_res_t start, rman_res_t count)
{
    struct acpi_device *ad = device_get_ivars(dev);
    ACPI_DEVICE_INFO *devinfo;
    bool allow;

    /* Ignore IRQ resources for PCI link devices. */
    if (type == SYS_RES_IRQ &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, pcilink_ids, NULL) <= 0)
        return (true);

    /*
     * Ignore most resources for PCI root bridges.  Some BIOSes
     * incorrectly enumerate the memory ranges they decode as plain
     * memory resources instead of as ResourceProducer ranges.  Other
     * BIOSes incorrectly list system resource entries for I/O ranges
     * under the PCI bridge.  Do allow the one known-correct case on
     * x86 of a PCI bridge claiming the I/O ports used for PCI config
     * access.
     */
    if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
        if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
            if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
                allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT);
#else
                allow = false;
#endif
                if (!allow) {
                    AcpiOsFree(devinfo);
                    return (true);
                }
            }
            AcpiOsFree(devinfo);
        }
    }

    return (false);
}

static void
acpi_res_set_init(device_t dev, void *arg, void **context)
{
    struct acpi_res_context *cp;

    if ((cp = AcpiOsAllocate(sizeof(*cp))) != NULL) {
        bzero(cp, sizeof(*cp));
        cp->ar_parent = arg;
        *context = cp;
    }
}

static void
acpi_res_set_done(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    AcpiOsFree(cp);
}

static void
acpi_res_set_ioport(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_IOPORT, base, length))
        return;
    bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length);
}

static void
acpi_res_set_iorange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;

    /*
     * XXX: Some BIOSes contain buggy _CRS entries where fixed I/O
     * ranges set the maximum base address (_MAX) to the end of the
     * I/O range instead of the start.  These are then treated as a
     * relocatable I/O range rather than a fixed I/O resource.  As a
     * workaround, treat I/O resources encoded this way as fixed I/O
     * ports.
     */
    if (high == (low + length)) {
        if (bootverbose)
            device_printf(dev,
                "_CRS has fixed I/O port range defined as relocatable\n");

        if (acpi_res_ignore(dev, SYS_RES_IOPORT, low, length))
            return;
        bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, low, length);
        return;
    }

    device_printf(dev, "I/O range not supported\n");
}

static void
acpi_res_set_memory(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_MEMORY, base, length))
        return;
    bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length);
}

static void
acpi_res_set_memoryrange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "memory range not supported\n");
}

static void
acpi_res_set_irq(device_t dev, void *context, uint8_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_ext_irq(device_t dev, void *context, uint32_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_drq(device_t dev, void *context, uint8_t *drq, int count)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL || drq == NULL)
        return;

    /* This implements no resource relocation. */
    if (count != 1)
        return;

    if (acpi_res_ignore(dev, SYS_RES_DRQ, *drq, 1))
        return;
    bus_set_resource(dev, SYS_RES_DRQ, cp->ar_ndrq++, *drq, 1);
}

static void
acpi_res_set_start_dependent(device_t dev, void *context, int preference)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

static void
acpi_res_set_end_dependent(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

/*
 * Resource-owning placeholders for IO and memory pseudo-devices.
 *
 * This code allocates system resources that will be used by ACPI
 * child devices.  The acpi parent manages these resources through a
 * private rman.
 */

static int acpi_sysres_probe(device_t dev);
static int acpi_sysres_attach(device_t dev);

static device_method_t acpi_sysres_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_sysres_probe),
    DEVMETHOD(device_attach, acpi_sysres_attach),

    DEVMETHOD_END
};

static driver_t acpi_sysres_driver = {
    "acpi_sysresource",
    acpi_sysres_methods,
    0,
};

DRIVER_MODULE(acpi_sysresource, acpi, acpi_sysres_driver, 0, 0);
MODULE_DEPEND(acpi_sysresource, acpi, 1, 1, 1);

static int
acpi_sysres_probe(device_t dev)
{
    static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
    int rv;

    if (acpi_disabled("sysresource"))
        return (ENXIO);
    rv = ACPI_ID_PROBE(device_get_parent(dev), dev, sysres_ids, NULL);
    if (rv > 0)
        return (rv);
    device_set_desc(dev, "System Resource");
    device_quiet(dev);
    return (rv);
}

static int
acpi_sysres_attach(device_t dev)
{
    device_t bus;
    struct acpi_softc *bus_sc;
    struct resource_list_entry *bus_rle, *dev_rle;
    struct resource_list *bus_rl, *dev_rl;
    int done, type;
    rman_res_t start, end, count;

    /*
     * Loop through all current resources to see if the new one overlaps
     * any existing ones.  If so, grow the old one up and/or down
     * accordingly.  Discard any that are wholly contained in the old.  If
     * the resource is unique, add it to the parent.  It will later go into
     * the rman pool.
     */
    bus = device_get_parent(dev);
    dev_rl = BUS_GET_RESOURCE_LIST(bus, dev);
    bus_sc = acpi_device_get_parent_softc(dev);
    bus_rl = &bus_sc->sysres_rl;
    STAILQ_FOREACH(dev_rle, dev_rl, link) {
        if (dev_rle->type != SYS_RES_IOPORT && dev_rle->type != SYS_RES_MEMORY)
            continue;

        start = dev_rle->start;
        end = dev_rle->end;
        count = dev_rle->count;
        type = dev_rle->type;
        done = FALSE;

        STAILQ_FOREACH(bus_rle, bus_rl, link) {
            if (bus_rle->type != type)
                continue;

            /* New resource wholly contained in old, discard. */
            if (start >= bus_rle->start && end <= bus_rle->end)
                break;

            /* New tail overlaps old head, grow existing resource downward. */
            if (start < bus_rle->start && end >= bus_rle->start) {
                bus_rle->count += bus_rle->start - start;
                bus_rle->start = start;
                done = TRUE;
            }

            /* New head overlaps old tail, grow existing resource upward. */
            if (start <= bus_rle->end && end > bus_rle->end) {
                bus_rle->count += end - bus_rle->end;
                bus_rle->end = end;
                done = TRUE;
            }

            /* If we adjusted the old resource, we're finished. */
            if (done)
                break;
        }

        /* If we didn't merge with anything, add this resource. */
        if (bus_rle == NULL)
            resource_list_add_next(bus_rl, type, start, end, count);
    }

    /* After merging/moving resources to the parent, free the list. */
    resource_list_free(dev_rl);

    return (0);
}