// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/acpi/resource.c - ACPI device resources interpretation.
 *
 * Copyright (C) 2012, Intel Corp.
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
static inline bool acpi_iospace_resource_valid(struct resource *res)
{
	/* On X86 IO space is limited to the [0 - 64K] IO port range */
	return res->end < 0x10003;
}
#else
#define valid_IRQ(i) (true)
/*
 * ACPI IO descriptors on arches other than X86 contain MMIO CPU physical
 * addresses mapping IO space in CPU physical address space, IO space
 * resources can be placed anywhere in the 64-bit physical address space.
 */
static inline bool
acpi_iospace_resource_valid(struct resource *res) { return true; }
#endif

#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
{
	return ext_irq->resource_source.string_length == 0 &&
	       ext_irq->producer_consumer == ACPI_CONSUMER;
}
#else
static inline bool is_gsi(struct acpi_resource_extended_irq *ext_irq)
{
	return true;
}
#endif

static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
{
	u64 reslen = end - start + 1;

	/*
	 * CHECKME: len might be required to check versus a minimum
	 * length as well. 1 for io is fine, but for memory it does
	 * not make any sense at all.
	 * Note: some BIOSes report incorrect length for ACPI address space
	 * descriptor, so remove check of 'reslen == len' to avoid regression.
	 */
	if (len && reslen && start <= end)
		return true;

	pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
		 io ? "io" : "mem", start, end, len);

	return false;
}

static void acpi_dev_memresource_flags(struct resource *res, u64 len,
				       u8 write_protect)
{
	res->flags = IORESOURCE_MEM;

	if (!acpi_dev_resource_len_valid(res->start, res->end, len, false))
		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;

	if (write_protect == ACPI_READ_WRITE_MEMORY)
		res->flags |= IORESOURCE_MEM_WRITEABLE;
}

static void acpi_dev_get_memresource(struct resource *res, u64 start, u64 len,
				     u8 write_protect)
{
	res->start = start;
	res->end = start + len - 1;
	acpi_dev_memresource_flags(res, len, write_protect);
}
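/*
 * Example (hypothetical values): for a Memory32 descriptor with minimum
 * 0xfed40000, address_length 0x5000 and ACPI_READ_WRITE_MEMORY,
 * acpi_dev_get_memresource() would produce
 *
 *	res->start = 0xfed40000;
 *	res->end   = 0xfed44fff;
 *	res->flags = IORESOURCE_MEM | IORESOURCE_MEM_WRITEABLE;
 *
 * whereas a zero address_length would mark the resource
 * IORESOURCE_DISABLED | IORESOURCE_UNSET via acpi_dev_resource_len_valid().
 */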
/**
 * acpi_dev_resource_memory - Extract ACPI memory resource information.
 * @ares: Input ACPI resource object.
 * @res: Output generic resource object.
 *
 * Check if the given ACPI resource object represents a memory resource and
 * if that's the case, use the information in it to populate the generic
 * resource object pointed to by @res.
 *
 * Return:
 * 1) false with res->flags set to zero: not the expected resource type
 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
 * 3) true: valid assigned resource
 */
bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
{
	struct acpi_resource_memory24 *memory24;
	struct acpi_resource_memory32 *memory32;
	struct acpi_resource_fixed_memory32 *fixed_memory32;

	switch (ares->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
		memory24 = &ares->data.memory24;
		acpi_dev_get_memresource(res, memory24->minimum << 8,
					 memory24->address_length << 8,
					 memory24->write_protect);
		break;
	case ACPI_RESOURCE_TYPE_MEMORY32:
		memory32 = &ares->data.memory32;
		acpi_dev_get_memresource(res, memory32->minimum,
					 memory32->address_length,
					 memory32->write_protect);
		break;
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		fixed_memory32 = &ares->data.fixed_memory32;
		acpi_dev_get_memresource(res, fixed_memory32->address,
					 fixed_memory32->address_length,
					 fixed_memory32->write_protect);
		break;
	default:
		res->flags = 0;
		return false;
	}

	return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_memory);

static void acpi_dev_ioresource_flags(struct resource *res, u64 len,
				      u8 io_decode, u8 translation_type)
{
	res->flags = IORESOURCE_IO;

	if (!acpi_dev_resource_len_valid(res->start, res->end, len, true))
		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;

	if (!acpi_iospace_resource_valid(res))
		res->flags |= IORESOURCE_DISABLED | IORESOURCE_UNSET;

	if (io_decode == ACPI_DECODE_16)
		res->flags |= IORESOURCE_IO_16BIT_ADDR;
	if (translation_type == ACPI_SPARSE_TRANSLATION)
		res->flags |= IORESOURCE_IO_SPARSE;
}

static void acpi_dev_get_ioresource(struct resource *res, u64 start, u64 len,
				    u8 io_decode)
{
	res->start = start;
	res->end = start + len - 1;
	acpi_dev_ioresource_flags(res, len, io_decode, 0);
}

/**
 * acpi_dev_resource_io - Extract ACPI I/O resource information.
 * @ares: Input ACPI resource object.
 * @res: Output generic resource object.
 *
 * Check if the given ACPI resource object represents an I/O resource and
 * if that's the case, use the information in it to populate the generic
 * resource object pointed to by @res.
 *
 * Return:
 * 1) false with res->flags set to zero: not the expected resource type
 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
 * 3) true: valid assigned resource
 */
bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
{
	struct acpi_resource_io *io;
	struct acpi_resource_fixed_io *fixed_io;

	switch (ares->type) {
	case ACPI_RESOURCE_TYPE_IO:
		io = &ares->data.io;
		acpi_dev_get_ioresource(res, io->minimum,
					io->address_length,
					io->io_decode);
		break;
	case ACPI_RESOURCE_TYPE_FIXED_IO:
		fixed_io = &ares->data.fixed_io;
		acpi_dev_get_ioresource(res, fixed_io->address,
					fixed_io->address_length,
					ACPI_DECODE_10);
		break;
	default:
		res->flags = 0;
		return false;
	}

	return !(res->flags & IORESOURCE_DISABLED);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_io);
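/*
 * Usage sketch (hypothetical callback name "my_crs_cb"): the two helpers
 * above are typically called from an acpi_walk_resources() callback, e.g.
 *
 *	static acpi_status my_crs_cb(struct acpi_resource *ares, void *context)
 *	{
 *		struct resource res;
 *
 *		if (acpi_dev_resource_memory(ares, &res) ||
 *		    acpi_dev_resource_io(ares, &res))
 *			pr_debug("found %pR\n", &res);
 *
 *		return AE_OK;
 *	}
 *
 * Most callers should go through acpi_dev_get_resources() further down
 * instead of walking _CRS by hand; this only illustrates the calling
 * convention of the helpers.
 */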
static bool acpi_decode_space(struct resource_win *win,
			      struct acpi_resource_address *addr,
			      struct acpi_address64_attribute *attr)
{
	u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
	bool wp = addr->info.mem.write_protect;
	u64 len = attr->address_length;
	u64 start, end, offset = 0;
	struct resource *res = &win->res;

	/*
	 * Filter out invalid descriptor according to ACPI Spec 5.0, section
	 * 6.4.3.5 Address Space Resource Descriptors.
	 */
	if ((addr->min_address_fixed != addr->max_address_fixed && len) ||
	    (addr->min_address_fixed && addr->max_address_fixed && !len))
		pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
			 addr->min_address_fixed, addr->max_address_fixed, len);

	/*
	 * For bridges that translate addresses across the bridge,
	 * translation_offset is the offset that must be added to the
	 * address on the secondary side to obtain the address on the
	 * primary side. Non-bridge devices must list 0 for all Address
	 * Translation offset bits.
	 */
	if (addr->producer_consumer == ACPI_PRODUCER)
		offset = attr->translation_offset;
	else if (attr->translation_offset)
		pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
			 attr->translation_offset);
	start = attr->minimum + offset;
	end = attr->maximum + offset;

	win->offset = offset;
	res->start = start;
	res->end = end;
	if (sizeof(resource_size_t) < sizeof(u64) &&
	    (offset != win->offset || start != res->start || end != res->end)) {
		pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
			attr->minimum, attr->maximum);
		return false;
	}

	switch (addr->resource_type) {
	case ACPI_MEMORY_RANGE:
		acpi_dev_memresource_flags(res, len, wp);
		break;
	case ACPI_IO_RANGE:
		acpi_dev_ioresource_flags(res, len, iodec,
					  addr->info.io.translation_type);
		break;
	case ACPI_BUS_NUMBER_RANGE:
		res->flags = IORESOURCE_BUS;
		break;
	default:
		return false;
	}

	if (addr->producer_consumer == ACPI_PRODUCER)
		res->flags |= IORESOURCE_WINDOW;

	if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
		res->flags |= IORESOURCE_PREFETCH;

	return !(res->flags & IORESOURCE_DISABLED);
}
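/*
 * Illustration of the translation handled above (hypothetical numbers): a
 * bridge (ACPI_PRODUCER) window with attr->minimum == 0x0, attr->maximum ==
 * 0xffff and attr->translation_offset == 0x3eff0000 describes child-side
 * addresses 0x0-0xffff that appear at CPU addresses 0x3eff0000-0x3effffff.
 * acpi_decode_space() stores the CPU view in win->res and the offset in
 * win->offset, so the bus address can be recovered as
 * res->start - win->offset.
 */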
/**
 * acpi_dev_resource_address_space - Extract ACPI address space information.
 * @ares: Input ACPI resource object.
 * @win: Output generic resource object.
 *
 * Check if the given ACPI resource object represents an address space resource
 * and if that's the case, use the information in it to populate the generic
 * resource object pointed to by @win.
 *
 * Return:
 * 1) false with win->res.flags set to zero: not the expected resource type
 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
 *    resource
 * 3) true: valid assigned resource
 */
bool acpi_dev_resource_address_space(struct acpi_resource *ares,
				     struct resource_win *win)
{
	struct acpi_resource_address64 addr;

	win->res.flags = 0;
	if (ACPI_FAILURE(acpi_resource_to_address64(ares, &addr)))
		return false;

	return acpi_decode_space(win, (struct acpi_resource_address *)&addr,
				 &addr.address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_address_space);

/**
 * acpi_dev_resource_ext_address_space - Extract ACPI address space information.
 * @ares: Input ACPI resource object.
 * @win: Output generic resource object.
 *
 * Check if the given ACPI resource object represents an extended address space
 * resource and if that's the case, use the information in it to populate the
 * generic resource object pointed to by @win.
 *
 * Return:
 * 1) false with win->res.flags set to zero: not the expected resource type
 * 2) false with IORESOURCE_DISABLED in win->res.flags: valid unassigned
 *    resource
 * 3) true: valid assigned resource
 */
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
					 struct resource_win *win)
{
	struct acpi_resource_extended_address64 *ext_addr;

	win->res.flags = 0;
	if (ares->type != ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64)
		return false;

	ext_addr = &ares->data.ext_address64;

	return acpi_decode_space(win, (struct acpi_resource_address *)ext_addr,
				 &ext_addr->address);
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);

/**
 * acpi_dev_irq_flags - Determine IRQ resource flags.
 * @triggering: Triggering type as provided by ACPI.
 * @polarity: Interrupt polarity as provided by ACPI.
 * @shareable: Whether or not the interrupt is shareable.
 * @wake_capable: Wake capability as provided by ACPI.
 */
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{
	unsigned long flags;

	if (triggering == ACPI_LEVEL_SENSITIVE)
		flags = polarity == ACPI_ACTIVE_LOW ?
			IORESOURCE_IRQ_LOWLEVEL : IORESOURCE_IRQ_HIGHLEVEL;
	else
		flags = polarity == ACPI_ACTIVE_LOW ?
			IORESOURCE_IRQ_LOWEDGE : IORESOURCE_IRQ_HIGHEDGE;

	if (shareable == ACPI_SHARED)
		flags |= IORESOURCE_IRQ_SHAREABLE;

	if (wake_capable == ACPI_WAKE_CAPABLE)
		flags |= IORESOURCE_IRQ_WAKECAPABLE;

	return flags | IORESOURCE_IRQ;
}
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
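/*
 * Example (hypothetical interrupt): for a level-triggered, active-low,
 * shared, wake-capable interrupt, acpi_dev_irq_flags(ACPI_LEVEL_SENSITIVE,
 * ACPI_ACTIVE_LOW, ACPI_SHARED, ACPI_WAKE_CAPABLE) returns
 * IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE |
 * IORESOURCE_IRQ_WAKECAPABLE.
 */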
/**
 * acpi_dev_get_irq_type - Determine irq type.
 * @triggering: Triggering type as provided by ACPI.
 * @polarity: Interrupt polarity as provided by ACPI.
 */
unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
{
	switch (polarity) {
	case ACPI_ACTIVE_LOW:
		return triggering == ACPI_EDGE_SENSITIVE ?
		       IRQ_TYPE_EDGE_FALLING :
		       IRQ_TYPE_LEVEL_LOW;
	case ACPI_ACTIVE_HIGH:
		return triggering == ACPI_EDGE_SENSITIVE ?
		       IRQ_TYPE_EDGE_RISING :
		       IRQ_TYPE_LEVEL_HIGH;
	case ACPI_ACTIVE_BOTH:
		if (triggering == ACPI_EDGE_SENSITIVE)
			return IRQ_TYPE_EDGE_BOTH;
		fallthrough;
	default:
		return IRQ_TYPE_NONE;
	}
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);

static const struct dmi_system_id medion_laptop[] = {
	{
		.ident = "MEDION P15651",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
			DMI_MATCH(DMI_BOARD_NAME, "M15T"),
		},
	},
	{
		.ident = "MEDION S17405",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
			DMI_MATCH(DMI_BOARD_NAME, "M17T"),
		},
	},
	{ }
};

static const struct dmi_system_id asus_laptop[] = {
	{
		.ident = "Asus Vivobook K3402ZA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
		},
	},
	{
		.ident = "Asus Vivobook K3502ZA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
		},
	},
	{
		.ident = "Asus Vivobook S5402ZA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
		},
	},
	{
		.ident = "Asus Vivobook S5602ZA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
		},
	},
	{
		.ident = "Asus ExpertBook B2402CBA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
		},
	},
	{
		.ident = "Asus ExpertBook B2502",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
		},
	},
	{ }
};

static const struct dmi_system_id lenovo_laptop[] = {
	{
		.ident = "LENOVO IdeaPad Flex 5 14ALC7",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
		},
	},
	{
		.ident = "LENOVO IdeaPad Flex 5 16ALC7",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
		},
	},
	{ }
};

static const struct dmi_system_id schenker_gm_rg[] = {
	{
		.ident = "XMG CORE 15 (M22)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
		},
	},
	{ }
};

struct irq_override_cmp {
	const struct dmi_system_id *system;
	unsigned char irq;
	unsigned char triggering;
	unsigned char polarity;
	unsigned char shareable;
	bool override;
};

static const struct irq_override_cmp override_table[] = {
	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
	{ schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
};
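/*
 * Sketch of what an additional quirk entry would look like (the
 * "example_laptop" DMI table is hypothetical, not a real machine): boards
 * matching it would have the legacy IRQ override skipped for GSI 1 when it
 * is declared level-triggered and active-low:
 *
 *	{ example_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
 *
 * An entry only applies when the declared triggering, polarity and
 * shareable values all match, as checked in acpi_dev_irq_override() below.
 */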
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
				  u8 shareable)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(override_table); i++) {
		const struct irq_override_cmp *entry = &override_table[i];

		if (dmi_check_system(entry->system) &&
		    entry->irq == gsi &&
		    entry->triggering == triggering &&
		    entry->polarity == polarity &&
		    entry->shareable == shareable)
			return entry->override;
	}

#ifdef CONFIG_X86
	/*
	 * IRQ override isn't needed on modern AMD Zen systems and
	 * this override breaks active low IRQs on AMD Ryzen 6000 and
	 * newer systems. Skip it.
	 */
	if (boot_cpu_has(X86_FEATURE_ZEN))
		return false;
#endif

	return true;
}

static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
				     u8 triggering, u8 polarity, u8 shareable,
				     u8 wake_capable, bool check_override)
{
	int irq, p, t;

	if (!valid_IRQ(gsi)) {
		irqresource_disabled(res, gsi);
		return;
	}

	/*
	 * In IO-APIC mode, use overridden attribute. Two reasons:
	 * 1. BIOS bug in DSDT
	 * 2. BIOS uses IO-APIC mode Interrupt Source Override
	 *
	 * We do this only if we are dealing with IRQ() or IRQNoFlags()
	 * resource (the legacy ISA resources). With modern ACPI 5 devices
	 * using extended IRQ descriptors we take the IRQ configuration
	 * from _CRS directly.
	 */
	if (check_override &&
	    acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
	    !acpi_get_override_irq(gsi, &t, &p)) {
		u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
		u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

		if (triggering != trig || polarity != pol) {
			pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
				t ? "level" : "edge",
				trig == triggering ? "" : "(!)",
				p ? "low" : "high",
				pol == polarity ? "" : "(!)");
			triggering = trig;
			polarity = pol;
		}
	}

	res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
	irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
	if (irq >= 0) {
		res->start = irq;
		res->end = irq;
	} else {
		irqresource_disabled(res, gsi);
	}
}
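/*
 * Example of the override path above (hypothetical values): if _CRS declares
 * GSI 7 as edge-triggered and active-high, but the interrupt source override
 * reported by acpi_get_override_irq() says level-triggered and active-low,
 * the effective configuration becomes level/active-low and a message of the
 * form "ACPI: IRQ 7 override to level(!), low(!)" is logged before the GSI
 * is registered with acpi_register_gsi().
 */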
/**
 * acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
 * @ares: Input ACPI resource object.
 * @index: Index into the array of GSIs represented by the resource.
 * @res: Output generic resource object.
 *
 * Check if the given ACPI resource object represents an interrupt resource
 * and @index does not exceed the resource's interrupt count (true is returned
 * in that case regardless of the results of the other checks). If that's the
 * case, register the GSI corresponding to @index from the array of interrupts
 * represented by the resource and populate the generic resource object pointed
 * to by @res accordingly. If the registration of the GSI is not successful,
 * IORESOURCE_DISABLED will be set in that object's flags.
 *
 * Return:
 * 1) false with res->flags set to zero: not the expected resource type
 * 2) false with IORESOURCE_DISABLED in res->flags: valid unassigned resource
 * 3) true: valid assigned resource
 */
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
				 struct resource *res)
{
	struct acpi_resource_irq *irq;
	struct acpi_resource_extended_irq *ext_irq;

	switch (ares->type) {
	case ACPI_RESOURCE_TYPE_IRQ:
		/*
		 * Per spec, only one interrupt per descriptor is allowed in
		 * _CRS, but some firmware violates this, so parse them all.
		 */
		irq = &ares->data.irq;
		if (index >= irq->interrupt_count) {
			irqresource_disabled(res, 0);
			return false;
		}
		acpi_dev_get_irqresource(res, irq->interrupts[index],
					 irq->triggering, irq->polarity,
					 irq->shareable, irq->wake_capable,
					 true);
		break;
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		ext_irq = &ares->data.extended_irq;
		if (index >= ext_irq->interrupt_count) {
			irqresource_disabled(res, 0);
			return false;
		}
		if (is_gsi(ext_irq))
			acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
						 ext_irq->triggering,
						 ext_irq->polarity,
						 ext_irq->shareable,
						 ext_irq->wake_capable,
						 false);
		else
			irqresource_disabled(res, 0);
		break;
	default:
		res->flags = 0;
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt);
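/*
 * Usage sketch (hypothetical local variables; @ares comes from a resource
 * walk): since a single IRQ descriptor may carry several interrupts, callers
 * iterate over @index until the function reports it is out of range,
 * mirroring the loop in acpi_dev_process_resource() below:
 *
 *	struct resource r;
 *	int i;
 *
 *	for (i = 0; acpi_dev_resource_interrupt(ares, i, &r); i++)
 *		pr_debug("IRQ resource %d: %pR\n", i, &r);
 */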
/**
 * acpi_dev_free_resource_list - Free resource list from %acpi_dev_get_resources().
 * @list: The head of the resource list to free.
 */
void acpi_dev_free_resource_list(struct list_head *list)
{
	resource_list_free(list);
}
EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list);

struct res_proc_context {
	struct list_head *list;
	int (*preproc)(struct acpi_resource *, void *);
	void *preproc_data;
	int count;
	int error;
};

static acpi_status acpi_dev_new_resource_entry(struct resource_win *win,
					       struct res_proc_context *c)
{
	struct resource_entry *rentry;

	rentry = resource_list_create_entry(NULL, 0);
	if (!rentry) {
		c->error = -ENOMEM;
		return AE_NO_MEMORY;
	}
	*rentry->res = win->res;
	rentry->offset = win->offset;
	resource_list_add_tail(rentry, c->list);
	c->count++;
	return AE_OK;
}

static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
					     void *context)
{
	struct res_proc_context *c = context;
	struct resource_win win;
	struct resource *res = &win.res;
	int i;

	if (c->preproc) {
		int ret;

		ret = c->preproc(ares, c->preproc_data);
		if (ret < 0) {
			c->error = ret;
			return AE_ABORT_METHOD;
		} else if (ret > 0) {
			return AE_OK;
		}
	}

	memset(&win, 0, sizeof(win));

	if (acpi_dev_resource_memory(ares, res)
	    || acpi_dev_resource_io(ares, res)
	    || acpi_dev_resource_address_space(ares, &win)
	    || acpi_dev_resource_ext_address_space(ares, &win))
		return acpi_dev_new_resource_entry(&win, c);

	for (i = 0; acpi_dev_resource_interrupt(ares, i, res); i++) {
		acpi_status status;

		status = acpi_dev_new_resource_entry(&win, c);
		if (ACPI_FAILURE(status))
			return status;
	}

	return AE_OK;
}

static int __acpi_dev_get_resources(struct acpi_device *adev,
				    struct list_head *list,
				    int (*preproc)(struct acpi_resource *, void *),
				    void *preproc_data, char *method)
{
	struct res_proc_context c;
	acpi_status status;

	if (!adev || !adev->handle || !list_empty(list))
		return -EINVAL;

	if (!acpi_has_method(adev->handle, method))
		return 0;

	c.list = list;
	c.preproc = preproc;
	c.preproc_data = preproc_data;
	c.count = 0;
	c.error = 0;
	status = acpi_walk_resources(adev->handle, method,
				     acpi_dev_process_resource, &c);
	if (ACPI_FAILURE(status)) {
		acpi_dev_free_resource_list(list);
		return c.error ? c.error : -EIO;
	}

	return c.count;
}

/**
 * acpi_dev_get_resources - Get current resources of a device.
 * @adev: ACPI device node to get the resources for.
 * @list: Head of the resultant list of resources (must be empty).
 * @preproc: The caller's preprocessing routine.
 * @preproc_data: Pointer passed to the caller's preprocessing routine.
 *
 * Evaluate the _CRS method for the given device node and process its output by
 * (1) executing the @preproc() routine provided by the caller, passing the
 * resource pointer and @preproc_data to it as arguments, for each ACPI resource
 * returned and (2) converting all of the returned ACPI resources into struct
 * resource objects if possible. If the return value of @preproc() in step (1)
 * is different from 0, step (2) is not applied to the given ACPI resource and
 * if that value is negative, the whole processing is aborted and that value is
 * returned as the final error code.
 *
 * The resultant struct resource objects are put on the list pointed to by
 * @list, that must be empty initially, as members of struct resource_entry
 * objects. Callers of this routine should use %acpi_dev_free_resource_list() to
 * free that list.
 *
 * The number of resources in the output list is returned on success, an error
 * code reflecting the error condition is returned otherwise.
 */
int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
			   int (*preproc)(struct acpi_resource *, void *),
			   void *preproc_data)
{
	return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
					METHOD_NAME__CRS);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
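/*
 * Typical usage sketch (hypothetical caller, no @preproc filtering):
 *
 *	LIST_HEAD(res_list);
 *	struct resource_entry *entry;
 *	int count;
 *
 *	count = acpi_dev_get_resources(adev, &res_list, NULL, NULL);
 *	if (count < 0)
 *		return count;
 *
 *	resource_list_for_each_entry(entry, &res_list)
 *		pr_debug("resource: %pR\n", entry->res);
 *
 *	acpi_dev_free_resource_list(&res_list);
 */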
static int is_memory(struct acpi_resource *ares, void *not_used)
{
	struct resource_win win;
	struct resource *res = &win.res;

	memset(&win, 0, sizeof(win));

	if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM))
		return 1;

	return !(acpi_dev_resource_memory(ares, res)
	       || acpi_dev_resource_address_space(ares, &win)
	       || acpi_dev_resource_ext_address_space(ares, &win));
}

/**
 * acpi_dev_get_dma_resources - Get current DMA resources of a device.
 * @adev: ACPI device node to get the resources for.
 * @list: Head of the resultant list of resources (must be empty).
 *
 * Evaluate the _DMA method for the given device node and process its
 * output.
 *
 * The resultant struct resource objects are put on the list pointed to
 * by @list, that must be empty initially, as members of struct
 * resource_entry objects. Callers of this routine should use
 * %acpi_dev_free_resource_list() to free that list.
 *
 * The number of resources in the output list is returned on success,
 * an error code reflecting the error condition is returned otherwise.
 */
int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
{
	return __acpi_dev_get_resources(adev, list, is_memory, NULL,
					METHOD_NAME__DMA);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);

/**
 * acpi_dev_get_memory_resources - Get current memory resources of a device.
 * @adev: ACPI device node to get the resources for.
 * @list: Head of the resultant list of resources (must be empty).
 *
 * This is a helper function that locates all memory type resources of @adev
 * with acpi_dev_get_resources().
 *
 * The number of resources in the output list is returned on success, an error
 * code reflecting the error condition is returned otherwise.
 */
int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list)
{
	return acpi_dev_get_resources(adev, list, is_memory, NULL);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources);

/**
 * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
 *				   types
 * @ares: Input ACPI resource object.
 * @types: Valid resource types of IORESOURCE_XXX
 *
 * This is a helper function to support acpi_dev_get_resources(), which filters
 * ACPI resource objects according to resource types.
 */
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
				  unsigned long types)
{
	unsigned long type = 0;

	switch (ares->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24:
	case ACPI_RESOURCE_TYPE_MEMORY32:
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
		type = IORESOURCE_MEM;
		break;
	case ACPI_RESOURCE_TYPE_IO:
	case ACPI_RESOURCE_TYPE_FIXED_IO:
		type = IORESOURCE_IO;
		break;
	case ACPI_RESOURCE_TYPE_IRQ:
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		type = IORESOURCE_IRQ;
		break;
	case ACPI_RESOURCE_TYPE_DMA:
	case ACPI_RESOURCE_TYPE_FIXED_DMA:
		type = IORESOURCE_DMA;
		break;
	case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
		type = IORESOURCE_REG;
		break;
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
	case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
		if (ares->data.address.resource_type == ACPI_MEMORY_RANGE)
			type = IORESOURCE_MEM;
		else if (ares->data.address.resource_type == ACPI_IO_RANGE)
			type = IORESOURCE_IO;
		else if (ares->data.address.resource_type ==
			 ACPI_BUS_NUMBER_RANGE)
			type = IORESOURCE_BUS;
		break;
	default:
		break;
	}

	return (type & types) ? 0 : 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);

static int acpi_dev_consumes_res(struct acpi_device *adev, struct resource *res)
{
	struct list_head resource_list;
	struct resource_entry *rentry;
	int ret, found = 0;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
	if (ret < 0)
		return 0;

	list_for_each_entry(rentry, &resource_list, node) {
		if (resource_contains(rentry->res, res)) {
			found = 1;
			break;
		}
	}

	acpi_dev_free_resource_list(&resource_list);
	return found;
}

static acpi_status acpi_res_consumer_cb(acpi_handle handle, u32 depth,
					void *context, void **ret)
{
	struct resource *res = context;
	struct acpi_device **consumer = (struct acpi_device **) ret;
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (!adev)
		return AE_OK;

	if (acpi_dev_consumes_res(adev, res)) {
		*consumer = adev;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

/**
 * acpi_resource_consumer - Find the ACPI device that consumes @res.
 * @res: Resource to search for.
 *
 * Search the current resource settings (_CRS) of every ACPI device node
 * for @res. If we find an ACPI device whose _CRS includes @res, return
 * it. Otherwise, return NULL.
 */
struct acpi_device *acpi_resource_consumer(struct resource *res)
{
	struct acpi_device *consumer = NULL;

	acpi_get_devices(NULL, acpi_res_consumer_cb, res, (void **) &consumer);
	return consumer;
}
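/*
 * Usage sketch (hypothetical region): given a resource such as
 *
 *	struct resource r = DEFINE_RES_MEM(0xfed40000, 0x5000);
 *	struct acpi_device *adev = acpi_resource_consumer(&r);
 *
 * adev points to the first ACPI device whose _CRS contains the region, or is
 * NULL if no device claims it.
 */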