/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/pci_cfgreg.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

#ifdef INTRNG
#include "acpi_bus_if.h"
#endif

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_BUS
ACPI_MODULE_NAME("RESOURCE")

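/*
 * Context for acpi_lookup_irq_handler(): the interrupt number (and,
 * optionally, the resource ID) to match while walking a device's _CRS,
 * plus the trigger mode and polarity of the matched resource.
 */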
struct lookup_irq_request {
    ACPI_RESOURCE *acpi_res;
    u_int irq;
    int counter;
    int rid;
    int found;
    int checkrid;
    int trig;
    int pol;
};

static char *pcilink_ids[] = { "PNP0C0F", NULL };

/*
 * Devices with invalid memory resources.
 */
static char *bad_memresource_ids[] = {
    /* PRCx on Radxa Orion O6 conflicts with the PCI resource range */
    "CIXH2020",
    NULL
};

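/*
 * AcpiWalkResources() callback: match IRQ and ExtendedIRQ resources in
 * _CRS against the request and, on a match, record the trigger mode,
 * polarity, and (optionally) a copy of the ACPI resource itself.
 */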
static ACPI_STATUS
acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
{
    struct lookup_irq_request *req;
    size_t len;
    u_int irqnum, trig, pol;
    bool found;

    found = false;
    req = (struct lookup_irq_request *)context;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        irqnum = res->Data.Irq.InterruptCount;
        for (int i = 0; i < irqnum; i++) {
            if (res->Data.Irq.Interrupts[i] == req->irq) {
                found = true;
                break;
            }
        }
        len = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        irqnum = res->Data.ExtendedIrq.InterruptCount;
        for (int i = 0; i < irqnum; i++) {
            if (res->Data.ExtendedIrq.Interrupts[i] == req->irq) {
                found = true;
                break;
            }
        }
        len = ACPI_RS_SIZE(ACPI_RESOURCE_EXTENDED_IRQ);
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        return (AE_OK);
    }
    if (!found)
        return (AE_OK);
    if (req->checkrid) {
        if (req->counter != req->rid) {
            req->counter++;
            return (AE_OK);
        }
    }
    req->found = 1;
    req->pol = pol;
    req->trig = trig;
    if (req->acpi_res != NULL)
        bcopy(res, req->acpi_res, len);
    return (AE_CTRL_TERMINATE);
}

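/*
 * Find the _CRS interrupt resource that backs the given rid/resource pair
 * and, if acpi_res is non-NULL, copy it out for the caller.
 */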
ACPI_STATUS
acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res,
    ACPI_RESOURCE *acpi_res)
{
    struct lookup_irq_request req;
    ACPI_STATUS status;

    req.acpi_res = acpi_res;
    req.irq = rman_get_start(res);
    req.counter = 0;
    req.rid = rid;
    req.found = 0;
    req.checkrid = 1;
    status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
        acpi_lookup_irq_handler, &req);
    if (ACPI_SUCCESS(status) && req.found == 0)
        status = AE_NOT_FOUND;
    return (status);
}

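/*
 * Program the interrupt controller for a single-interrupt IRQ or
 * ExtendedIRQ resource using its ACPI trigger mode and polarity.
 */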
void
acpi_config_intr(device_t dev, ACPI_RESOURCE *res)
{
    u_int irq;
    int pol, trig;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        KASSERT(res->Data.Irq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.Irq.Interrupts[0];
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        KASSERT(res->Data.ExtendedIrq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.ExtendedIrq.Interrupts[0];
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        panic("%s: bad resource type %u", __func__, res->Type);
    }

#if defined(__amd64__) || defined(__i386__)
    if (irq < 16 && trig == ACPI_EDGE_SENSITIVE && pol == ACPI_ACTIVE_LOW &&
        acpi_override_isa_irq_polarity) {
        device_printf(dev, "forcing active-hi polarity for IRQ %u\n", irq);
        pol = ACPI_ACTIVE_HIGH;
    }
#endif
    BUS_CONFIG_INTR(dev, irq, (trig == ACPI_EDGE_SENSITIVE) ?
        INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ?
        INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}

#ifdef INTRNG
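/*
 * Map an interrupt through the INTRNG parent, using the trigger mode and
 * polarity from the device's _CRS when a handle is provided; otherwise
 * fall back to level-triggered, active-high.
 */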
int
acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle)
{
    struct lookup_irq_request req;
    int trig, pol;

    trig = ACPI_LEVEL_SENSITIVE;
    pol = ACPI_ACTIVE_HIGH;
    if (handle != NULL) {
        req.found = 0;
        req.acpi_res = NULL;
        req.irq = irq;
        req.counter = 0;
        req.rid = 0;
        req.checkrid = 0;
        AcpiWalkResources(handle, "_CRS", acpi_lookup_irq_handler, &req);
        if (req.found != 0) {
            trig = req.trig;
            pol = req.pol;
        }
    }
    return ACPI_BUS_MAP_INTR(device_get_parent(dev), dev, irq,
        (trig == ACPI_EDGE_SENSITIVE) ? INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL,
        (pol == ACPI_ACTIVE_HIGH) ? INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}
#endif

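/*
 * Per-walk state handed to acpi_parse_resource(): the resource-set
 * callbacks, the device being parsed, and whether ResourceProducer
 * entries should be parsed anyway for known-broken firmware.
 */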
struct acpi_resource_context {
    struct acpi_parse_resource_set *set;
    device_t dev;
    void *context;
    bool ignore_producer_flag;
};

#ifdef ACPI_DEBUG_OUTPUT
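/* Human-readable name for an Address-space resource type, for debugging. */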
static const char *
acpi_address_range_name(UINT8 ResourceType)
{
    static char buf[16];

    switch (ResourceType) {
    case ACPI_MEMORY_RANGE:
        return ("Memory");
    case ACPI_IO_RANGE:
        return ("IO");
    case ACPI_BUS_NUMBER_RANGE:
        return ("Bus Number");
    default:
        snprintf(buf, sizeof(buf), "type %u", ResourceType);
        return (buf);
    }
}
#endif

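/*
 * AcpiWalkResources() callback: decode one _CRS resource descriptor and
 * hand it to the appropriate acpi_parse_resource_set callback.
 */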
static ACPI_STATUS
acpi_parse_resource(ACPI_RESOURCE *res, void *context)
{
    struct acpi_parse_resource_set *set;
    struct acpi_resource_context *arc;
    UINT64 min, max, length, gran;
#ifdef ACPI_DEBUG
    const char *name;
#endif
    device_t dev;

    arc = context;
    dev = arc->dev;
    set = arc->set;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_END_TAG:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n"));
        break;
    case ACPI_RESOURCE_TYPE_FIXED_IO:
        if (res->Data.FixedIo.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n",
            res->Data.FixedIo.Address, res->Data.FixedIo.AddressLength));
        set->set_ioport(dev, arc->context, res->Data.FixedIo.Address,
            res->Data.FixedIo.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_IO:
        if (res->Data.Io.AddressLength <= 0)
            break;
        if (res->Data.Io.Minimum == res->Data.Io.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.AddressLength));
            set->set_ioport(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.Maximum,
                res->Data.Io.AddressLength));
            set->set_iorange(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.Maximum, res->Data.Io.AddressLength,
                res->Data.Io.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
        if (res->Data.FixedMemory32.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n",
            res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength));
        set->set_memory(dev, arc->context, res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_MEMORY32:
        if (res->Data.Memory32.AddressLength <= 0)
            break;
        if (res->Data.Memory32.Minimum == res->Data.Memory32.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n",
                res->Data.Memory32.Minimum, res->Data.Memory32.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory32.Minimum,
                res->Data.Memory32.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n",
                res->Data.Memory32.Minimum, res->Data.Memory32.Maximum,
                res->Data.Memory32.AddressLength));
            set->set_memoryrange(dev, arc->context, res->Data.Memory32.Minimum,
                res->Data.Memory32.Maximum, res->Data.Memory32.AddressLength,
                res->Data.Memory32.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_MEMORY24:
        if (res->Data.Memory24.AddressLength <= 0)
            break;
        if (res->Data.Memory24.Minimum == res->Data.Memory24.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n",
                res->Data.Memory24.Minimum, res->Data.Memory24.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory24.Minimum,
                res->Data.Memory24.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n",
                res->Data.Memory24.Minimum, res->Data.Memory24.Maximum,
                res->Data.Memory24.AddressLength));
            set->set_memoryrange(dev, arc->context, res->Data.Memory24.Minimum,
                res->Data.Memory24.Maximum, res->Data.Memory24.AddressLength,
                res->Data.Memory24.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_IRQ:
        /*
         * From ACPI 1.0b, section 6.4.2:
         * "This structure is repeated for each separate interrupt
         * required."
         */
        set->set_irq(dev, arc->context, res->Data.Irq.Interrupts,
            res->Data.Irq.InterruptCount, res->Data.Irq.Triggering,
            res->Data.Irq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_DMA:
        /*
         * From ACPI 1.0b, section 6.4.3:
         * "This structure is repeated for each separate DMA channel
         * required."
         */
        set->set_drq(dev, arc->context, res->Data.Dma.Channels,
            res->Data.Dma.ChannelCount);
        break;
    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependent functions\n"));
        set->set_start_dependent(dev, arc->context,
            res->Data.StartDpf.CompatibilityPriority);
        break;
    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependent functions\n"));
        set->set_end_dependent(dev, arc->context);
        break;
    case ACPI_RESOURCE_TYPE_ADDRESS16:
    case ACPI_RESOURCE_TYPE_ADDRESS32:
    case ACPI_RESOURCE_TYPE_ADDRESS64:
    case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
        switch (res->Type) {
        case ACPI_RESOURCE_TYPE_ADDRESS16:
            gran = res->Data.Address16.Address.Granularity;
            min = res->Data.Address16.Address.Minimum;
            max = res->Data.Address16.Address.Maximum;
            length = res->Data.Address16.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address16";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS32:
            gran = res->Data.Address32.Address.Granularity;
            min = res->Data.Address32.Address.Minimum;
            max = res->Data.Address32.Address.Maximum;
            length = res->Data.Address32.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address32";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS64:
            gran = res->Data.Address64.Address.Granularity;
            min = res->Data.Address64.Address.Minimum;
            max = res->Data.Address64.Address.Maximum;
            length = res->Data.Address64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address64";
#endif
            break;
        default:
            KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
                ("should never happen"));
            gran = res->Data.ExtAddress64.Address.Granularity;
            min = res->Data.ExtAddress64.Address.Minimum;
            max = res->Data.ExtAddress64.Address.Maximum;
            length = res->Data.ExtAddress64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "ExtAddress64";
#endif
            break;
        }
        if (length <= 0)
            break;
        if (!arc->ignore_producer_flag &&
            res->Data.Address.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s %s producer\n", name,
                acpi_address_range_name(res->Data.Address.ResourceType)));
            break;
        }
        if (res->Data.Address.ResourceType != ACPI_MEMORY_RANGE &&
            res->Data.Address.ResourceType != ACPI_IO_RANGE) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s for non-memory, non-I/O\n", name));
            break;
        }

#ifdef __i386__
        if (min > ULONG_MAX || (res->Data.Address.MaxAddressFixed && max >
            ULONG_MAX)) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored %s above 4G\n",
                name));
            break;
        }
        if (max > ULONG_MAX)
            max = ULONG_MAX;
#endif
        if (res->Data.Address.MinAddressFixed == ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed == ACPI_ADDRESS_FIXED) {
            if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/Memory 0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)length));
                set->set_memory(dev, arc->context, min, length);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n", name,
                    (uintmax_t)min, (uintmax_t)length));
                set->set_ioport(dev, arc->context, min, length);
            }
        } else if (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED) {
            /* Fixed size, variable location resource descriptor */
            min = roundup(min, gran + 1);
            if ((min + length - 1) > max) {
                device_printf(dev,
                    "invalid memory range: start: %jx end: %jx max: %jx\n",
                    (uintmax_t)min, (uintmax_t)(min + length - 1),
                    (uintmax_t)max);
            } else {
                if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                        "%s/Memory 0x%jx/%ju\n", name, (uintmax_t)min,
                        (uintmax_t)length));
                    set->set_memory(dev, arc->context, min, length);
                } else {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n",
                        name, (uintmax_t)min, (uintmax_t)length));
                    set->set_ioport(dev, arc->context, min, length);
                }
            }
        } else {
            if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                    "%s/Memory 0x%jx-0x%jx/%ju\n", name, (uintmax_t)min,
                    (uintmax_t)max, (uintmax_t)length));
                set->set_memoryrange(dev, arc->context, min, max, length, gran);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx-0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)max, (uintmax_t)length));
                set->set_iorange(dev, arc->context, min, max, length, gran);
            }
        }
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        if (res->Data.ExtendedIrq.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored ExtIRQ producer\n"));
            break;
        }
        set->set_ext_irq(dev, arc->context, res->Data.ExtendedIrq.Interrupts,
            res->Data.ExtendedIrq.InterruptCount,
            res->Data.ExtendedIrq.Triggering, res->Data.ExtendedIrq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_VENDOR:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
            "unimplemented VendorSpecific resource\n"));
        break;
    default:
        break;
    }
    return (AE_OK);
}

/*
 * Fetch a device's resources and associate them with the device.
 *
 * Note that it might be nice to also locate ACPI-specific resource items,
 * such as GPE bits.
 *
 * We really need to split the resource-fetching code out from the
 * resource-parsing code, since we may want to use the parsing
 * code for _PRS someday.
 */
ACPI_STATUS
acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
    struct acpi_parse_resource_set *set, void *arg)
{
    struct acpi_resource_context arc;
    ACPI_STATUS status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    set->set_init(dev, arg, &arc.context);
    arc.set = set;
    arc.dev = dev;
    arc.ignore_producer_flag = false;

    /*
     * With 7.2 firmware, UARTs on ThunderX2 set ResourceProducer on their
     * memory resources.
     */
    if (acpi_MatchHid(handle, "ARMH0011") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * ARM Coresight devices on the N1SDP (ETM, STM, TPIU, ETF/ETR, REP,
     * FUN) set ResourceProducer on memory resources.
     */
    if (acpi_MatchHid(handle, "ARMHC500") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC502") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC600") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC979") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC97C") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC98D") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC9FF") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHD620") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * The DesignWare I2C controller on Ampere Altra sets ResourceProducer
     * on memory resources.
     */
    if (acpi_MatchHid(handle, "APMC0D0F") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    status = AcpiWalkResources(handle, "_CRS", acpi_parse_resource, &arc);
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
        printf("can't fetch resources for %s - %s\n",
            acpi_name(handle), AcpiFormatException(status));
        return_ACPI_STATUS (status);
    }
    set->set_done(dev, arc.context);
    return_ACPI_STATUS (AE_OK);
}
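
/*
 * A minimal usage sketch (not taken from this file): a caller that wants a
 * child's _CRS turned into bus resources can hand acpi_parse_resources()
 * the default parse set, roughly as follows.
 *
 *	status = acpi_parse_resources(child, acpi_get_handle(child),
 *	    &acpi_res_parse_set, NULL);
 *	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
 *		device_printf(child, "couldn't parse _CRS\n");
 *
 * acpi_res_parse_set (defined below) records each decoded resource on the
 * child via bus_set_resource().
 */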

/*
 * Resource-set vectors used to attach _CRS-derived resources
 * to an ACPI device.
 */
static void acpi_res_set_init(device_t dev, void *arg, void **context);
static void acpi_res_set_done(device_t dev, void *context);
static void acpi_res_set_ioport(device_t dev, void *context,
    uint64_t base, uint64_t length);
static void acpi_res_set_iorange(device_t dev, void *context,
    uint64_t low, uint64_t high, uint64_t length, uint64_t align);
static void acpi_res_set_memory(device_t dev, void *context,
    uint64_t base, uint64_t length);
static void acpi_res_set_memoryrange(device_t dev, void *context,
    uint64_t low, uint64_t high, uint64_t length, uint64_t align);
static void acpi_res_set_irq(device_t dev, void *context, uint8_t *irq,
    int count, int trig, int pol);
static void acpi_res_set_ext_irq(device_t dev, void *context,
    uint32_t *irq, int count, int trig, int pol);
static void acpi_res_set_drq(device_t dev, void *context, uint8_t *drq,
    int count);
static void acpi_res_set_start_dependent(device_t dev, void *context,
    int preference);
static void acpi_res_set_end_dependent(device_t dev, void *context);

struct acpi_parse_resource_set acpi_res_parse_set = {
    acpi_res_set_init,
    acpi_res_set_done,
    acpi_res_set_ioport,
    acpi_res_set_iorange,
    acpi_res_set_memory,
    acpi_res_set_memoryrange,
    acpi_res_set_irq,
    acpi_res_set_ext_irq,
    acpi_res_set_drq,
    acpi_res_set_start_dependent,
    acpi_res_set_end_dependent
};

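/*
 * Per-device parse state: counters used to assign the next rid for each
 * resource type, plus the opaque parent argument from set_init().
 */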
struct acpi_res_context {
    int ar_nio;
    int ar_nmem;
    int ar_nirq;
    int ar_ndrq;
    void *ar_parent;
};

/*
 * Some resources reported via _CRS should not be added as bus
 * resources.  This function returns true if a resource reported via
 * _CRS should be ignored.
 */
static bool
acpi_res_ignore(device_t dev, int type, rman_res_t start, rman_res_t count)
{
    struct acpi_device *ad = device_get_ivars(dev);
    ACPI_DEVICE_INFO *devinfo;
    bool allow;

    /* Ignore IRQ resources for PCI link devices. */
    if (type == SYS_RES_IRQ &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, pcilink_ids, NULL) <= 0)
        return (true);

    /*
     * Ignore most resources for PCI root bridges.  Some BIOSes
     * incorrectly enumerate the memory ranges they decode as plain
     * memory resources instead of as ResourceProducer ranges.  Other
     * BIOSes incorrectly list system resource entries for I/O ranges
     * under the PCI bridge.  Do allow the one known-correct case on
     * x86 of a PCI bridge claiming the I/O ports used for PCI config
     * access.
     */
    if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
        if (type == SYS_RES_MEMORY &&
            ACPI_ID_PROBE(device_get_parent(dev), dev, bad_memresource_ids,
            NULL) <= 0)
            return (true);

        if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
            if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
                allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT);
#else
                allow = false;
#endif
                if (!allow) {
                    AcpiOsFree(devinfo);
                    return (true);
                }
            }
            AcpiOsFree(devinfo);
        }
    }

    return (false);
}

static void
acpi_res_set_init(device_t dev, void *arg, void **context)
{
    struct acpi_res_context *cp;

    if ((cp = AcpiOsAllocate(sizeof(*cp))) != NULL) {
        bzero(cp, sizeof(*cp));
        cp->ar_parent = arg;
        *context = cp;
    }
}

static void
acpi_res_set_done(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    AcpiOsFree(cp);
}

static void
acpi_res_set_ioport(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_IOPORT, base, length))
        return;
    bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length);
}

static void
acpi_res_set_iorange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;

    /*
     * XXX: Some BIOSes contain buggy _CRS entries where fixed I/O
     * ranges have the maximum base address (_MAX) set to the end of the
     * I/O range instead of the start.  These are then treated as a
     * relocatable I/O range rather than a fixed I/O resource.  As a
     * workaround, treat I/O resources encoded this way as fixed I/O
     * ports.
     */
    if (high == (low + length)) {
        if (bootverbose)
            device_printf(dev,
                "_CRS has fixed I/O port range defined as relocatable\n");

        if (acpi_res_ignore(dev, SYS_RES_IOPORT, low, length))
            return;
        bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, low, length);
        return;
    }

    device_printf(dev, "I/O range not supported\n");
}

static void
acpi_res_set_memory(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_MEMORY, base, length))
        return;
    bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length);
}

static void
acpi_res_set_memoryrange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "memory range not supported\n");
}

static void
acpi_res_set_irq(device_t dev, void *context, uint8_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_ext_irq(device_t dev, void *context, uint32_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_drq(device_t dev, void *context, uint8_t *drq, int count)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL || drq == NULL)
        return;

    /* This implements no resource relocation. */
    if (count != 1)
        return;

    if (acpi_res_ignore(dev, SYS_RES_DRQ, *drq, 1))
        return;
    bus_set_resource(dev, SYS_RES_DRQ, cp->ar_ndrq++, *drq, 1);
}

static void
acpi_res_set_start_dependent(device_t dev, void *context, int preference)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

static void
acpi_res_set_end_dependent(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

/*
 * Resource-owning placeholders for IO and memory pseudo-devices.
 *
 * This code allocates system resources that will be used by ACPI
 * child devices.  The acpi parent manages these resources through a
 * private rman.
 */

static int acpi_sysres_probe(device_t dev);
static int acpi_sysres_attach(device_t dev);

static device_method_t acpi_sysres_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_sysres_probe),
    DEVMETHOD(device_attach, acpi_sysres_attach),

    DEVMETHOD_END
};

static driver_t acpi_sysres_driver = {
    "acpi_sysresource",
    acpi_sysres_methods,
    0,
};

DRIVER_MODULE(acpi_sysresource, acpi, acpi_sysres_driver, 0, 0);
MODULE_DEPEND(acpi_sysresource, acpi, 1, 1, 1);

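/*
 * Match PNP0C01/PNP0C02 "system resource" objects; the probe is quiet
 * because these pseudo-devices exist only to reserve address ranges.
 */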
static int
acpi_sysres_probe(device_t dev)
{
    static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
    int rv;

    if (acpi_disabled("sysresource"))
        return (ENXIO);
    rv = ACPI_ID_PROBE(device_get_parent(dev), dev, sysres_ids, NULL);
    if (rv > 0)
        return (rv);
    device_set_desc(dev, "System Resource");
    device_quiet(dev);
    return (rv);
}

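/*
 * Fold this device's I/O and memory resources into the ACPI bus's
 * system-resource list, merging ranges that overlap existing entries,
 * then free the device's own list; the ranges are served from the acpi
 * parent's rman afterwards.
 */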
static int
acpi_sysres_attach(device_t dev)
{
    device_t bus;
    struct acpi_softc *bus_sc;
    struct resource_list_entry *bus_rle, *dev_rle;
    struct resource_list *bus_rl, *dev_rl;
    int done, type;
    rman_res_t start, end, count;

    /*
     * Loop through all current resources to see if the new one overlaps
     * any existing ones.  If so, grow the old one up and/or down
     * accordingly.  Discard any that are wholly contained in the old.  If
     * the resource is unique, add it to the parent.  It will later go into
     * the rman pool.
     */
    bus = device_get_parent(dev);
    dev_rl = BUS_GET_RESOURCE_LIST(bus, dev);
    bus_sc = acpi_device_get_parent_softc(dev);
    bus_rl = &bus_sc->sysres_rl;
    STAILQ_FOREACH(dev_rle, dev_rl, link) {
        if (dev_rle->type != SYS_RES_IOPORT && dev_rle->type != SYS_RES_MEMORY)
            continue;

        start = dev_rle->start;
        end = dev_rle->end;
        count = dev_rle->count;
        type = dev_rle->type;
        done = FALSE;

        STAILQ_FOREACH(bus_rle, bus_rl, link) {
            if (bus_rle->type != type)
                continue;

            /* New resource wholly contained in old, discard. */
            if (start >= bus_rle->start && end <= bus_rle->end)
                break;

            /* New tail overlaps old head, grow existing resource downward. */
            if (start < bus_rle->start && end >= bus_rle->start) {
                bus_rle->count += bus_rle->start - start;
                bus_rle->start = start;
                done = TRUE;
            }

            /* New head overlaps old tail, grow existing resource upward. */
            if (start <= bus_rle->end && end > bus_rle->end) {
                bus_rle->count += end - bus_rle->end;
                bus_rle->end = end;
                done = TRUE;
            }

            /* If we adjusted the old resource, we're finished. */
            if (done)
                break;
        }

        /* If we didn't merge with anything, add this resource. */
        if (bus_rle == NULL)
            resource_list_add_next(bus_rl, type, start, end, count);
    }

    /* After merging/moving resources to the parent, free the list. */
    resource_list_free(dev_rl);

    return (0);
}