/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/pci_cfgreg.h>
#endif
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>

#ifdef INTRNG
#include "acpi_bus_if.h"
#endif

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_BUS
ACPI_MODULE_NAME("RESOURCE")
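
/*
 * Context passed to acpi_lookup_irq_handler() while walking a device's
 * _CRS: the IRQ number (and optionally the resource id) to match, plus
 * space to return the matching descriptor and its trigger/polarity.
 */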
struct lookup_irq_request {
    ACPI_RESOURCE *acpi_res;
    u_int	irq;
    int		counter;
    int		rid;
    int		found;
    int		checkrid;
    int		trig;
    int		pol;
};

static char *pcilink_ids[] = { "PNP0C0F", NULL };
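
/*
 * AcpiWalkResources() callback: look for an IRQ or extended IRQ descriptor
 * that carries the requested interrupt number.  When checkrid is set, only
 * the rid'th matching descriptor is accepted.  On a match, record the
 * trigger mode and polarity, optionally copy out the raw resource, and
 * terminate the walk.
 */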
static ACPI_STATUS
acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
{
    struct lookup_irq_request *req;
    size_t len;
    u_int irqnum, trig, pol;
    bool found;

    found = false;
    req = (struct lookup_irq_request *)context;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        irqnum = res->Data.Irq.InterruptCount;
        for (int i = 0; i < irqnum; i++) {
            if (res->Data.Irq.Interrupts[i] == req->irq) {
                found = true;
                break;
            }
        }
        len = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        irqnum = res->Data.ExtendedIrq.InterruptCount;
        for (int i = 0; i < irqnum; i++) {
            if (res->Data.ExtendedIrq.Interrupts[i] == req->irq) {
                found = true;
                break;
            }
        }
        len = ACPI_RS_SIZE(ACPI_RESOURCE_EXTENDED_IRQ);
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        return (AE_OK);
    }
    if (!found)
        return (AE_OK);
    if (req->checkrid) {
        if (req->counter != req->rid) {
            req->counter++;
            return (AE_OK);
        }
    }
    req->found = 1;
    req->pol = pol;
    req->trig = trig;
    if (req->acpi_res != NULL)
        bcopy(res, req->acpi_res, len);
    return (AE_CTRL_TERMINATE);
}
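
/*
 * Find the _CRS interrupt descriptor that corresponds to an already
 * allocated IRQ resource (matched by resource id and starting IRQ) and
 * return a copy of it in *acpi_res.
 */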
ACPI_STATUS
acpi_lookup_irq_resource(device_t dev, int rid, struct resource *res,
    ACPI_RESOURCE *acpi_res)
{
    struct lookup_irq_request req;
    ACPI_STATUS status;

    req.acpi_res = acpi_res;
    req.irq = rman_get_start(res);
    req.counter = 0;
    req.rid = rid;
    req.found = 0;
    req.checkrid = 1;
    status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
        acpi_lookup_irq_handler, &req);
    if (ACPI_SUCCESS(status) && req.found == 0)
        status = AE_NOT_FOUND;
    return (status);
}
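
/*
 * Program the interrupt controller with the trigger mode and polarity taken
 * from a single-interrupt _CRS descriptor.  On x86, optionally override a
 * suspect edge/active-low setting for ISA IRQs.
 */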
void
acpi_config_intr(device_t dev, ACPI_RESOURCE *res)
{
    u_int irq;
    int pol, trig;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_IRQ:
        KASSERT(res->Data.Irq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.Irq.Interrupts[0];
        trig = res->Data.Irq.Triggering;
        pol = res->Data.Irq.Polarity;
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        KASSERT(res->Data.ExtendedIrq.InterruptCount == 1,
            ("%s: multiple interrupts", __func__));
        irq = res->Data.ExtendedIrq.Interrupts[0];
        trig = res->Data.ExtendedIrq.Triggering;
        pol = res->Data.ExtendedIrq.Polarity;
        break;
    default:
        panic("%s: bad resource type %u", __func__, res->Type);
    }

#if defined(__amd64__) || defined(__i386__)
    if (irq < 16 && trig == ACPI_EDGE_SENSITIVE && pol == ACPI_ACTIVE_LOW &&
        acpi_override_isa_irq_polarity) {
        device_printf(dev, "forcing active-hi polarity for IRQ %u\n", irq);
        pol = ACPI_ACTIVE_HIGH;
    }
#endif
    BUS_CONFIG_INTR(dev, irq, (trig == ACPI_EDGE_SENSITIVE) ?
        INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ?
        INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}

#ifdef INTRNG
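/*
 * Map an interrupt on INTRNG systems.  If the device's _CRS describes the
 * IRQ, use the trigger mode and polarity found there; otherwise default to
 * level-triggered, active-high.
 */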
int
acpi_map_intr(device_t dev, u_int irq, ACPI_HANDLE handle)
{
    struct lookup_irq_request req;
    int trig, pol;

    trig = ACPI_LEVEL_SENSITIVE;
    pol = ACPI_ACTIVE_HIGH;
    if (handle != NULL) {
        req.found = 0;
        req.acpi_res = NULL;
        req.irq = irq;
        req.counter = 0;
        req.rid = 0;
        req.checkrid = 0;
        AcpiWalkResources(handle, "_CRS", acpi_lookup_irq_handler, &req);
        if (req.found != 0) {
            trig = req.trig;
            pol = req.pol;
        }
    }
    return ACPI_BUS_MAP_INTR(device_get_parent(dev), dev, irq,
        (trig == ACPI_EDGE_SENSITIVE) ? INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL,
        (pol == ACPI_ACTIVE_HIGH) ? INTR_POLARITY_HIGH : INTR_POLARITY_LOW);
}
#endif
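
/*
 * State shared with acpi_parse_resource() while walking _CRS: the resource
 * set vectors to invoke, the device being parsed, the set's private context,
 * and whether ResourceProducer entries should be accepted anyway.
 */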
struct acpi_resource_context {
    struct acpi_parse_resource_set *set;
    device_t	dev;
    void	*context;
    bool	ignore_producer_flag;
};

#ifdef ACPI_DEBUG_OUTPUT
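/* Name an ACPI address-range resource type for debug output. */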
static const char *
acpi_address_range_name(UINT8 ResourceType)
{
    static char buf[16];

    switch (ResourceType) {
    case ACPI_MEMORY_RANGE:
        return ("Memory");
    case ACPI_IO_RANGE:
        return ("IO");
    case ACPI_BUS_NUMBER_RANGE:
        return ("Bus Number");
    default:
        snprintf(buf, sizeof(buf), "type %u", ResourceType);
        return (buf);
    }
}
#endif
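
/*
 * AcpiWalkResources() callback: decode a single _CRS entry and hand it to
 * the appropriate acpi_parse_resource_set callback (I/O port, memory, IRQ,
 * DRQ, or dependent-function marker).
 */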
static ACPI_STATUS
acpi_parse_resource(ACPI_RESOURCE *res, void *context)
{
    struct acpi_parse_resource_set *set;
    struct acpi_resource_context *arc;
    UINT64 min, max, length, gran;
#ifdef ACPI_DEBUG
    const char *name;
#endif
    device_t dev;

    arc = context;
    dev = arc->dev;
    set = arc->set;

    switch (res->Type) {
    case ACPI_RESOURCE_TYPE_END_TAG:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "EndTag\n"));
        break;
    case ACPI_RESOURCE_TYPE_FIXED_IO:
        if (res->Data.FixedIo.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedIo 0x%x/%d\n",
            res->Data.FixedIo.Address, res->Data.FixedIo.AddressLength));
        set->set_ioport(dev, arc->context, res->Data.FixedIo.Address,
            res->Data.FixedIo.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_IO:
        if (res->Data.Io.AddressLength <= 0)
            break;
        if (res->Data.Io.Minimum == res->Data.Io.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.AddressLength));
            set->set_ioport(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Io 0x%x-0x%x/%d\n",
                res->Data.Io.Minimum, res->Data.Io.Maximum,
                res->Data.Io.AddressLength));
            set->set_iorange(dev, arc->context, res->Data.Io.Minimum,
                res->Data.Io.Maximum, res->Data.Io.AddressLength,
                res->Data.Io.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
        if (res->Data.FixedMemory32.AddressLength <= 0)
            break;
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "FixedMemory32 0x%x/%d\n",
            res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength));
        set->set_memory(dev, arc->context, res->Data.FixedMemory32.Address,
            res->Data.FixedMemory32.AddressLength);
        break;
    case ACPI_RESOURCE_TYPE_MEMORY32:
        if (res->Data.Memory32.AddressLength <= 0)
            break;
        if (res->Data.Memory32.Minimum == res->Data.Memory32.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x/%d\n",
                res->Data.Memory32.Minimum, res->Data.Memory32.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory32.Minimum,
                res->Data.Memory32.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory32 0x%x-0x%x/%d\n",
                res->Data.Memory32.Minimum, res->Data.Memory32.Maximum,
                res->Data.Memory32.AddressLength));
            set->set_memoryrange(dev, arc->context, res->Data.Memory32.Minimum,
                res->Data.Memory32.Maximum, res->Data.Memory32.AddressLength,
                res->Data.Memory32.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_MEMORY24:
        if (res->Data.Memory24.AddressLength <= 0)
            break;
        if (res->Data.Memory24.Minimum == res->Data.Memory24.Maximum) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x/%d\n",
                res->Data.Memory24.Minimum, res->Data.Memory24.AddressLength));
            set->set_memory(dev, arc->context, res->Data.Memory24.Minimum,
                res->Data.Memory24.AddressLength);
        } else {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Memory24 0x%x-0x%x/%d\n",
                res->Data.Memory24.Minimum, res->Data.Memory24.Maximum,
                res->Data.Memory24.AddressLength));
            set->set_memoryrange(dev, arc->context, res->Data.Memory24.Minimum,
                res->Data.Memory24.Maximum, res->Data.Memory24.AddressLength,
                res->Data.Memory24.Alignment);
        }
        break;
    case ACPI_RESOURCE_TYPE_IRQ:
        /*
         * From ACPI 1.0b, section 6.4.2:
         * "This structure is repeated for each separate interrupt
         * required."
         */
        set->set_irq(dev, arc->context, res->Data.Irq.Interrupts,
            res->Data.Irq.InterruptCount, res->Data.Irq.Triggering,
            res->Data.Irq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_DMA:
        /*
         * From ACPI 1.0b, section 6.4.3:
         * "This structure is repeated for each separate DMA channel
         * required."
         */
        set->set_drq(dev, arc->context, res->Data.Dma.Channels,
            res->Data.Dma.ChannelCount);
        break;
    case ACPI_RESOURCE_TYPE_START_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "start dependent functions\n"));
        set->set_start_dependent(dev, arc->context,
            res->Data.StartDpf.CompatibilityPriority);
        break;
    case ACPI_RESOURCE_TYPE_END_DEPENDENT:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "end dependent functions\n"));
        set->set_end_dependent(dev, arc->context);
        break;
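
    /*
     * Address space descriptors (16/32/64-bit and extended 64-bit) share
     * a common layout; normalize them here before deciding whether to
     * treat the range as memory or I/O.
     */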
    case ACPI_RESOURCE_TYPE_ADDRESS16:
    case ACPI_RESOURCE_TYPE_ADDRESS32:
    case ACPI_RESOURCE_TYPE_ADDRESS64:
    case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
        switch (res->Type) {
        case ACPI_RESOURCE_TYPE_ADDRESS16:
            gran = res->Data.Address16.Address.Granularity;
            min = res->Data.Address16.Address.Minimum;
            max = res->Data.Address16.Address.Maximum;
            length = res->Data.Address16.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address16";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS32:
            gran = res->Data.Address32.Address.Granularity;
            min = res->Data.Address32.Address.Minimum;
            max = res->Data.Address32.Address.Maximum;
            length = res->Data.Address32.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address32";
#endif
            break;
        case ACPI_RESOURCE_TYPE_ADDRESS64:
            gran = res->Data.Address64.Address.Granularity;
            min = res->Data.Address64.Address.Minimum;
            max = res->Data.Address64.Address.Maximum;
            length = res->Data.Address64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "Address64";
#endif
            break;
        default:
            KASSERT(res->Type == ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64,
                ("should never happen"));
            gran = res->Data.ExtAddress64.Address.Granularity;
            min = res->Data.ExtAddress64.Address.Minimum;
            max = res->Data.ExtAddress64.Address.Maximum;
            length = res->Data.ExtAddress64.Address.AddressLength;
#ifdef ACPI_DEBUG
            name = "ExtAddress64";
#endif
            break;
        }
        if (length <= 0)
            break;
        if (!arc->ignore_producer_flag &&
            res->Data.Address.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s %s producer\n", name,
                acpi_address_range_name(res->Data.Address.ResourceType)));
            break;
        }
        if (res->Data.Address.ResourceType != ACPI_MEMORY_RANGE &&
            res->Data.Address.ResourceType != ACPI_IO_RANGE) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                "ignored %s for non-memory, non-I/O\n", name));
            break;
        }

#ifdef __i386__
        if (min > ULONG_MAX || (res->Data.Address.MaxAddressFixed && max >
            ULONG_MAX)) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored %s above 4G\n",
                name));
            break;
        }
        if (max > ULONG_MAX)
            max = ULONG_MAX;
#endif
        if (res->Data.Address.MinAddressFixed == ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed == ACPI_ADDRESS_FIXED) {
            if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/Memory 0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)length));
                set->set_memory(dev, arc->context, min, length);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n", name,
                    (uintmax_t)min, (uintmax_t)length));
                set->set_ioport(dev, arc->context, min, length);
            }
        } else if (res->Data.Address.MinAddressFixed != ACPI_ADDRESS_FIXED &&
            res->Data.Address.MaxAddressFixed != ACPI_ADDRESS_FIXED) {
            /* Fixed size, variable location resource descriptor */
            min = roundup(min, gran + 1);
            if ((min + length - 1) > max) {
                device_printf(dev,
                    "invalid memory range: start: %jx end: %jx max: %jx\n",
                    (uintmax_t)min, (uintmax_t)(min + length - 1),
                    (uintmax_t)max);
            } else {
                if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE) {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                        "%s/Memory 0x%jx/%ju\n", name, (uintmax_t)min,
                        (uintmax_t)length));
                    set->set_memory(dev, arc->context, min, length);
                } else {
                    ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx/%ju\n",
                        name, (uintmax_t)min, (uintmax_t)length));
                    set->set_ioport(dev, arc->context, min, length);
                }
            }
        } else {
            if (res->Data.Address32.ResourceType == ACPI_MEMORY_RANGE) {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                    "%s/Memory 0x%jx-0x%jx/%ju\n", name, (uintmax_t)min,
                    (uintmax_t)max, (uintmax_t)length));
                set->set_memoryrange(dev, arc->context, min, max, length, gran);
            } else {
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "%s/IO 0x%jx-0x%jx/%ju\n",
                    name, (uintmax_t)min, (uintmax_t)max, (uintmax_t)length));
                set->set_iorange(dev, arc->context, min, max, length, gran);
            }
        }
        break;
    case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
        if (res->Data.ExtendedIrq.ProducerConsumer != ACPI_CONSUMER) {
            ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "ignored ExtIRQ producer\n"));
            break;
        }
        set->set_ext_irq(dev, arc->context, res->Data.ExtendedIrq.Interrupts,
            res->Data.ExtendedIrq.InterruptCount,
            res->Data.ExtendedIrq.Triggering, res->Data.ExtendedIrq.Polarity);
        break;
    case ACPI_RESOURCE_TYPE_VENDOR:
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
            "unimplemented VendorSpecific resource\n"));
        break;
    default:
        break;
    }
    return (AE_OK);
}

/*
 * Fetch a device's resources and associate them with the device.
 *
 * Note that it might be nice to also locate ACPI-specific resource items,
 * such as GPE bits.
 *
 * We really need to split the resource-fetching code out from the
 * resource-parsing code, since we may want to use the parsing
 * code for _PRS someday.
 */
ACPI_STATUS
acpi_parse_resources(device_t dev, ACPI_HANDLE handle,
    struct acpi_parse_resource_set *set, void *arg)
{
    struct acpi_resource_context arc;
    ACPI_STATUS status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    set->set_init(dev, arg, &arc.context);
    arc.set = set;
    arc.dev = dev;
    arc.ignore_producer_flag = false;

    /*
     * UARTs on ThunderX2 with 7.2 firmware set ResourceProducer on
     * memory resources.
     */
    if (acpi_MatchHid(handle, "ARMH0011") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * ARM Coresight on N1SDP sets ResourceProducer on memory resources.
     * Coresight devices: ETM, STM, TPIU, ETF/ETR, REP, FUN.
     */
    if (acpi_MatchHid(handle, "ARMHC500") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC502") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC600") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC979") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC97C") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC98D") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHC9FF") != ACPI_MATCHHID_NOMATCH ||
        acpi_MatchHid(handle, "ARMHD620") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    /*
     * The DesignWare I2C controller on Ampere Altra sets ResourceProducer
     * on memory resources.
     */
    if (acpi_MatchHid(handle, "APMC0D0F") != ACPI_MATCHHID_NOMATCH)
        arc.ignore_producer_flag = true;

    status = AcpiWalkResources(handle, "_CRS", acpi_parse_resource, &arc);
    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
        printf("can't fetch resources for %s - %s\n",
            acpi_name(handle), AcpiFormatException(status));
        return_ACPI_STATUS (status);
    }
    set->set_done(dev, arc.context);
    return_ACPI_STATUS (AE_OK);
}

/*
 * Resource-set vectors used to attach _CRS-derived resources
 * to an ACPI device.
 */
static void acpi_res_set_init(device_t dev, void *arg, void **context);
static void acpi_res_set_done(device_t dev, void *context);
static void acpi_res_set_ioport(device_t dev, void *context,
        uint64_t base, uint64_t length);
static void acpi_res_set_iorange(device_t dev, void *context,
        uint64_t low, uint64_t high,
        uint64_t length, uint64_t align);
static void acpi_res_set_memory(device_t dev, void *context,
        uint64_t base, uint64_t length);
static void acpi_res_set_memoryrange(device_t dev, void *context,
        uint64_t low, uint64_t high,
        uint64_t length, uint64_t align);
static void acpi_res_set_irq(device_t dev, void *context, uint8_t *irq,
        int count, int trig, int pol);
static void acpi_res_set_ext_irq(device_t dev, void *context,
        uint32_t *irq, int count, int trig, int pol);
static void acpi_res_set_drq(device_t dev, void *context, uint8_t *drq,
        int count);
static void acpi_res_set_start_dependent(device_t dev, void *context,
        int preference);
static void acpi_res_set_end_dependent(device_t dev, void *context);

struct acpi_parse_resource_set acpi_res_parse_set = {
    acpi_res_set_init,
    acpi_res_set_done,
    acpi_res_set_ioport,
    acpi_res_set_iorange,
    acpi_res_set_memory,
    acpi_res_set_memoryrange,
    acpi_res_set_irq,
    acpi_res_set_ext_irq,
    acpi_res_set_drq,
    acpi_res_set_start_dependent,
    acpi_res_set_end_dependent
};
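
/*
 * Per-device parse context: counts how many I/O, memory, IRQ, and DRQ
 * resources have been added so far, so that each new entry is assigned the
 * next free resource id of its type; ar_parent records the caller's argument.
 */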
struct acpi_res_context {
    int		ar_nio;
    int		ar_nmem;
    int		ar_nirq;
    int		ar_ndrq;
    void	*ar_parent;
};

/*
 * Some resources reported via _CRS should not be added as bus
 * resources.  This function returns true if a resource reported via
 * _CRS should be ignored.
 */
static bool
acpi_res_ignore(device_t dev, int type, rman_res_t start, rman_res_t count)
{
    struct acpi_device *ad = device_get_ivars(dev);
    ACPI_DEVICE_INFO *devinfo;
    bool allow;

    /* Ignore IRQ resources for PCI link devices. */
    if (type == SYS_RES_IRQ &&
        ACPI_ID_PROBE(device_get_parent(dev), dev, pcilink_ids, NULL) <= 0)
        return (true);

    /*
     * Ignore most resources for PCI root bridges.  Some BIOSes
     * incorrectly enumerate the memory ranges they decode as plain
     * memory resources instead of as ResourceProducer ranges.  Other
     * BIOSes incorrectly list system resource entries for I/O ranges
     * under the PCI bridge.  Do allow the one known-correct case on
     * x86 of a PCI bridge claiming the I/O ports used for PCI config
     * access.
     */
    if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
        if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
            if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
                allow = (type == SYS_RES_IOPORT && start == CONF1_ADDR_PORT);
#else
                allow = false;
#endif
                if (!allow) {
                    AcpiOsFree(devinfo);
                    return (true);
                }
            }
            AcpiOsFree(devinfo);
        }
    }

    return (false);
}
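
/* Allocate and zero the per-device parse context. */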
static void
acpi_res_set_init(device_t dev, void *arg, void **context)
{
    struct acpi_res_context *cp;

    if ((cp = AcpiOsAllocate(sizeof(*cp))) != NULL) {
        bzero(cp, sizeof(*cp));
        cp->ar_parent = arg;
        *context = cp;
    }
}

static void
acpi_res_set_done(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    AcpiOsFree(cp);
}

static void
acpi_res_set_ioport(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_IOPORT, base, length))
        return;
    bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, base, length);
}

static void
acpi_res_set_iorange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;

    /*
     * XXX: Some BIOSes contain buggy _CRS entries where fixed I/O
     * ranges have the maximum base address (_MAX) set to the end of the
     * I/O range instead of the start.  These are then treated as a
     * relocatable I/O range rather than a fixed I/O resource.  As a
     * workaround, treat I/O resources encoded this way as fixed I/O
     * ports.
     */
    if (high == (low + length)) {
        if (bootverbose)
            device_printf(dev,
                "_CRS has fixed I/O port range defined as relocatable\n");

        if (acpi_res_ignore(dev, SYS_RES_IOPORT, low, length))
            return;
        bus_set_resource(dev, SYS_RES_IOPORT, cp->ar_nio++, low, length);
        return;
    }

    device_printf(dev, "I/O range not supported\n");
}

static void
acpi_res_set_memory(device_t dev, void *context, uint64_t base,
    uint64_t length)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    if (acpi_res_ignore(dev, SYS_RES_MEMORY, base, length))
        return;
    bus_set_resource(dev, SYS_RES_MEMORY, cp->ar_nmem++, base, length);
}

static void
acpi_res_set_memoryrange(device_t dev, void *context, uint64_t low,
    uint64_t high, uint64_t length, uint64_t align)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "memory range not supported\n");
}

static void
acpi_res_set_irq(device_t dev, void *context, uint8_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_ext_irq(device_t dev, void *context, uint32_t *irq, int count,
    int trig, int pol)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;
    int i;

    if (cp == NULL || irq == NULL)
        return;

    for (i = 0; i < count; i++) {
        if (acpi_res_ignore(dev, SYS_RES_IRQ, irq[i], 1))
            continue;
        bus_set_resource(dev, SYS_RES_IRQ, cp->ar_nirq++, irq[i], 1);
    }
}

static void
acpi_res_set_drq(device_t dev, void *context, uint8_t *drq, int count)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL || drq == NULL)
        return;

    /* This implements no resource relocation. */
    if (count != 1)
        return;

    if (acpi_res_ignore(dev, SYS_RES_DRQ, *drq, 1))
        return;
    bus_set_resource(dev, SYS_RES_DRQ, cp->ar_ndrq++, *drq, 1);
}

static void
acpi_res_set_start_dependent(device_t dev, void *context, int preference)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

static void
acpi_res_set_end_dependent(device_t dev, void *context)
{
    struct acpi_res_context *cp = (struct acpi_res_context *)context;

    if (cp == NULL)
        return;
    device_printf(dev, "dependent functions not supported\n");
}

/*
 * Resource-owning placeholders for IO and memory pseudo-devices.
 *
 * This code allocates system resources that will be used by ACPI
 * child devices.  The acpi parent manages these resources through a
 * private rman.
 */

static int acpi_sysres_probe(device_t dev);
static int acpi_sysres_attach(device_t dev);

static device_method_t acpi_sysres_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_sysres_probe),
    DEVMETHOD(device_attach, acpi_sysres_attach),

    DEVMETHOD_END
};

static driver_t acpi_sysres_driver = {
    "acpi_sysresource",
    acpi_sysres_methods,
    0,
};

DRIVER_MODULE(acpi_sysresource, acpi, acpi_sysres_driver, 0, 0);
MODULE_DEPEND(acpi_sysresource, acpi, 1, 1, 1);
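
/* Match the PNP0C01/PNP0C02 system-resource placeholder devices. */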
static int
acpi_sysres_probe(device_t dev)
{
    static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
    int rv;

    if (acpi_disabled("sysresource"))
        return (ENXIO);
    rv = ACPI_ID_PROBE(device_get_parent(dev), dev, sysres_ids, NULL);
    if (rv > 0)
        return (rv);
    device_set_desc(dev, "System Resource");
    device_quiet(dev);
    return (rv);
}
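
/*
 * Merge this placeholder device's I/O and memory resources into the ACPI
 * bus's system resource list, coalescing overlapping ranges, then release
 * the device's own resource list.
 */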
static int
acpi_sysres_attach(device_t dev)
{
    device_t bus;
    struct acpi_softc *bus_sc;
    struct resource_list_entry *bus_rle, *dev_rle;
    struct resource_list *bus_rl, *dev_rl;
    int done, type;
    rman_res_t start, end, count;

    /*
     * Loop through all current resources to see if the new one overlaps
     * any existing ones.  If so, grow the old one up and/or down
     * accordingly.  Discard any that are wholly contained in the old.  If
     * the resource is unique, add it to the parent.  It will later go into
     * the rman pool.
     */
    bus = device_get_parent(dev);
    dev_rl = BUS_GET_RESOURCE_LIST(bus, dev);
    bus_sc = acpi_device_get_parent_softc(dev);
    bus_rl = &bus_sc->sysres_rl;
    STAILQ_FOREACH(dev_rle, dev_rl, link) {
        if (dev_rle->type != SYS_RES_IOPORT && dev_rle->type != SYS_RES_MEMORY)
            continue;

        start = dev_rle->start;
        end = dev_rle->end;
        count = dev_rle->count;
        type = dev_rle->type;
        done = FALSE;

        STAILQ_FOREACH(bus_rle, bus_rl, link) {
            if (bus_rle->type != type)
                continue;

            /* New resource wholly contained in old, discard. */
            if (start >= bus_rle->start && end <= bus_rle->end)
                break;

            /* New tail overlaps old head, grow existing resource downward. */
            if (start < bus_rle->start && end >= bus_rle->start) {
                bus_rle->count += bus_rle->start - start;
                bus_rle->start = start;
                done = TRUE;
            }

            /* New head overlaps old tail, grow existing resource upward. */
            if (start <= bus_rle->end && end > bus_rle->end) {
                bus_rle->count += end - bus_rle->end;
                bus_rle->end = end;
                done = TRUE;
            }

            /* If we adjusted the old resource, we're finished. */
            if (done)
                break;
        }

        /* If we didn't merge with anything, add this resource. */
        if (bus_rle == NULL)
            resource_list_add_next(bus_rl, type, start, end, count);
    }

    /* After merging/moving resources to the parent, free the list. */
    resource_list_free(dev_rl);

    return (0);
}