xref: /freebsd/sys/dev/pci/pci_host_generic.c (revision ee5cf11617a9b7f034d95c639bd4d27d1f09e848)
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/cpuset.h>
#include <sys/rwlock.h>

#if defined(INTRNG)
#include <machine/intr.h>
#endif

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>
#include <vm/vm_page.h>

#include "pcib_if.h"

/* Assembling ECAM Configuration Address */
#define	PCIE_BUS_SHIFT		20
#define	PCIE_SLOT_SHIFT		15
#define	PCIE_FUNC_SHIFT		12
#define	PCIE_BUS_MASK		0xFF
#define	PCIE_SLOT_MASK		0x1F
#define	PCIE_FUNC_MASK		0x07
#define	PCIE_REG_MASK		0xFFF

#define	PCIE_ADDR_OFFSET(bus, slot, func, reg)			\
	((((bus) & PCIE_BUS_MASK) << PCIE_BUS_SHIFT)	|	\
	(((slot) & PCIE_SLOT_MASK) << PCIE_SLOT_SHIFT)	|	\
	(((func) & PCIE_FUNC_MASK) << PCIE_FUNC_SHIFT)	|	\
	((reg) & PCIE_REG_MASK))

#define	PCI_IO_WINDOW_OFFSET	0x1000

#define	SPACE_CODE_SHIFT	24
#define	SPACE_CODE_MASK		0x3
#define	SPACE_CODE_IO_SPACE	0x1
#define	PROPS_CELL_SIZE		1
#define	PCI_ADDR_CELL_SIZE	2

/* OFW bus interface */
struct generic_pcie_ofw_devinfo {
	struct ofw_bus_devinfo	di_dinfo;
	struct resource_list	di_rl;
};

/* Forward prototypes */

static int generic_pcie_probe(device_t dev);
static int parse_pci_mem_ranges(struct generic_pcie_softc *sc);
static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);
static struct resource *generic_pcie_alloc_resource_ofw(device_t, device_t,
    int, int *, rman_res_t, rman_res_t, rman_res_t, u_int);
static struct resource *generic_pcie_alloc_resource_pcie(device_t dev,
    device_t child, int type, int *rid, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags);
static int generic_pcie_release_resource(device_t dev, device_t child,
    int type, int rid, struct resource *res);
static int generic_pcie_release_resource_ofw(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_release_resource_pcie(device_t, device_t, int, int,
    struct resource *);
static int generic_pcie_ofw_bus_attach(device_t);
static const struct ofw_bus_devinfo *generic_pcie_ofw_get_devinfo(device_t,
    device_t);

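/*
 * Read the #address-cells and #size-cells properties of an FDT node,
 * falling back to two cells each when a property is absent.
 */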
static __inline void
get_addr_size_cells(phandle_t node, pcell_t *addr_cells, pcell_t *size_cells)
{

	*addr_cells = 2;
	/* Find address cells if present */
	OF_getencprop(node, "#address-cells", addr_cells, sizeof(*addr_cells));

	*size_cells = 2;
	/* Find size cells if present */
	OF_getencprop(node, "#size-cells", size_cells, sizeof(*size_cells));
}

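/*
 * Match the generic ECAM binding ("pci-host-ecam-generic") as well as the
 * gem5 PCIe controller ("arm,gem5_pcie").
 */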
static int
generic_pcie_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_is_compatible(dev, "pci-host-ecam-generic")) {
		device_set_desc(dev, "Generic PCI host controller");
		return (BUS_PROBE_GENERIC);
	}
	if (ofw_bus_is_compatible(dev, "arm,gem5_pcie")) {
		device_set_desc(dev, "GEM5 PCIe host controller");
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

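/*
 * Parse the FDT "ranges" property, attach any directly listed OFW children,
 * map the ECAM configuration window, set up the memory and I/O resource
 * managers and attach the "pci" bus child.
 */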
int
pci_host_generic_attach(device_t dev)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	phandle_t node;
	int error;
	int tuple;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Retrieve 'ranges' property from FDT */
	if (bootverbose)
		device_printf(dev, "parsing FDT for ECAM%d:\n",
		    sc->ecam);
	if (parse_pci_mem_ranges(sc))
		return (ENXIO);

	/* Attach OFW bus */
	if (generic_pcie_ofw_bus_attach(dev) != 0)
		return (ENXIO);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		if (phys_base == 0 || size == 0)
			continue; /* empty range element */
		if (sc->ranges[tuple].flags & FLAG_MEM) {
			error = rman_manage_region(&sc->mem_rman,
			   phys_base, phys_base + size - 1);
		} else if (sc->ranges[tuple].flags & FLAG_IO) {
			error = rman_manage_region(&sc->io_rman,
			   pci_base + PCI_IO_WINDOW_OFFSET,
			   pci_base + PCI_IO_WINDOW_OFFSET + size - 1);
		} else
			continue;
		if (error) {
			device_printf(dev, "rman_manage_region() failed: "
			    "error = %d\n", error);
			rman_fini(&sc->mem_rman);
			return (error);
		}
	}

	node = ofw_bus_get_node(dev);
	ofw_bus_setup_iinfo(node, &sc->pci_iinfo, sizeof(cell_t));

	device_add_child(dev, "pci", -1);
	return (bus_generic_attach(dev));
}

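/*
 * Decode the FDT "ranges" property into the softc's ranges[] table.  Each
 * tuple records the PCI bus address, CPU physical address and size of one
 * memory or I/O window; unused table slots are zeroed.
 */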
static int
parse_pci_mem_ranges(struct generic_pcie_softc *sc)
{
	pcell_t pci_addr_cells, parent_addr_cells;
	pcell_t attributes, size_cells;
	cell_t *base_ranges;
	int nbase_ranges;
	phandle_t node;
	int i, j, k;
	int tuple;

	node = ofw_bus_get_node(sc->dev);

	OF_getencprop(node, "#address-cells", &pci_addr_cells,
					sizeof(pci_addr_cells));
	OF_getencprop(node, "#size-cells", &size_cells,
					sizeof(size_cells));
	OF_getencprop(OF_parent(node), "#address-cells", &parent_addr_cells,
					sizeof(parent_addr_cells));

	if (parent_addr_cells != 2 || pci_addr_cells != 3 || size_cells != 2) {
		device_printf(sc->dev,
		    "Unexpected number of address or size cells in FDT\n");
		return (ENXIO);
	}

	nbase_ranges = OF_getproplen(node, "ranges");
	/* OF_getproplen() returns -1 when the property is missing. */
	if (nbase_ranges <= 0) {
		device_printf(sc->dev, "missing or empty 'ranges' property\n");
		return (ENXIO);
	}
	sc->nranges = nbase_ranges / sizeof(cell_t) /
	    (parent_addr_cells + pci_addr_cells + size_cells);
	base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK);
	OF_getencprop(node, "ranges", base_ranges, nbase_ranges);

	for (i = 0, j = 0; i < sc->nranges; i++) {
		attributes = (base_ranges[j++] >> SPACE_CODE_SHIFT) & \
							SPACE_CODE_MASK;
		if (attributes == SPACE_CODE_IO_SPACE) {
			sc->ranges[i].flags |= FLAG_IO;
		} else {
			sc->ranges[i].flags |= FLAG_MEM;
		}

		sc->ranges[i].pci_base = 0;
		for (k = 0; k < (pci_addr_cells - 1); k++) {
			sc->ranges[i].pci_base <<= 32;
			sc->ranges[i].pci_base |= base_ranges[j++];
		}
		sc->ranges[i].phys_base = 0;
		for (k = 0; k < parent_addr_cells; k++) {
			sc->ranges[i].phys_base <<= 32;
			sc->ranges[i].phys_base |= base_ranges[j++];
		}
		sc->ranges[i].size = 0;
		for (k = 0; k < size_cells; k++) {
			sc->ranges[i].size <<= 32;
			sc->ranges[i].size |= base_ranges[j++];
		}
	}

	for (; i < MAX_RANGES_TUPLES; i++) {
		/* zero-fill remaining tuples to mark empty elements in array */
		sc->ranges[i].pci_base = 0;
		sc->ranges[i].phys_base = 0;
		sc->ranges[i].size = 0;
	}

	if (bootverbose) {
		for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
			device_printf(sc->dev,
			    "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx\n",
			    sc->ranges[tuple].pci_base,
			    sc->ranges[tuple].phys_base,
			    sc->ranges[tuple].size);
		}
	}

	free(base_ranges, M_DEVBUF);
	return (0);
}

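/*
 * Read a PCI configuration register through the memory-mapped ECAM window.
 * Out-of-range addresses and unsupported access widths return all ones.
 */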
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t	t;
	uint64_t offset;
	uint32_t data;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return (~0U);

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);
	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		data = bus_space_read_1(t, h, offset);
		break;
	case 2:
		data = le16toh(bus_space_read_2(t, h, offset));
		break;
	case 4:
		data = le32toh(bus_space_read_4(t, h, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

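/*
 * Write a PCI configuration register through the ECAM window; invalid
 * addresses and unsupported access widths are silently ignored.
 */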
static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_softc *sc;
	bus_space_handle_t h;
	bus_space_tag_t t;
	uint64_t offset;

	if ((bus > PCI_BUSMAX) || (slot > PCI_SLOTMAX) ||
	    (func > PCI_FUNCMAX) || (reg > PCIE_REGMAX))
		return;

	sc = device_get_softc(dev);

	offset = PCIE_ADDR_OFFSET(bus, slot, func, reg);

	t = sc->bst;
	h = sc->bsh;

	switch (bytes) {
	case 1:
		bus_space_write_1(t, h, offset, val);
		break;
	case 2:
		bus_space_write_2(t, h, offset, htole16(val));
		break;
	case 4:
		bus_space_write_4(t, h, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* maximum slots per bus according to the PCI standard */
}

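/*
 * Route a legacy INTx pin for a child device by walking the FDT
 * interrupt-map of the host bridge and registering the result with the
 * parent interrupt controller.
 */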
static int
generic_pcie_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct generic_pcie_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[2];
	phandle_t iparent;
	int intrcells;

	sc = device_get_softc(bus);
	pintr = pin;

	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev),
	    &sc->pci_iinfo, &reg, sizeof(reg), &pintr, sizeof(pintr),
	    mintr, sizeof(mintr), &iparent);
	if (intrcells) {
		pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr);
		return (pintr);
	}

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}

static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_softc *sc;
	int secondary_bus;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		/* this pcib adds only pci bus 0 as child */
		secondary_bus = 0;
		*result = secondary_bus;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

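/*
 * Return the resource manager that owns a given resource type, or NULL for
 * types this bridge does not manage itself.
 */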
static struct rman *
generic_pcie_rman(struct generic_pcie_softc *sc, int type)
{

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

static int
generic_pcie_release_resource_pcie(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL) {
		KASSERT(rman_is_region_manager(res, rm), ("rman mismatch"));
		rman_release_resource(res);
	}

	return (bus_generic_release_resource(dev, child, type, rid, res));
}

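/*
 * Release a resource, dispatching to the PCI bus number domain, to the
 * bridge's own resource managers for PCI devices without FDT nodes, or to
 * the generic OFW path for children described in the FDT.
 */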
static int
generic_pcie_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_softc *sc;

	if (type == PCI_RES_BUS) {
		sc = device_get_softc(dev);
		return (pci_domain_release_bus(sc->ecam, child, rid, res));
	}
#endif
	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0) {
		return (generic_pcie_release_resource_pcie(dev,
		    child, type, rid, res));
	}

	/* For other devices use OFW method */
	return (generic_pcie_release_resource_ofw(dev,
	    child, type, rid, res));
}

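/*
 * Allocate a resource for a child, using the same dispatch scheme as
 * resource release: PCI bus numbers come from the domain, plain PCI
 * children use the bridge's resource managers and FDT children go through
 * the OFW path.
 */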
struct resource *
pci_host_generic_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_softc *sc;

	if (type == PCI_RES_BUS) {
		sc = device_get_softc(dev);
		return (pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags));
	}
#endif
	/* For PCIe devices that do not have FDT nodes, use PCIB method */
	if ((int)ofw_bus_get_node(child) <= 0)
		return (generic_pcie_alloc_resource_pcie(dev, child, type, rid,
		    start, end, count, flags));

	/* For other devices use OFW method */
	return (generic_pcie_alloc_resource_ofw(dev, child, type, rid,
	    start, end, count, flags));
}

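/*
 * Reserve a memory or I/O range from the bridge's resource managers;
 * other resource types are passed up to the parent bus.
 */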
static struct resource *
generic_pcie_alloc_resource_pcie(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct resource *res;
	struct rman *rm;

	sc = device_get_softc(dev);

	rm = generic_pcie_rman(sc, type);
	if (rm == NULL)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
		    type, rid, start, end, count, flags));

	if (bootverbose) {
		device_printf(dev,
		    "rman_reserve_resource: start=%#jx, end=%#jx, count=%#jx\n",
		    start, end, count);
	}

	res = rman_reserve_resource(rm, start, end, count, flags, child);
	if (res == NULL)
		goto fail;

	rman_set_rid(res, *rid);

	if (flags & RF_ACTIVE)
		if (bus_activate_resource(child, type, *rid, res)) {
			rman_release_resource(res);
			goto fail;
		}

	return (res);

fail:
	device_printf(dev, "%s FAIL: type=%d, rid=%d, "
	    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
	    __func__, type, *rid, start, end, count, flags);

	return (NULL);
}

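/*
 * Adjust the bounds of an existing resource, handling PCI bus numbers and
 * locally managed ranges before falling back to the generic bus method.
 */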
static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	if (type == PCI_RES_BUS)
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif

	rm = generic_pcie_rman(sc, type);
	if (rm != NULL)
		return (rman_adjust_resource(res, start, end));
	return (bus_generic_adjust_resource(dev, child, type, res, start, end));
}

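/*
 * Activate a resource.  I/O port ranges are translated from the PCI bus
 * address space to the CPU physical window described by the matching
 * "ranges" tuple before the parent bus maps them.
 */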
static int
generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct generic_pcie_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	int found;
	int res;
	int i;

	sc = device_get_softc(dev);

	if ((res = rman_activate_resource(r)) != 0)
		return (res);

	switch(type) {
	case SYS_RES_IOPORT:
		found = 0;
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			pci_base = sc->ranges[i].pci_base;
			phys_base = sc->ranges[i].phys_base;
			size = sc->ranges[i].size;

			/* Match on the resource start address, not the rid. */
			if ((rman_get_start(r) >= pci_base) &&
			    (rman_get_start(r) < (pci_base + size))) {
				found = 1;
				break;
			}
		}
		if (found) {
			rman_set_start(r, rman_get_start(r) + phys_base);
			rman_set_end(r, rman_get_end(r) + phys_base);
			BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
						type, rid, r);
		} else {
			device_printf(dev, "Failed to activate IOPORT resource\n");
			res = 0;
		}
		break;
	case SYS_RES_MEMORY:
		BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child, type, rid, r);
		break;
	default:
		break;
	}

	return (res);
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct generic_pcie_softc *sc;
	vm_offset_t vaddr;
	int res;

	sc = device_get_softc(dev);

	if ((res = rman_deactivate_resource(r)) != 0)
		return (res);

	switch(type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		vaddr = (vm_offset_t)rman_get_virtual(r);
		pmap_unmapdev(vaddr, rman_get_size(r));
		break;
	default:
		break;
	}

	return (res);
}

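/*
 * MSI and MSI-X allocation, release and mapping.  With INTRNG the requests
 * are forwarded to the MSI controller named by the bridge's "msi-map"
 * property; otherwise the legacy arm_* MSI interface is used on arm64.
 */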
static int
generic_pcie_alloc_msi(device_t pci, device_t child, int count, int maxcount,
    int *irqs)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
	    irqs));
#elif defined(__aarch64__)
	return (arm_alloc_msi(pci, child, count, maxcount, irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_release_msi(device_t pci, device_t child, int count, int *irqs)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_release_msi(pci, child, msi_parent, count, irqs));
#elif defined(__aarch64__)
	return (arm_release_msi(pci, child, count, irqs));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
#elif defined(__aarch64__)
	return (arm_map_msi(pci, child, irq, addr, data));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_alloc_msix(device_t pci, device_t child, int *irq)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_alloc_msix(pci, child, msi_parent, irq));
#elif defined(__aarch64__)
	return (arm_alloc_msix(pci, child, irq));
#else
	return (ENXIO);
#endif
}

static int
generic_pcie_release_msix(device_t pci, device_t child, int irq)
{
#if defined(INTRNG)
	phandle_t msi_parent;

	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
	    NULL);
	return (intr_release_msix(pci, child, msi_parent, irq));
#elif defined(__aarch64__)
	return (arm_release_msix(pci, child, irq));
#else
	return (ENXIO);
#endif
}

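/*
 * Translate a child's PCI requester ID for MSI purposes using the "msi-map"
 * property; all other ID types are handled by the generic pcib code.
 */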
int
generic_pcie_get_id(device_t pci, device_t child, enum pci_id_type type,
    uintptr_t *id)
{
	phandle_t node;
	uint32_t rid;
	uint16_t pci_rid;

	if (type != PCI_ID_MSI)
		return (pcib_get_id(pci, child, type, id));

	node = ofw_bus_get_node(pci);
	pci_rid = pci_get_rid(child);

	ofw_bus_msimap(node, pci_rid, NULL, &rid);
	*id = rid;

	return (0);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_probe,			generic_pcie_probe),
	DEVMETHOD(device_attach,		pci_host_generic_attach),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_release_resource,		generic_pcie_release_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_route_interrupt,		generic_pcie_route_interrupt),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),
	DEVMETHOD(pcib_alloc_msi,		generic_pcie_alloc_msi),
	DEVMETHOD(pcib_release_msi,		generic_pcie_release_msi),
	DEVMETHOD(pcib_alloc_msix,		generic_pcie_alloc_msix),
	DEVMETHOD(pcib_release_msix,		generic_pcie_release_msix),
	DEVMETHOD(pcib_map_msi,			generic_pcie_map_msi),
	DEVMETHOD(pcib_get_id,			generic_pcie_get_id),

	/* ofw_bus interface */
	DEVMETHOD(ofw_bus_get_devinfo,		generic_pcie_ofw_get_devinfo),
	DEVMETHOD(ofw_bus_get_compat,		ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,		ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,		ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,		ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,		ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static const struct ofw_bus_devinfo *
generic_pcie_ofw_get_devinfo(device_t bus __unused, device_t child)
{
	struct generic_pcie_ofw_devinfo *di;

	di = device_get_ivars(child);
	return (&di->di_dinfo);
}

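/*
 * Allocate a resource for an FDT-described child.  Default ranges are taken
 * from the child's resource list, and memory addresses are translated from
 * the CPU address space to PCI bus addresses via the "ranges" table.
 */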
static struct resource *
generic_pcie_alloc_resource_ofw(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_softc *sc;
	struct generic_pcie_ofw_devinfo *di;
	struct resource_list_entry *rle;
	int i;

	sc = device_get_softc(bus);

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		if ((di = device_get_ivars(child)) == NULL)
			return (NULL);
		if (type == SYS_RES_IOPORT)
			type = SYS_RES_MEMORY;

		/* Find defaults for this rid */
		rle = resource_list_find(&di->di_rl, type, *rid);
		if (rle == NULL)
			return (NULL);

		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	if (type == SYS_RES_MEMORY) {
		/* Remap through ranges property */
		for (i = 0; i < MAX_RANGES_TUPLES; i++) {
			if (start >= sc->ranges[i].phys_base && end <
			    sc->ranges[i].phys_base + sc->ranges[i].size) {
				start -= sc->ranges[i].phys_base;
				start += sc->ranges[i].pci_base;
				end -= sc->ranges[i].phys_base;
				end += sc->ranges[i].pci_base;
				break;
			}
		}

		if (i == MAX_RANGES_TUPLES) {
			device_printf(bus, "Could not map resource "
			    "%#jx-%#jx\n", start, end);
			return (NULL);
		}
	}

	return (bus_generic_alloc_resource(bus, child, type, rid, start, end,
	    count, flags));
}

static int
generic_pcie_release_resource_ofw(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{

	return (bus_generic_release_resource(bus, child, type, rid, res));
}

/* Helper functions */

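/*
 * Enumerate the host bridge's FDT children, build devinfo and resource
 * lists for each node and add a matching newbus child device.
 */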
static int
generic_pcie_ofw_bus_attach(device_t dev)
{
	struct generic_pcie_ofw_devinfo *di;
	device_t child;
	phandle_t parent, node;
	pcell_t addr_cells, size_cells;

	parent = ofw_bus_get_node(dev);
	if (parent > 0) {
		get_addr_size_cells(parent, &addr_cells, &size_cells);
		/* Iterate through all bus subordinates */
		for (node = OF_child(parent); node > 0; node = OF_peer(node)) {

			/* Allocate and populate devinfo. */
			di = malloc(sizeof(*di), M_DEVBUF, M_WAITOK | M_ZERO);
			if (ofw_bus_gen_setup_devinfo(&di->di_dinfo, node) != 0) {
				free(di, M_DEVBUF);
				continue;
			}

			/* Initialize and populate resource list. */
			resource_list_init(&di->di_rl);
			ofw_bus_reg_to_rl(dev, node, addr_cells, size_cells,
			    &di->di_rl);
			ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);

			/* Add newbus device for this FDT node */
			child = device_add_child(dev, NULL, -1);
			if (child == NULL) {
				resource_list_free(&di->di_rl);
				ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
				free(di, M_DEVBUF);
				continue;
			}

			device_set_ivars(child, di);
		}
	}

	return (0);
}

DEFINE_CLASS_0(pcib, generic_pcie_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_softc));

devclass_t generic_pcie_devclass;

DRIVER_MODULE(pcib, simplebus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);
DRIVER_MODULE(pcib, ofwbus, generic_pcie_driver,
    generic_pcie_devclass, 0, 0);