/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

#if defined(VM_MEMATTR_DEVICE_NP)
#define	PCI_UNMAPPED
#define	PCI_RF_FLAGS	RF_UNMAPPED
#else
#define	PCI_RF_FLAGS	0
#endif

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

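/*
 * Core attach: create the bridge's parent DMA tag, map the ECAM
 * configuration window, and set up rman(9) pools for the I/O port,
 * memory and prefetchable memory windows described by sc->ranges,
 * which the bus-specific front end (e.g. FDT or ACPI) filled in
 * before calling here.
 */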
int
pci_host_generic_core_attach(device_t dev)
{
#ifdef PCI_UNMAPPED
	struct resource_map_request req;
	struct resource_map map;
#endif
	struct generic_pcie_core_softc *sc;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	char buf[64];
	int domain, error;
	int flags, rid, tuple, type;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	/*
	 * Attempt to set the domain. If it's missing, or we are unable to
	 * set it, then memory allocations may be placed in the wrong domain.
	 */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(sc->dmat, domain);

	if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
		rid = 0;
		sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    PCI_RF_FLAGS | RF_ACTIVE);
		if (sc->res == NULL) {
			device_printf(dev, "could not allocate memory.\n");
			error = ENXIO;
			goto err_resource;
		}
#ifdef PCI_UNMAPPED
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE_NP;
		error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req,
		    &map);
		if (error != 0) {
			device_printf(dev, "could not map memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY, rid,
			    sc->res);
			goto err_resource;
		}
		rman_set_mapping(sc->res, &map);
#endif
	}

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s prefetch window",
	    device_get_nameunit(dev));
	sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window",
	    device_get_nameunit(dev));
	sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->io_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s I/O port window",
	    device_get_nameunit(dev));
	sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		rid = tuple + 1;
		if (size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			flags = RF_PREFETCHABLE;
			type = SYS_RES_MEMORY;
			error = rman_manage_region(&sc->pmem_rman,
			    pci_base, pci_base + size - 1);
			break;
		case FLAG_TYPE_MEM:
			flags = 0;
			type = SYS_RES_MEMORY;
			error = rman_manage_region(&sc->mem_rman,
			    pci_base, pci_base + size - 1);
			break;
		case FLAG_TYPE_IO:
			flags = 0;
			type = SYS_RES_IOPORT;
			error = rman_manage_region(&sc->io_rman,
			    pci_base, pci_base + size - 1);
			break;
		default:
			continue;
		}
		if (error) {
			device_printf(dev,
			    "rman_manage_region() failed: error = %d\n",
			    error);
			goto err_rman_manage;
		}
		error = bus_set_resource(dev, type, rid, phys_base, size);
		if (error != 0) {
			device_printf(dev,
			    "failed to set resource for range %d: %d\n", tuple,
			    error);
			goto err_rman_manage;
		}
		sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
		    RF_ACTIVE | RF_UNMAPPED | flags);
		if (sc->ranges[tuple].res == NULL) {
			device_printf(dev,
			    "failed to allocate resource for range %d\n", tuple);
			error = ENXIO;
			goto err_rman_manage;
		}
	}

	return (0);

err_rman_manage:
	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		if (sc->ranges[tuple].size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
			type = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			type = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, type, tuple + 1,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, type, tuple + 1);
	}
	rman_fini(&sc->io_rman);
err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

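/*
 * Core detach: undo pci_host_generic_core_attach(), releasing the
 * per-range resources and rman pools, the ECAM window and the DMA tag.
 */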
int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error, tuple, type;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		if (sc->ranges[tuple].size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
			type = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			type = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, type, tuple + 1,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, type, tuple + 1);
	}
	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

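/*
 * ECAM configuration space read.  PCIE_ADDR_OFFSET() encodes the
 * standard ECAM offset, (bus << 20) | (slot << 15) | (func << 12) | reg.
 * Out-of-range accesses return all-ones, as a read from a non-existent
 * device would.  The DesignWare quirk hides every slot but 0 on the
 * root bus, since that root port only decodes device 0.
 */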
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		data = bus_read_1(sc->res, offset);
		break;
	case 2:
		data = le16toh(bus_read_2(sc->res, offset));
		break;
	case 4:
		data = le32toh(bus_read_4(sc->res, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

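/*
 * ECAM configuration space write.  Out-of-range accesses are silently
 * discarded; multi-byte values are stored little-endian, as PCI
 * configuration space requires.
 */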
static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		bus_write_1(sc->res, offset, val);
		break;
	case 2:
		bus_write_2(sc->res, offset, htole16(val));
		break;
	case 4:
		bus_write_4(sc->res, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus according to the PCI standard */
}

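/*
 * Bus ivars: report the secondary bus number and the PCI domain
 * (the ECAM segment) to the child pci(4) bus.
 */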
static int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);

	if (index == PCIB_IVAR_BUS) {
		*result = sc->bus_start;
		return (0);
	}

	if (index == PCIB_IVAR_DOMAIN) {
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

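/*
 * Select the rman pool backing a resource type: I/O ports, the
 * prefetchable memory pool (when one exists and RF_PREFETCHABLE was
 * requested), or regular memory.
 */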
static struct rman *
generic_pcie_get_rman(device_t dev, int type, u_int flags)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

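/*
 * Release a child resource: bus numbers go back to the PCI domain,
 * port and memory resources to the bridge's rman pools, and anything
 * else up to the parent bus.
 */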
int
pci_host_generic_core_release_resource(device_t dev, device_t child, int type,
    int rid, struct resource *res)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ecam, child, rid, res));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, type, rid,
		    res));
	default:
		return (bus_generic_release_resource(dev, child, type, rid,
		    res));
	}
}

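/*
 * Find the ranges[] entry whose PCI address window contains
 * [start, end] and whose type matches the requested resource space.
 */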
static struct pcie_range *
generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
    rman_res_t end)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);
	uint64_t pci_base;
	uint64_t size;
	int i, space;

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (NULL);
	}

	for (i = 0; i < MAX_RANGES_TUPLES; i++) {
		pci_base = sc->ranges[i].pci_base;
		size = sc->ranges[i].size;
		if (size == 0)
			continue; /* empty range element */

		if (start < pci_base || end >= pci_base + size)
			continue;

		switch (FLAG_TYPE(sc->ranges[i].flags)) {
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_PMEM:
			space = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			space = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}

		if (type == space)
			return (&sc->ranges[i]);
	}
	return (NULL);
}

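/*
 * Translate a PCI bus address range into the CPU physical addresses
 * recorded in the corresponding ranges[] entry.
 */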
static int
generic_pcie_translate_resource_common(device_t dev, int type, rman_res_t start,
    rman_res_t end, rman_res_t *new_start, rman_res_t *new_end)
{
	struct pcie_range *range;

	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type, start, end);
		if (range == NULL)
			return (ENOENT);
		*new_start = start - range->pci_base + range->phys_base;
		*new_end = end - range->pci_base + range->phys_base;
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		*new_end = end;
		break;
	}

	return (0);
}

static int
generic_pcie_translate_resource(device_t bus, int type,
    rman_res_t start, rman_res_t *newstart)
{
	rman_res_t newend; /* unused */

	return (generic_pcie_translate_resource_common(
	    bus, type, start, 0, newstart, &newend));
}

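/*
 * Allocate a child resource: bus numbers from the PCI domain, port
 * and memory ranges from the bridge's rman pools, and everything else
 * from the parent bus.
 */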
struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;
#endif
	struct resource *res;

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	sc = device_get_softc(dev);
#endif

	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}

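/*
 * The activate, deactivate and adjust methods below dispatch the same
 * way as allocation: PCI_RES_BUS requests go to the PCI domain,
 * rman-managed types to the generic rman helpers, and the rest to the
 * parent bus.
 */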
static int
generic_pcie_activate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, rid, r));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, type,
		    rid, r));
	default:
		return (bus_generic_activate_resource(dev, child, type, rid,
		    r));
	}
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, rid, r));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, type,
		    rid, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, type, rid,
		    r));
	}
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child, int type,
    struct resource *res, rman_res_t start, rman_res_t end)
{
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
#endif
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, type, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, type, res,
		    start, end));
	}
}

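/*
 * Map a child's port or memory resource.  The CPU side of each range
 * was already allocated (unmapped) at attach time, so the request is
 * redirected into that range resource at the matching offset.
 */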
static int
generic_pcie_map_resource(device_t dev, device_t child, int type,
    struct resource *r, struct resource_map_request *argsp,
    struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error;

	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (EINVAL);
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, type, r, argsp,
		    map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start, &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_generic_map_resource(dev, child, type, range->res, &args,
	    map));
}

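/*
 * Unmap by handing the request back to bus_generic_unmap_resource()
 * against the range resource the mapping was created from.
 */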
static int
generic_pcie_unmap_resource(device_t dev, device_t child, int type,
    struct resource *r, struct resource_map *map)
{
	struct pcie_range *range;

	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (EINVAL);
#endif
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type,
		    rman_get_start(r), rman_get_end(r));
		if (range == NULL || range->res == NULL)
			return (ENOENT);
		r = range->res;
		break;
	default:
		break;
	}
	return (bus_generic_unmap_resource(dev, child, type, r, map));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(device_detach,		pci_host_generic_core_detach),

	DEVMETHOD(bus_get_rman,			generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource,	generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,		generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,		generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));