/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

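/*
 * On platforms that define a non-posted device memory attribute
 * (VM_MEMATTR_DEVICE_NP, e.g. arm64), map the ECAM window with it;
 * PCIe config space accesses are required to be non-posted.
 */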
#if defined(VM_MEMATTR_DEVICE_NP)
#define	PCI_UNMAPPED
#define	PCI_RF_FLAGS	RF_UNMAPPED
#else
#define	PCI_RF_FLAGS	0
#endif

/*
 * Mappings specified by the "ranges" property are allocated higher up in
 * the rid space, to avoid conflicts with definitions in the wild that
 * attribute other registers to the controller besides just the config
 * space.
 */
#define	RANGE_RID(idx)	((idx) + 100)

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

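/*
 * Core attach logic shared by the bus-specific (e.g. FDT, ACPI) front
 * ends.  Creates the parent DMA tag, maps the ECAM config window and
 * sets up one rman per window type (prefetchable memory, memory, I/O
 * port), populated from the previously parsed "ranges" translations.
 */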
int
pci_host_generic_core_attach(device_t dev)
{
#ifdef PCI_UNMAPPED
	struct resource_map_request req;
	struct resource_map map;
#endif
	struct generic_pcie_core_softc *sc;
	struct rman *rm;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	const char *range_descr;
	char buf[64];
	int domain, error;
	int flags, rid, tuple;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	/*
	 * Attempt to set the domain.  If it's missing, or we are unable to
	 * set it, then memory allocations may be placed in the wrong domain.
	 */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(sc->dmat, domain);

	if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
		rid = 0;
		sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    PCI_RF_FLAGS | RF_ACTIVE);
		if (sc->res == NULL) {
			device_printf(dev, "could not allocate memory.\n");
			error = ENXIO;
			goto err_resource;
		}
#ifdef PCI_UNMAPPED
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE_NP;
		error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req,
		    &map);
		if (error != 0) {
			device_printf(dev, "could not map memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
			goto err_resource;
		}
		rman_set_mapping(sc->res, &map);
#endif
	}

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s prefetch window",
	    device_get_nameunit(dev));
	sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window",
	    device_get_nameunit(dev));
	sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->io_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s I/O port window",
	    device_get_nameunit(dev));
	sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		rid = RANGE_RID(tuple);
		if (size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			range_descr = "prefetch";
			flags = RF_PREFETCHABLE;
			rm = &sc->pmem_rman;
			break;
		case FLAG_TYPE_MEM:
			range_descr = "memory";
			flags = 0;
			rm = &sc->mem_rman;
			break;
		case FLAG_TYPE_IO:
			range_descr = "I/O port";
			flags = 0;
			rm = &sc->io_rman;
			break;
		default:
			continue;
		}
		if (bootverbose)
			device_printf(dev,
			    "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
			    pci_base, phys_base, size, range_descr);
		error = bus_set_resource(dev, SYS_RES_MEMORY, rid, phys_base,
		    size);
		if (error != 0) {
			device_printf(dev,
			    "failed to set resource for range %d: %d\n", tuple,
			    error);
			continue;
		}
		sc->ranges[tuple].rid = rid;
		sc->ranges[tuple].res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED | flags);
		if (sc->ranges[tuple].res == NULL) {
			device_printf(dev,
			    "failed to allocate resource for range %d\n",
			    tuple);
			continue;
		}
		error = rman_manage_region(rm, pci_base, pci_base + size - 1);
		if (error) {
			device_printf(dev, "rman_manage_region() failed: "
			    "error = %d\n", error);
			continue;
		}
	}

	return (0);

err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

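/*
 * Tear down everything pci_host_generic_core_attach() set up: detach
 * children, release the per-range resources, destroy the rmans and
 * their descriptions, and release the config window and DMA tag.
 */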
int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error, rid, tuple;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		rid = sc->ranges[tuple].rid;
		if (sc->ranges[tuple].size == 0) {
			MPASS(sc->ranges[tuple].res == NULL);
			continue; /* empty range element */
		}

		MPASS(rid != -1);
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_IO:
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY, rid,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, SYS_RES_MEMORY, rid);
	}
	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

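/*
 * Read a config space register through the memory-mapped ECAM window.
 * Out-of-range bus/slot/func/reg values return all-ones, mimicking a
 * read from a nonexistent device.
 */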
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		data = bus_read_1(sc->res, offset);
		break;
	case 2:
		data = le16toh(bus_read_2(sc->res, offset));
		break;
	case 4:
		data = le32toh(bus_read_4(sc->res, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

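/*
 * Write a config space register through the ECAM window.  Writes to
 * addresses outside the decoded range are silently dropped.
 */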
static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		bus_write_1(sc->res, offset, val);
		break;
	case 2:
		bus_write_2(sc->res, offset, htole16(val));
		break;
	case 4:
		bus_write_4(sc->res, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus according to the PCI standard */
}

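/*
 * The domain reported to the PCI bus is the ECAM segment number; the
 * secondary bus is the first bus number decoded by this controller.
 */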
int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (index) {
	case PCIB_IVAR_BUS:
		*result = sc->bus_start;
		return (0);
	case PCIB_IVAR_DOMAIN:
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

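/*
 * Select the rman that owns a child allocation.  Prefetchable memory
 * requests fall back to the regular memory window when no prefetchable
 * range was configured.
 */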
static struct rman *
generic_pcie_get_rman(device_t dev, int type, u_int flags)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

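/*
 * Bus numbers are released back to the per-domain PCI bus rman; memory
 * and I/O allocations go back to the rmans initialized at attach time.
 */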
int
pci_host_generic_core_release_resource(device_t dev, device_t child,
    struct resource *res)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ecam, child, res));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}

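/*
 * Find the "ranges" entry whose PCI bus address window contains
 * [start, end] and whose type matches the requested resource type, or
 * NULL if no configured range covers the request.
 */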
static struct pcie_range *
generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
    rman_res_t end)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);
	uint64_t pci_base;
	uint64_t size;
	int i, space;

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (NULL);
	}

	for (i = 0; i < MAX_RANGES_TUPLES; i++) {
		pci_base = sc->ranges[i].pci_base;
		size = sc->ranges[i].size;
		if (size == 0)
			continue; /* empty range element */

		if (start < pci_base || end >= pci_base + size)
			continue;

		switch (FLAG_TYPE(sc->ranges[i].flags)) {
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_PMEM:
			space = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			space = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}

		if (type == space)
			return (&sc->ranges[i]);
	}
	return (NULL);
}

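/*
 * Translate a child resource's PCI bus address into the CPU physical
 * address given by the matching "ranges" entry.
 */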
static int
generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
    rman_res_t *new_start)
{
	struct pcie_range *range;

	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type, start, start);
		if (range == NULL)
			return (ENOENT);
		*new_start = start - range->pci_base + range->phys_base;
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		break;
	}

	return (0);
}

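/*
 * Allocate bus numbers from the per-domain PCI bus rman, and memory and
 * I/O ranges from the rmans backing the controller's windows; anything
 * else is passed up to the parent bus.
 */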
struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;

	sc = device_get_softc(dev);

	switch (type) {
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, res, start,
		    end));
	}
}

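/*
 * Map a child allocation by reusing the mapping of the range resource
 * that backs it: validate the request, find the containing range, and
 * map at the corresponding offset within that range's resource.
 */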
static int
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error, type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, r, argsp, map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start,
	    &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_map_resource(dev, range->res, &args, map));
}

static int
generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct pcie_range *range;
	int type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_unmap_resource(dev, child, r, map));
	}

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, range->res, map));
}

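/*
 * Hand children the tag created at attach time so the coherent flag
 * (and any domain setting) propagates to their DMA allocations.
 */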
static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(device_detach,		pci_host_generic_core_detach),

	DEVMETHOD(bus_get_rman,			generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource,	generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,		generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,		generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));