/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

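/*
 * Platforms that define VM_MEMATTR_DEVICE_NP (e.g. arm64) allocate the
 * ECAM window unmapped here and remap it with the non-posted device
 * attribute in attach, since config space accesses are expected to be
 * non-posted transactions.
 */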
#if defined(VM_MEMATTR_DEVICE_NP)
#define	PCI_UNMAPPED
#define	PCI_RF_FLAGS	RF_UNMAPPED
#else
#define	PCI_RF_FLAGS	0
#endif

/*
 * Allocate the "ranges"-specified mappings higher up in the rid space to
 * avoid conflicts with definitions in the wild that attribute additional
 * registers to the controller besides just the config space.
 */
#define	RANGE_RID(idx)	((idx) + 100)

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

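/*
 * Core attach: create the parent DMA tag (propagating cache coherency),
 * map the ECAM config window unless a custom config space quirk is set,
 * and set up rmans for the memory, prefetchable memory and I/O port
 * windows described by the "ranges" the bus-specific front end decoded.
 */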
int
pci_host_generic_core_attach(device_t dev)
{
#ifdef PCI_UNMAPPED
	struct resource_map_request req;
	struct resource_map map;
#endif
	struct generic_pcie_core_softc *sc;
	struct rman *rm;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	const char *range_descr;
	char buf[64];
	int domain, error;
	int flags, rid, tuple, type;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	/*
	 * Attempt to set the domain.  If it's missing, or we are unable to
	 * set it, then memory allocations may be placed in the wrong domain.
	 */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(sc->dmat, domain);

	if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
		rid = 0;
		sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    PCI_RF_FLAGS | RF_ACTIVE);
		if (sc->res == NULL) {
			device_printf(dev, "could not allocate memory.\n");
			error = ENXIO;
			goto err_resource;
		}
#ifdef PCI_UNMAPPED
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE_NP;
		error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req,
		    &map);
		if (error != 0) {
			device_printf(dev, "could not map memory.\n");
			/* Release the config window rather than leaking it. */
			goto err_pmem_rman;
		}
		rman_set_mapping(sc->res, &map);
#endif
	}

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s prefetch window",
	    device_get_nameunit(dev));
	sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window",
	    device_get_nameunit(dev));
	sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->io_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s I/O port window",
	    device_get_nameunit(dev));
	sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		rid = RANGE_RID(tuple);
		if (size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			range_descr = "prefetch";
			flags = RF_PREFETCHABLE;
			type = SYS_RES_MEMORY;
			rm = &sc->pmem_rman;
			break;
		case FLAG_TYPE_MEM:
			range_descr = "memory";
			flags = 0;
			type = SYS_RES_MEMORY;
			rm = &sc->mem_rman;
			break;
		case FLAG_TYPE_IO:
			range_descr = "I/O port";
			flags = 0;
			type = SYS_RES_IOPORT;
			rm = &sc->io_rman;
			break;
		default:
			continue;
		}
		if (bootverbose)
			device_printf(dev,
			    "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
			    pci_base, phys_base, size, range_descr);
		error = bus_set_resource(dev, type, rid, phys_base, size);
		if (error != 0) {
			device_printf(dev,
			    "failed to set resource for range %d: %d\n", tuple,
			    error);
			continue;
		}
		sc->ranges[tuple].rid = rid;
		sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
		    RF_ACTIVE | RF_UNMAPPED | flags);
		if (sc->ranges[tuple].res == NULL) {
			device_printf(dev,
			    "failed to allocate resource for range %d\n",
			    tuple);
			continue;
		}
		error = rman_manage_region(rm, pci_base, pci_base + size - 1);
		if (error) {
			device_printf(dev, "rman_manage_region() failed: "
			    "error = %d\n", error);
			continue;
		}
	}

	return (0);

err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

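/*
 * Undo core attach: detach children, release the per-range resources and
 * the ECAM window, then destroy the rmans and the DMA tag.
 */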
int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error, rid, tuple, type;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		rid = sc->ranges[tuple].rid;
		if (sc->ranges[tuple].size == 0) {
			MPASS(sc->ranges[tuple].res == NULL);
			continue; /* empty range element */
		}

		MPASS(rid != -1);
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
			type = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			type = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, type, rid,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, type, rid);
	}
	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

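/*
 * Read from ECAM config space.  The target is encoded in the window
 * offset: bus << 20 | slot << 15 | func << 12 | reg (PCIE_ADDR_OFFSET).
 * Accesses outside the decoded bus/slot/func/register limits return
 * all-ones, as a master abort would.
 */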
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		data = bus_read_1(sc->res, offset);
		break;
	case 2:
		data = le16toh(bus_read_2(sc->res, offset));
		break;
	case 4:
		data = le32toh(bus_read_4(sc->res, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

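/*
 * Write to ECAM config space; writes outside the decoded limits are
 * silently discarded.
 */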
static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		bus_write_1(sc->res, offset, val);
		break;
	case 2:
		bus_write_2(sc->res, offset, htole16(val));
		break;
	case 4:
		bus_write_4(sc->res, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus according to the PCI standard */
}

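/*
 * The pcib ivars: the first decoded bus number, and the ECAM segment
 * number, which doubles as the PCI domain.
 */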
int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (index) {
	case PCIB_IVAR_BUS:
		*result = sc->bus_start;
		return (0);
	case PCIB_IVAR_DOMAIN:
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

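/*
 * Return the rman backing a given resource type so the
 * bus_generic_rman_*() helpers can service children, steering
 * prefetchable allocations to the prefetch window when one exists.
 */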
static struct rman *
generic_pcie_get_rman(device_t dev, int type, u_int flags)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

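/*
 * Bus numbers are handled by the PCI domain logic, memory and I/O ports
 * by our rmans; anything else is passed up to the parent bus.
 */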
int
pci_host_generic_core_release_resource(device_t dev, device_t child,
    struct resource *res)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ecam, child, res));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}

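/*
 * Find the "ranges" entry of the matching type whose PCI address window
 * wholly contains the inclusive range [start, end]; NULL if none does.
 */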
static struct pcie_range *
generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
    rman_res_t end)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);
	uint64_t pci_base;
	uint64_t size;
	int i, space;

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (NULL);
	}

	for (i = 0; i < MAX_RANGES_TUPLES; i++) {
		pci_base = sc->ranges[i].pci_base;
		size = sc->ranges[i].size;
		if (size == 0)
			continue; /* empty range element */

		if (start < pci_base || end >= pci_base + size)
			continue;

		switch (FLAG_TYPE(sc->ranges[i].flags)) {
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_PMEM:
			space = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			space = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}

		if (type == space)
			return (&sc->ranges[i]);
	}
	return (NULL);
}

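/*
 * Translate a PCI bus address into the CPU physical address described by
 * the containing "ranges" entry.
 */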
static int
generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
    rman_res_t *new_start)
{
	struct pcie_range *range;

	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type, start, start);
		if (range == NULL)
			return (ENOENT);
		*new_start = start - range->pci_base + range->phys_base;
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		break;
	}

	return (0);
}

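/*
 * Allocate bus numbers from the PCI domain and memory/I/O ports from our
 * rmans; other types (e.g. IRQs) are forwarded to the parent bus.
 */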
struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;

	sc = device_get_softc(dev);

	switch (type) {
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, res, start,
		    end));
	}
}

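/*
 * Map (and, below, unmap) a child resource through the resource that
 * backs its containing "ranges" entry, at the child's offset within that
 * range.
 */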
static int
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error, type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, r, argsp, map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start,
	    &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_map_resource(dev, range->res, &args, map));
}

static int
generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct pcie_range *range;
	int type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_unmap_resource(dev, child, r, map));
	}

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, range->res, map));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,	pci_host_generic_core_attach),
	DEVMETHOD(device_detach,	pci_host_generic_core_detach),

	DEVMETHOD(bus_get_rman,		generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,	generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,	generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,	pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,	pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,	generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,	generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,	generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,	generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,	generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));