/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2008 MARVELL INTERNATIONAL LTD.
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2015 Semihalf
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Marvell integrated PCI/PCI-Express controller driver.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/devmap.h>

#include <machine/fdt.h>
#include <machine/intr.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_pci.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>

#include "ofw_bus_if.h"
#include "pcib_if.h"

#include <machine/resource.h>
#include <machine/bus.h>

#include <arm/mv/mvreg.h>
#include <arm/mv/mvvar.h>
#include <arm/mv/mvwin.h>

#ifdef DEBUG
#define debugf(fmt, args...) do { printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

/*
 * Code and data related to fdt-based PCI configuration.
 *
 * This stuff used to be in dev/fdt/fdt_pci.c and fdt_common.h, but it was
 * always Marvell-specific so that was deleted and the code now lives here.
 */

struct mv_pci_range {
	u_long	base_pci;
	u_long	base_parent;
	u_long	len;
};

#define FDT_RANGES_CELLS	((3 + 3 + 2) * 2)
#define PCI_SPACE_LEN		0x00400000
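
/*
 * For illustration, a 'ranges' entry this decoder expects consists of 3
 * child (PCI) address cells, the parent's address cells and 2 size cells;
 * FDT_RANGES_CELLS above sizes the buffer for the worst case of 3 parent
 * cells and 2 entries. A hypothetical fragment (not from any real DTS,
 * assuming #address-cells = 1 in the parent) with one MEM and one I/O
 * window could look like:
 *
 *	ranges = <0x02000000 0x0 0x80000000  0x80000000  0x0 0x00100000
 *		  0x01000000 0x0 0x00000000  0xc0000000  0x0 0x00010000>;
 *
 * where bit 0x02000000 of the first cell marks a memory window and bit
 * 0x01000000 an I/O window, matching the checks in mv_pci_ranges_decode()
 * below.
 */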

static void
mv_pci_range_dump(struct mv_pci_range *range)
{
#ifdef DEBUG
	printf("\n");
	printf("  base_pci = 0x%08lx\n", range->base_pci);
	printf("  base_par = 0x%08lx\n", range->base_parent);
	printf("  len      = 0x%08lx\n", range->len);
#endif
}

static int
mv_pci_ranges_decode(phandle_t node, struct mv_pci_range *io_space,
    struct mv_pci_range *mem_space)
{
	pcell_t ranges[FDT_RANGES_CELLS];
	struct mv_pci_range *pci_space;
	pcell_t addr_cells, size_cells, par_addr_cells;
	pcell_t *rangesptr;
	pcell_t cell0, cell2;
	int tuple_size, tuples, i, rv, offset_cells, len;
	int portid, is_io_space;

	/*
	 * Retrieve 'ranges' property.
	 */
	if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0)
		return (EINVAL);
	if (addr_cells != 3 || size_cells != 2)
		return (ERANGE);

	par_addr_cells = fdt_parent_addr_cells(node);
	if (par_addr_cells > 3)
		return (ERANGE);

	len = OF_getproplen(node, "ranges");
	if (len > sizeof(ranges))
		return (ENOMEM);

	if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0)
		return (EINVAL);

	tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells +
	    size_cells);
	tuples = len / tuple_size;

	/*
	 * Initialize the ranges so that we don't have to worry about
	 * having them all defined in the FDT. In particular, it is
	 * perfectly fine not to want I/O space on PCI buses.
	 */
	bzero(io_space, sizeof(*io_space));
	bzero(mem_space, sizeof(*mem_space));

	rangesptr = &ranges[0];
	offset_cells = 0;
	for (i = 0; i < tuples; i++) {
		cell0 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		/* cell1 */
		rangesptr++;
		cell2 = fdt_data_get((void *)rangesptr, 1);
		rangesptr++;
		portid = fdt_data_get((void *)(rangesptr+1), 1);

		if (cell0 & 0x02000000) {
			pci_space = mem_space;
			is_io_space = 0;
		} else if (cell0 & 0x01000000) {
			pci_space = io_space;
			is_io_space = 1;
		} else {
			rv = ERANGE;
			goto out;
		}

		if (par_addr_cells == 3) {
			/*
			 * This is a PCI subnode 'ranges'. Skip cell0 and
			 * cell1 of this entry and only use cell2.
			 */
			offset_cells = 2;
			rangesptr += offset_cells;
		}

		if ((par_addr_cells - offset_cells) > 2) {
			rv = ERANGE;
			goto out;
		}
		pci_space->base_parent = fdt_data_get((void *)rangesptr,
		    par_addr_cells - offset_cells);
		rangesptr += par_addr_cells - offset_cells;

		if (size_cells > 2) {
			rv = ERANGE;
			goto out;
		}
		pci_space->len = fdt_data_get((void *)rangesptr, size_cells);
		rangesptr += size_cells;

		pci_space->base_pci = cell2;

		if (pci_space->len == 0) {
			pci_space->len = PCI_SPACE_LEN;
			pci_space->base_parent = fdt_immr_va +
			    PCI_SPACE_LEN * (2 * portid + is_io_space);
		}
	}
	rv = 0;
out:
	return (rv);
}

static int
mv_pci_ranges(phandle_t node, struct mv_pci_range *io_space,
    struct mv_pci_range *mem_space)
{
	int err;

	debugf("Processing PCI node: %x\n", node);
	if ((err = mv_pci_ranges_decode(node, io_space, mem_space)) != 0) {
		debugf("could not decode parent PCI node 'ranges'\n");
		return (err);
	}

	debugf("Post fixup dump:\n");
	mv_pci_range_dump(io_space);
	mv_pci_range_dump(mem_space);
	return (0);
}

int
mv_pci_devmap(phandle_t node, struct devmap_entry *devmap, vm_offset_t io_va,
    vm_offset_t mem_va)
{
	struct mv_pci_range io_space, mem_space;
	int error;

	if ((error = mv_pci_ranges_decode(node, &io_space, &mem_space)) != 0)
		return (error);

	devmap->pd_va = (io_va ? io_va : io_space.base_parent);
	devmap->pd_pa = io_space.base_parent;
	devmap->pd_size = io_space.len;
	devmap++;

	devmap->pd_va = (mem_va ? mem_va : mem_space.base_parent);
	devmap->pd_pa = mem_space.base_parent;
	devmap->pd_size = mem_space.len;
	return (0);
}
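
/*
 * A minimal usage sketch (hypothetical, not part of this driver): platform
 * devmap initialization is expected to hand mv_pci_devmap() a two-entry
 * scratch array, then register both entries, e.g.:
 *
 *	struct devmap_entry entries[2];
 *
 *	if (mv_pci_devmap(node, entries, MV_PCI_IO_VA_BASE,
 *	    MV_PCI_MEM_VA_BASE) == 0) {
 *		devmap_add_entry(entries[0].pd_pa, entries[0].pd_size);
 *		devmap_add_entry(entries[1].pd_pa, entries[1].pd_size);
 *	}
 *
 * The VA arguments override the parent bus addresses when a fixed kernel
 * mapping is desired; the MV_PCI_*_VA_BASE names above are assumptions
 * made for the sake of the example.
 */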

/*
 * Code and data related to the Marvell pcib driver.
 */

#define PCI_CFG_ENA		(1U << 31)
#define PCI_CFG_BUS(bus)	(((bus) & 0xff) << 16)
#define PCI_CFG_DEV(dev)	(((dev) & 0x1f) << 11)
#define PCI_CFG_FUN(fun)	(((fun) & 0x7) << 8)
#define PCI_CFG_PCIE_REG(reg)	((reg) & 0xfc)
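
/*
 * Worked example: a read of register 0x10 (BAR0) on bus 1, device 2,
 * function 0 encodes as
 *
 *	PCI_CFG_ENA | PCI_CFG_BUS(1) | PCI_CFG_DEV(2) | PCI_CFG_FUN(0) |
 *	    PCI_CFG_PCIE_REG(0x10)
 *	= 0x80000000 | 0x00010000 | 0x00001000 | 0x00000000 | 0x00000010
 *	= 0x80011010
 *
 * which is the value mv_pcib_hw_cfgread() programs into the CFG_ADDR
 * register before touching CFG_DATA.
 */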

#define PCI_REG_CFG_ADDR	0x0C78
#define PCI_REG_CFG_DATA	0x0C7C

#define PCIE_REG_CFG_ADDR	0x18F8
#define PCIE_REG_CFG_DATA	0x18FC
#define PCIE_REG_CONTROL	0x1A00
#define PCIE_CTRL_LINK1X	0x00000001
#define PCIE_REG_STATUS		0x1A04
#define PCIE_REG_IRQ_MASK	0x1910

#define PCIE_CONTROL_ROOT_CMPLX	(1 << 1)
#define PCIE_CONTROL_HOT_RESET	(1 << 24)

#define PCIE_LINK_TIMEOUT	1000000

#define PCIE_STATUS_LINK_DOWN	1
#define PCIE_STATUS_DEV_OFFS	16

/* Minimum PCI Memory and I/O allocations taken from PCI spec (in bytes) */
#define PCI_MIN_IO_ALLOC	4
#define PCI_MIN_MEM_ALLOC	16

#define BITS_PER_UINT32		(NBBY * sizeof(uint32_t))
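
/*
 * Sizing note: each bit in the sc_mem_map/sc_io_map arrays below tracks
 * one minimum allocation unit, so a map needs (slice size / minimum
 * allocation) bits. For example, with a hypothetical 64 MB memory slice
 * and the 16-byte minimum above, that is 64 MB / 16 = 4M bits, or 128K
 * uint32_t words; the array dimensions below express exactly this
 * division.
 */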

struct mv_pcib_softc {
	device_t	sc_dev;

	struct rman	sc_mem_rman;
	bus_addr_t	sc_mem_base;
	bus_addr_t	sc_mem_size;
	uint32_t	sc_mem_map[MV_PCI_MEM_SLICE_SIZE /
	    (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)];
	int		sc_win_target;
	int		sc_mem_win_attr;

	struct rman	sc_io_rman;
	bus_addr_t	sc_io_base;
	bus_addr_t	sc_io_size;
	uint32_t	sc_io_map[MV_PCI_IO_SLICE_SIZE /
	    (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)];
	int		sc_io_win_attr;

	struct resource	*sc_res;
	bus_space_handle_t sc_bsh;
	bus_space_tag_t	sc_bst;
	int		sc_rid;

	struct mtx	sc_msi_mtx;
	uint32_t	sc_msi_bitmap;

	int		sc_busnr;	/* Host bridge bus number */
	int		sc_devnr;	/* Host bridge device number */
	int		sc_type;
	int		sc_mode;	/* Endpoint / Root Complex */

	int		sc_msi_supported;
	int		sc_skip_enable_procedure;
	int		sc_enable_find_root_slot;
	struct ofw_bus_iinfo	sc_pci_iinfo;

	int		ap_segment;	/* PCI domain */
};

/* Local forward prototypes */
static int mv_pcib_decode_win(phandle_t, struct mv_pcib_softc *);
static void mv_pcib_hw_cfginit(void);
static uint32_t mv_pcib_hw_cfgread(struct mv_pcib_softc *, u_int, u_int,
    u_int, u_int, int);
static void mv_pcib_hw_cfgwrite(struct mv_pcib_softc *, u_int, u_int,
    u_int, u_int, uint32_t, int);
static int mv_pcib_init(struct mv_pcib_softc *, int, int);
static int mv_pcib_init_all_bars(struct mv_pcib_softc *, int, int, int, int);
static void mv_pcib_init_bridge(struct mv_pcib_softc *, int, int, int);
static inline void pcib_write_irq_mask(struct mv_pcib_softc *, uint32_t);
static void mv_pcib_enable(struct mv_pcib_softc *, uint32_t);
static int mv_pcib_mem_init(struct mv_pcib_softc *);

/* Forward prototypes */
static int mv_pcib_probe(device_t);
static int mv_pcib_attach(device_t);

static struct rman *mv_pcib_get_rman(device_t, int, u_int);
static struct resource *mv_pcib_alloc_resource(device_t, device_t, int, int *,
    rman_res_t, rman_res_t, rman_res_t, u_int);
static int mv_pcib_adjust_resource(device_t, device_t, struct resource *,
    rman_res_t, rman_res_t);
static int mv_pcib_release_resource(device_t, device_t, struct resource *);
static int mv_pcib_activate_resource(device_t, device_t, struct resource *);
static int mv_pcib_deactivate_resource(device_t, device_t, struct resource *);
static int mv_pcib_map_resource(device_t, device_t, struct resource *,
    struct resource_map_request *, struct resource_map *);
static int mv_pcib_unmap_resource(device_t, device_t, struct resource *,
    struct resource_map *);
static int mv_pcib_read_ivar(device_t, device_t, int, uintptr_t *);
static int mv_pcib_write_ivar(device_t, device_t, int, uintptr_t);

static int mv_pcib_maxslots(device_t);
static uint32_t mv_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int);
static void mv_pcib_write_config(device_t, u_int, u_int, u_int, u_int,
    uint32_t, int);
static int mv_pcib_route_interrupt(device_t, device_t, int);

static int mv_pcib_alloc_msi(device_t, device_t, int, int, int *);
static int mv_pcib_map_msi(device_t, device_t, int, uint64_t *, uint32_t *);
static int mv_pcib_release_msi(device_t, device_t, int, int *);

/*
 * Bus interface definitions.
 */
static device_method_t mv_pcib_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mv_pcib_probe),
	DEVMETHOD(device_attach,	mv_pcib_attach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	mv_pcib_read_ivar),
	DEVMETHOD(bus_write_ivar,	mv_pcib_write_ivar),
	DEVMETHOD(bus_get_rman,		mv_pcib_get_rman),
	DEVMETHOD(bus_alloc_resource,	mv_pcib_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	mv_pcib_adjust_resource),
	DEVMETHOD(bus_release_resource,	mv_pcib_release_resource),
	DEVMETHOD(bus_activate_resource, mv_pcib_activate_resource),
	DEVMETHOD(bus_deactivate_resource, mv_pcib_deactivate_resource),
	DEVMETHOD(bus_map_resource,	mv_pcib_map_resource),
	DEVMETHOD(bus_unmap_resource,	mv_pcib_unmap_resource),
	DEVMETHOD(bus_setup_intr,	bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,	mv_pcib_maxslots),
	DEVMETHOD(pcib_read_config,	mv_pcib_read_config),
	DEVMETHOD(pcib_write_config,	mv_pcib_write_config),
	DEVMETHOD(pcib_route_interrupt,	mv_pcib_route_interrupt),
	DEVMETHOD(pcib_request_feature,	pcib_request_feature_allow),

	DEVMETHOD(pcib_alloc_msi,	mv_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	mv_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		mv_pcib_map_msi),

	/* OFW bus interface */
	DEVMETHOD(ofw_bus_get_compat,	ofw_bus_gen_get_compat),
	DEVMETHOD(ofw_bus_get_model,	ofw_bus_gen_get_model),
	DEVMETHOD(ofw_bus_get_name,	ofw_bus_gen_get_name),
	DEVMETHOD(ofw_bus_get_node,	ofw_bus_gen_get_node),
	DEVMETHOD(ofw_bus_get_type,	ofw_bus_gen_get_type),

	DEVMETHOD_END
};

static driver_t mv_pcib_driver = {
	"pcib",
	mv_pcib_methods,
	sizeof(struct mv_pcib_softc),
};

DRIVER_MODULE(mv_pcib, ofwbus, mv_pcib_driver, 0, 0);
DRIVER_MODULE(mv_pcib, pcib_ctrl, mv_pcib_driver, 0, 0);

static struct mtx pcicfg_mtx;

static int
mv_pcib_probe(device_t self)
{
	phandle_t node;

	node = ofw_bus_get_node(self);
	if (!mv_fdt_is_type(node, "pci"))
		return (ENXIO);

	if (!(ofw_bus_is_compatible(self, "mrvl,pcie") ||
	    ofw_bus_is_compatible(self, "mrvl,pci") ||
	    ofw_bus_node_is_compatible(
	    OF_parent(node), "marvell,armada-370-pcie")))
		return (ENXIO);

	if (!ofw_bus_status_okay(self))
		return (ENXIO);

	device_set_desc(self, "Marvell Integrated PCI/PCI-E Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
mv_pcib_attach(device_t self)
{
	struct mv_pcib_softc *sc;
	phandle_t node, parnode;
	uint32_t val, reg0;
	int err, bus, devfn, port_id;

	sc = device_get_softc(self);
	sc->sc_dev = self;

	node = ofw_bus_get_node(self);
	parnode = OF_parent(node);

	if (OF_getencprop(node, "marvell,pcie-port", &port_id,
	    sizeof(port_id)) <= 0) {
		/* If the port ID is absent from the FDT, default to 0 */
		if (!OF_hasprop(node, "marvell,pcie-port"))
			port_id = 0;
		else
			return (ENXIO);
	}

	sc->ap_segment = port_id;

	if (ofw_bus_node_is_compatible(node, "mrvl,pcie")) {
		sc->sc_type = MV_TYPE_PCIE;
		sc->sc_win_target = MV_WIN_PCIE_TARGET(port_id);
		sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR(port_id);
		sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR(port_id);
		sc->sc_skip_enable_procedure = 1;
	} else if (ofw_bus_node_is_compatible(parnode,
	    "marvell,armada-370-pcie")) {
		sc->sc_type = MV_TYPE_PCIE;
		sc->sc_win_target = MV_WIN_PCIE_TARGET_ARMADA38X(port_id);
		sc->sc_mem_win_attr = MV_WIN_PCIE_MEM_ATTR_ARMADA38X(port_id);
		sc->sc_io_win_attr = MV_WIN_PCIE_IO_ATTR_ARMADA38X(port_id);
		sc->sc_enable_find_root_slot = 1;
	} else if (ofw_bus_node_is_compatible(node, "mrvl,pci")) {
		sc->sc_type = MV_TYPE_PCI;
		sc->sc_win_target = MV_WIN_PCI_TARGET;
		sc->sc_mem_win_attr = MV_WIN_PCI_MEM_ATTR;
		sc->sc_io_win_attr = MV_WIN_PCI_IO_ATTR;
	} else
		return (ENXIO);

	/*
	 * Retrieve our mem-mapped registers range.
	 */
	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL) {
		device_printf(self, "could not map memory\n");
		return (ENXIO);
	}
	sc->sc_bst = rman_get_bustag(sc->sc_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res);

	val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_CONTROL);
	sc->sc_mode = (val & PCIE_CONTROL_ROOT_CMPLX ? MV_MODE_ROOT :
	    MV_MODE_ENDPOINT);

	/*
	 * Get PCI interrupt info.
	 */
	if (sc->sc_mode == MV_MODE_ROOT)
		ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(pcell_t));

	/*
	 * Configure decode windows for PCI(E) access.
	 */
	if (mv_pcib_decode_win(node, sc) != 0)
		return (ENXIO);

	mv_pcib_hw_cfginit();

	/*
	 * Enable PCIE device.
	 */
	mv_pcib_enable(sc, port_id);

	/*
	 * Memory management.
	 */
	err = mv_pcib_mem_init(sc);
	if (err)
		return (err);

	/*
	 * Preliminary bus enumeration to find the first attached device and
	 * set the bus number from which the actual enumeration should start.
	 */
	for (bus = 0; bus < PCI_BUSMAX; bus++) {
		for (devfn = 0; devfn < mv_pcib_maxslots(self); devfn++) {
			reg0 = mv_pcib_read_config(self, bus, devfn,
			    devfn & 0x7, 0x0, 4);
			if (reg0 == (~0U))
				continue;	/* no device */
			else {
				sc->sc_busnr = bus;	/* update bus number */
				break;
			}
		}
	}

	if (sc->sc_mode == MV_MODE_ROOT) {
		err = mv_pcib_init(sc, sc->sc_busnr,
		    mv_pcib_maxslots(sc->sc_dev));
		if (err)
			goto error;

		device_add_child(self, "pci", DEVICE_UNIT_ANY);
	} else {
		sc->sc_devnr = 1;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    PCIE_REG_STATUS, 1 << PCIE_STATUS_DEV_OFFS);
		device_add_child(self, "pci_ep", DEVICE_UNIT_ANY);
	}

	mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF);
	bus_attach_children(self);
	return (0);

error:
	/* XXX SYS_RES_ should be released here */
	rman_fini(&sc->sc_mem_rman);
	rman_fini(&sc->sc_io_rman);

	return (err);
}

static void
mv_pcib_enable(struct mv_pcib_softc *sc, uint32_t unit)
{
	uint32_t val;
	int timeout;

	if (sc->sc_skip_enable_procedure)
		goto pcib_enable_root_mode;

	/*
	 * Check if PCIE device is enabled.
	 */
	if ((sc->sc_skip_enable_procedure == 0) &&
	    (read_cpu_ctrl(CPU_CONTROL) & CPU_CONTROL_PCIE_DISABLE(unit))) {
		write_cpu_ctrl(CPU_CONTROL, read_cpu_ctrl(CPU_CONTROL) &
		    ~(CPU_CONTROL_PCIE_DISABLE(unit)));

		timeout = PCIE_LINK_TIMEOUT;
		val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    PCIE_REG_STATUS);
		while (((val & PCIE_STATUS_LINK_DOWN) == 1) && (timeout > 0)) {
			DELAY(1000);
			timeout -= 1000;
			val = bus_space_read_4(sc->sc_bst, sc->sc_bsh,
			    PCIE_REG_STATUS);
		}
	}

pcib_enable_root_mode:
	if (sc->sc_mode == MV_MODE_ROOT) {
		/*
		 * Enable PCI bridge.
		 */
		val = bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND);
		val |= PCIM_CMD_SERRESPEN | PCIM_CMD_BUSMASTEREN |
		    PCIM_CMD_MEMEN | PCIM_CMD_PORTEN;
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIR_COMMAND, val);
	}
}

static int
mv_pcib_mem_init(struct mv_pcib_softc *sc)
{
	int err;

	/*
	 * Memory management.
	 */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	err = rman_init(&sc->sc_mem_rman);
	if (err)
		return (err);

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	err = rman_init(&sc->sc_io_rman);
	if (err) {
		rman_fini(&sc->sc_mem_rman);
		return (err);
	}

	err = rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base,
	    sc->sc_mem_base + sc->sc_mem_size - 1);
	if (err)
		goto error;

	err = rman_manage_region(&sc->sc_io_rman, sc->sc_io_base,
	    sc->sc_io_base + sc->sc_io_size - 1);
	if (err)
		goto error;

	return (0);

error:
	rman_fini(&sc->sc_mem_rman);
	rman_fini(&sc->sc_io_rman);

	return (err);
}

static inline uint32_t
pcib_bit_get(uint32_t *map, uint32_t bit)
{
	uint32_t n = bit / BITS_PER_UINT32;

	bit = bit % BITS_PER_UINT32;
	return (map[n] & (1 << bit));
}

static inline void
pcib_bit_set(uint32_t *map, uint32_t bit)
{
	uint32_t n = bit / BITS_PER_UINT32;

	bit = bit % BITS_PER_UINT32;
	map[n] |= (1 << bit);
}

static inline uint32_t
pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits)
{
	uint32_t i;

	for (i = start; i < start + bits; i++)
		if (pcib_bit_get(map, i))
			return (0);

	return (1);
}

static inline void
pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits)
{
	uint32_t i;

	for (i = start; i < start + bits; i++)
		pcib_bit_set(map, i);
}

/*
 * The idea of this allocator is taken from ARM No-Cache memory
 * management code (sys/arm/arm/vm_machdep.c).
 */
static bus_addr_t
pcib_alloc(struct mv_pcib_softc *sc, uint32_t smask)
{
	uint32_t bits, bits_limit, i, *map, min_alloc, size;
	bus_addr_t addr = 0;
	bus_addr_t base;

	if (smask & 1) {
		base = sc->sc_io_base;
		min_alloc = PCI_MIN_IO_ALLOC;
		bits_limit = sc->sc_io_size / min_alloc;
		map = sc->sc_io_map;
		smask &= ~0x3;
	} else {
		base = sc->sc_mem_base;
		min_alloc = PCI_MIN_MEM_ALLOC;
		bits_limit = sc->sc_mem_size / min_alloc;
		map = sc->sc_mem_map;
		smask &= ~0xF;
	}

	size = ~smask + 1;
	bits = size / min_alloc;

	for (i = 0; i + bits <= bits_limit; i += bits)
		if (pcib_map_check(map, i, bits)) {
			pcib_map_set(map, i, bits);
			addr = base + (i * min_alloc);
			return (addr);
		}

	return (addr);
}
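
/*
 * Worked example (hypothetical numbers): a 32-bit memory BAR that reads
 * back as smask = 0xFFF00000 after the sizing write decodes 1 MB, since
 * ~0xFFF00000 + 1 = 0x00100000. With PCI_MIN_MEM_ALLOC = 16 that is
 * 65536 bits scanned in sc_mem_map; pcib_alloc() steps the map in 1 MB
 * strides, so each allocation is naturally aligned to its own size
 * (relative to the decode window base, which is assumed aligned).
 */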

static int
mv_pcib_init_bar(struct mv_pcib_softc *sc, int bus, int slot, int func,
    int barno)
{
	uint32_t addr, bar;
	int reg, width;

	reg = PCIR_BAR(barno);

	/*
	 * Need to init the BAR register with 0xffffffff before correct
	 * value can be read.
	 */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, ~0, 4);
	bar = mv_pcib_read_config(sc->sc_dev, bus, slot, func, reg, 4);
	if (bar == 0)
		return (1);

	/* Calculate BAR size: 64 or 32 bit (in 32-bit units) */
	width = ((bar & 7) == 4) ? 2 : 1;

	addr = pcib_alloc(sc, bar);
	if (!addr)
		return (-1);

	if (bootverbose)
		printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n",
		    bus, slot, func, reg, bar, addr);

	mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg, addr, 4);
	if (width == 2)
		mv_pcib_write_config(sc->sc_dev, bus, slot, func, reg + 4,
		    0, 4);

	return (width);
}
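
/*
 * For reference, the low bits of the value read back above carry the BAR
 * type: bit 0 set means an I/O BAR (handled by pcib_alloc() via its
 * "smask & 1" test), while (bar & 7) == 4 means a 64-bit memory BAR. For
 * example, a read-back of 0xFFF00004 describes a 1 MB, 64-bit,
 * non-prefetchable BAR whose upper half is cleared by the reg + 4 write
 * above.
 */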

static void
mv_pcib_init_bridge(struct mv_pcib_softc *sc, int bus, int slot, int func)
{
	bus_addr_t io_base, mem_base;
	uint32_t io_limit, mem_limit;
	int secbus;

	io_base = sc->sc_io_base;
	io_limit = io_base + sc->sc_io_size - 1;
	mem_base = sc->sc_mem_base;
	mem_limit = mem_base + sc->sc_mem_size - 1;

	/* Configure I/O decode registers */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEL_1,
	    io_base >> 8, 1);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOBASEH_1,
	    io_base >> 16, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITL_1,
	    io_limit >> 8, 1);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_IOLIMITH_1,
	    io_limit >> 16, 2);

	/* Configure memory decode registers */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMBASE_1,
	    mem_base >> 16, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_MEMLIMIT_1,
	    mem_limit >> 16, 2);

	/* Disable memory prefetch decode */
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEL_1,
	    0x10, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMBASEH_1,
	    0x0, 4);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITL_1,
	    0xF, 2);
	mv_pcib_write_config(sc->sc_dev, bus, slot, func, PCIR_PMLIMITH_1,
	    0x0, 4);

	secbus = mv_pcib_read_config(sc->sc_dev, bus, slot, func,
	    PCIR_SECBUS_1, 1);

	/* Configure buses behind the bridge */
	mv_pcib_init(sc, secbus, PCI_SLOTMAX);
}
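
/*
 * Encoding note: type-1 bridge windows hold only the upper address bits.
 * For a hypothetical I/O window starting at 0x1000, io_base >> 8 = 0x10
 * lands in PCIR_IOBASEL_1, and for a memory window starting at
 * 0xE0000000, mem_base >> 16 = 0xE000 lands in PCIR_MEMBASE_1 (4 KB,
 * resp. 1 MB granularity). The prefetchable window is disabled above by
 * programming a base (0x10) greater than its limit (0xF), which is the
 * architected way to turn that decoder off.
 */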

static int
mv_pcib_init(struct mv_pcib_softc *sc, int bus, int maxslot)
{
	int slot, func, maxfunc, error;
	uint8_t hdrtype, command, class, subclass;

	for (slot = 0; slot <= maxslot; slot++) {
		maxfunc = 0;
		for (func = 0; func <= maxfunc; func++) {
			hdrtype = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_HDRTYPE, 1);

			if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
				continue;

			if (func == 0 && (hdrtype & PCIM_MFDEV))
				maxfunc = PCI_FUNCMAX;

			command = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_COMMAND, 1);
			command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN);
			mv_pcib_write_config(sc->sc_dev, bus, slot, func,
			    PCIR_COMMAND, command, 1);

			error = mv_pcib_init_all_bars(sc, bus, slot, func,
			    hdrtype);

			if (error)
				return (error);

			command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN |
			    PCIM_CMD_PORTEN;
			mv_pcib_write_config(sc->sc_dev, bus, slot, func,
			    PCIR_COMMAND, command, 1);

			/* Handle PCI-PCI bridges */
			class = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_CLASS, 1);
			subclass = mv_pcib_read_config(sc->sc_dev, bus, slot,
			    func, PCIR_SUBCLASS, 1);

			if (class != PCIC_BRIDGE ||
			    subclass != PCIS_BRIDGE_PCI)
				continue;

			mv_pcib_init_bridge(sc, bus, slot, func);
		}
	}

	/* Enable all ABCD interrupts */
	pcib_write_irq_mask(sc, (0xF << 24));

	return (0);
}

static int
mv_pcib_init_all_bars(struct mv_pcib_softc *sc, int bus, int slot,
    int func, int hdrtype)
{
	int maxbar, bar, i;

	maxbar = (hdrtype & PCIM_HDRTYPE) ? 0 : 6;
	bar = 0;

	/* Program the base address registers */
	while (bar < maxbar) {
		i = mv_pcib_init_bar(sc, bus, slot, func, bar);
		bar += i;
		if (i < 0) {
			device_printf(sc->sc_dev,
			    "PCI IO/Memory space exhausted\n");
			return (ENOMEM);
		}
	}

	return (0);
}

static struct rman *
mv_pcib_get_rman(device_t dev, int type, u_int flags)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->sc_io_rman);
	case SYS_RES_MEMORY:
		return (&sc->sc_mem_rman);
	default:
		return (NULL);
	}
}

static struct resource *
mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	case PCI_RES_BUS:
		return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start,
		    end, count, flags));
	default:
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), dev,
		    type, rid, start, end, count, flags));
	}

	if (RMAN_IS_DEFAULT_RANGE(start, end)) {
		start = sc->sc_mem_base;
		end = sc->sc_mem_base + sc->sc_mem_size - 1;
		count = sc->sc_mem_size;
	}

	if ((start < sc->sc_mem_base) || (start + count - 1 != end) ||
	    (end > sc->sc_mem_base + sc->sc_mem_size - 1))
		return (NULL);

	return (bus_generic_rman_alloc_resource(dev, child, type, rid,
	    start, end, count, flags));
}

static int
mv_pcib_adjust_resource(device_t dev, device_t child,
    struct resource *r, rman_res_t start, rman_res_t end)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (rman_get_type(r)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, r, start,
		    end));
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ap_segment, child, r, start,
		    end));
	default:
		return (bus_generic_adjust_resource(dev, child, r, start, end));
	}
}

static int
mv_pcib_release_resource(device_t dev, device_t child, struct resource *res)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (rman_get_type(res)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, res));
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ap_segment, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}

static int
mv_pcib_activate_resource(device_t dev, device_t child, struct resource *r)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (rman_get_type(r)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ap_segment, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}

static int
mv_pcib_deactivate_resource(device_t dev, device_t child, struct resource *r)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (rman_get_type(r)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ap_segment, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}

static int
mv_pcib_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	rman_res_t length, start;
	int error;

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	/* Mappings are only supported on I/O and memory resources. */
	switch (rman_get_type(r)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (EINVAL);
	}

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start,
	    &length);
	if (error)
		return (error);

	map->r_bustag = fdtbus_bs_tag;
	map->r_bushandle = start;
	map->r_size = length;
	return (0);
}

static int
mv_pcib_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	switch (rman_get_type(r)) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (0);
	default:
		return (EINVAL);
	}
}

static int
mv_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_BUS:
		*result = sc->sc_busnr;
		return (0);
	case PCIB_IVAR_DOMAIN:
		*result = device_get_unit(dev);
		return (0);
	}

	return (ENOENT);
}

static int
mv_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	switch (which) {
	case PCIB_IVAR_BUS:
		sc->sc_busnr = value;
		return (0);
	}

	return (ENOENT);
}

static inline void
pcib_write_irq_mask(struct mv_pcib_softc *sc, uint32_t mask)
{

	if (sc->sc_type != MV_TYPE_PCIE)
		return;

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_IRQ_MASK, mask);
}

static void
mv_pcib_hw_cfginit(void)
{
	static int opened = 0;

	if (opened)
		return;

	mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN);
	opened = 1;
}

static uint32_t
mv_pcib_hw_cfgread(struct mv_pcib_softc *sc, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	uint32_t addr, data, ca, cd;

	ca = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR;
	cd = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA;
	addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) |
	    PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg);

	mtx_lock_spin(&pcicfg_mtx);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr);

	data = ~0;
	switch (bytes) {
	case 1:
		data = bus_space_read_1(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 3));
		break;
	case 2:
		data = le16toh(bus_space_read_2(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 2)));
		break;
	case 4:
		data = le32toh(bus_space_read_4(sc->sc_bst, sc->sc_bsh,
		    cd));
		break;
	}
	mtx_unlock_spin(&pcicfg_mtx);
	return (data);
}
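
/*
 * Sub-word access example: CFG_ADDR always selects an aligned 32-bit
 * register (reg & 0xfc), so a 1-byte read of, say, reg 0x0e
 * (PCIR_HDRTYPE) programs CFG_ADDR with offset 0x0c and then reads the
 * data register at cd + (0x0e & 3) = cd + 2, letting byte-lane selection
 * happen on the bus_space access itself.
 */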

static void
mv_pcib_hw_cfgwrite(struct mv_pcib_softc *sc, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t data, int bytes)
{
	uint32_t addr, ca, cd;

	ca = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_ADDR : PCI_REG_CFG_ADDR;
	cd = (sc->sc_type != MV_TYPE_PCI) ?
	    PCIE_REG_CFG_DATA : PCI_REG_CFG_DATA;
	addr = PCI_CFG_ENA | PCI_CFG_BUS(bus) | PCI_CFG_DEV(slot) |
	    PCI_CFG_FUN(func) | PCI_CFG_PCIE_REG(reg);

	mtx_lock_spin(&pcicfg_mtx);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, ca, addr);

	switch (bytes) {
	case 1:
		bus_space_write_1(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 3), data);
		break;
	case 2:
		bus_space_write_2(sc->sc_bst, sc->sc_bsh,
		    cd + (reg & 2), htole16(data));
		break;
	case 4:
		bus_space_write_4(sc->sc_bst, sc->sc_bsh,
		    cd, htole32(data));
		break;
	}
	mtx_unlock_spin(&pcicfg_mtx);
}

static int
mv_pcib_maxslots(device_t dev)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	return ((sc->sc_type != MV_TYPE_PCI) ? 1 : PCI_SLOTMAX);
}

static int
mv_pcib_root_slot(device_t dev, u_int bus, u_int slot, u_int func)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);
	uint32_t vendor, device;

	/* On platforms other than Armada38x, root link is always at slot 0 */
	if (!sc->sc_enable_find_root_slot)
		return (slot == 0);

	vendor = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_VENDOR,
	    PCIR_VENDOR_LENGTH);
	device = mv_pcib_hw_cfgread(sc, bus, slot, func, PCIR_DEVICE,
	    PCIR_DEVICE_LENGTH) & MV_DEV_FAMILY_MASK;

	return (vendor == PCI_VENDORID_MRVL && device == MV_DEV_ARMADA38X);
}

static uint32_t
mv_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, int bytes)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	/* Return ~0 if link is inactive or trying to read from Root */
	if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) &
	    PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func))
		return (~0U);

	return (mv_pcib_hw_cfgread(sc, bus, slot, func, reg, bytes));
}

static void
mv_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func,
    u_int reg, uint32_t val, int bytes)
{
	struct mv_pcib_softc *sc = device_get_softc(dev);

	/* Return if link is inactive or trying to write to Root */
	if ((bus_space_read_4(sc->sc_bst, sc->sc_bsh, PCIE_REG_STATUS) &
	    PCIE_STATUS_LINK_DOWN) || mv_pcib_root_slot(dev, bus, slot, func))
		return;

	mv_pcib_hw_cfgwrite(sc, bus, slot, func, reg, val, bytes);
}

static int
mv_pcib_route_interrupt(device_t bus, device_t dev, int pin)
{
	struct mv_pcib_softc *sc;
	struct ofw_pci_register reg;
	uint32_t pintr, mintr[4];
	int icells;
	phandle_t iparent;

	sc = device_get_softc(bus);
	pintr = pin;

	/* Fabricate imap information in case this isn't an OFW device */
	bzero(&reg, sizeof(reg));
	reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) |
	    (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) |
	    (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT);

	icells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo,
	    &reg, sizeof(reg), &pintr, sizeof(pintr), mintr, sizeof(mintr),
	    &iparent);
	if (icells > 0)
		return (ofw_bus_map_intr(dev, iparent, icells, mintr));

	/* Maybe it's a real interrupt, not an intpin */
	if (pin > 4)
		return (pin);

	device_printf(bus, "could not route pin %d for device %d.%d\n",
	    pin, pci_get_slot(dev), pci_get_function(dev));
	return (PCI_INVALID_IRQ);
}
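
/*
 * For illustration, the fabricated imap key above encodes a device at
 * bus 0, slot 1, function 0 as phys_hi = (0 << 16) | (1 << 11) |
 * (0 << 8) = 0x00000800, i.e. the standard OFW PCI unit-address layout
 * that the 'interrupt-map' lookup expects.
 */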

static int
mv_pcib_decode_win(phandle_t node, struct mv_pcib_softc *sc)
{
	struct mv_pci_range io_space, mem_space;
	device_t dev;
	int error;

	dev = sc->sc_dev;

	if ((error = mv_pci_ranges(node, &io_space, &mem_space)) != 0) {
		device_printf(dev, "could not retrieve 'ranges' data\n");
		return (error);
	}

	/* Configure CPU decoding windows */
	error = decode_win_cpu_set(sc->sc_win_target,
	    sc->sc_io_win_attr, io_space.base_parent, io_space.len, ~0);
	if (error < 0) {
		device_printf(dev, "could not set up CPU decode "
		    "window for PCI IO\n");
		return (ENXIO);
	}
	error = decode_win_cpu_set(sc->sc_win_target,
	    sc->sc_mem_win_attr, mem_space.base_parent, mem_space.len,
	    mem_space.base_parent);
	if (error < 0) {
		device_printf(dev, "could not set up CPU decode "
		    "windows for PCI MEM\n");
		return (ENXIO);
	}

	sc->sc_io_base = io_space.base_parent;
	sc->sc_io_size = io_space.len;

	sc->sc_mem_base = mem_space.base_parent;
	sc->sc_mem_size = mem_space.len;

	return (0);
}

static int
mv_pcib_map_msi(device_t dev, device_t child, int irq, uint64_t *addr,
    uint32_t *data)
{
	struct mv_pcib_softc *sc;

	sc = device_get_softc(dev);
	if (!sc->sc_msi_supported)
		return (ENOTSUP);

	irq = irq - MSI_IRQ;

	/* validate parameters */
	if (isclr(&sc->sc_msi_bitmap, irq)) {
		device_printf(dev, "invalid MSI 0x%x\n", irq);
		return (EINVAL);
	}

	mv_msi_data(irq, addr, data);

	debugf("%s: irq: %d addr: %jx data: %x\n",
	    __func__, irq, *addr, *data);

	return (0);
}

static int
mv_pcib_alloc_msi(device_t dev, device_t child, int count,
    int maxcount __unused, int *irqs)
{
	struct mv_pcib_softc *sc;
	u_int start = 0, i;

	sc = device_get_softc(dev);
	if (!sc->sc_msi_supported)
		return (ENOTSUP);

	if (powerof2(count) == 0 || count > MSI_IRQ_NUM)
		return (EINVAL);

	mtx_lock(&sc->sc_msi_mtx);

	for (start = 0; (start + count) < MSI_IRQ_NUM; start++) {
		for (i = start; i < start + count; i++) {
			if (isset(&sc->sc_msi_bitmap, i))
				break;
		}
		if (i == start + count)
			break;
	}

	if ((start + count) == MSI_IRQ_NUM) {
		mtx_unlock(&sc->sc_msi_mtx);
		return (ENXIO);
	}

	for (i = start; i < start + count; i++) {
		setbit(&sc->sc_msi_bitmap, i);
		*irqs++ = MSI_IRQ + i;
	}
	debugf("%s: start: %x count: %x\n", __func__, start, count);

	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}
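
/*
 * Allocation walk-through (hypothetical state): with bits 0-2 already set
 * in sc_msi_bitmap, a request for count = 2 starts at start = 0, hits a
 * used bit immediately, and slides start forward until start = 3, where
 * bits 3 and 4 are free; the caller then receives MSI_IRQ + 3 and
 * MSI_IRQ + 4. The search advances one bit at a time, so blocks are not
 * necessarily aligned to count the way pcib_alloc() allocations are.
 */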

static int
mv_pcib_release_msi(device_t dev, device_t child, int count, int *irqs)
{
	struct mv_pcib_softc *sc;
	u_int i;

	sc = device_get_softc(dev);
	if (!sc->sc_msi_supported)
		return (ENOTSUP);

	mtx_lock(&sc->sc_msi_mtx);

	for (i = 0; i < count; i++)
		clrbit(&sc->sc_msi_bitmap, irqs[i] - MSI_IRQ);

	mtx_unlock(&sc->sc_msi_mtx);
	return (0);
}