xref: /freebsd/sys/dev/dpaa/portals_common.c (revision 0957b409a90fd597c1e9124cbaf3edd2b488f4ac)
/*-
 * Copyright (c) 2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/resource.h>
#include <machine/tlb.h>

#include <contrib/ncsw/inc/error_ext.h>
#include <contrib/ncsw/inc/xx_ext.h>

#include "portals.h"

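/*
 * Allocate the portal resources used by the given CPU: the shared
 * cache-enabled (CE) and cache-inhibited (CI) memory windows and the
 * per-portal interrupt, which is bound to that CPU.
 */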
int
dpaa_portal_alloc_res(device_t dev, struct dpaa_portals_devinfo *di, int cpu)
{
	struct dpaa_portals_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	int err;
	struct resource_list *res;

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	res = &di->di_res;

	/*
	 * Allocate memory.
	 * Reserve only one pair of CE/CI virtual memory regions
	 * for all CPUs, in order to save space.
	 */
	if (sc->sc_rres[0] == NULL) {
		/* Cache enabled area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 0);
		sc->sc_rrid[0] = 0;
		sc->sc_rres[0] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[0], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[0] == NULL) {
			device_printf(dev,
			    "Could not allocate cache enabled memory.\n");
			return (ENXIO);
		}
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_MEM);
		/* Cache inhibited area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 1);
		sc->sc_rrid[1] = 1;
		sc->sc_rres[1] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[1], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[1] == NULL) {
			device_printf(dev,
			    "Could not allocate cache inhibited memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->sc_rrid[0], sc->sc_rres[0]);
			return (ENXIO);
		}
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_IO);
		sc->sc_dp[cpu].dp_regs_mapped = 1;
	}
	/* Acquire portal's CE_PA and CI_PA */
	rle = resource_list_find(res, SYS_RES_MEMORY, 0);
	sc->sc_dp[cpu].dp_ce_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ce_size = rle->count;
	rle = resource_list_find(res, SYS_RES_MEMORY, 1);
	sc->sc_dp[cpu].dp_ci_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ci_size = rle->count;

	/* Allocate interrupts */
	rle = resource_list_find(res, SYS_RES_IRQ, 0);
	sc->sc_dp[cpu].dp_irid = 0;
	sc->sc_dp[cpu].dp_ires = bus_alloc_resource(dev,
	    SYS_RES_IRQ, &sc->sc_dp[cpu].dp_irid, rle->start, rle->end,
	    rle->count, RF_ACTIVE);
	/* Save interrupt number for later use */
	sc->sc_dp[cpu].dp_intr_num = rle->start;

	if (sc->sc_dp[cpu].dp_ires == NULL) {
		device_printf(dev, "Could not allocate irq.\n");
		return (ENXIO);
	}

	/* Preallocate and bind the portal interrupt to the given CPU. */
	err = XX_PreallocAndBindIntr((uintptr_t)sc->sc_dp[cpu].dp_ires, cpu);

	if (err != E_OK) {
		device_printf(dev, "Could not prealloc and bind interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (ENXIO);
	}

#if 0
	err = bus_generic_config_intr(dev, rle->start, di->di_intr_trig,
	    di->di_intr_pol);
	if (err != 0) {
		device_printf(dev, "Could not configure interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (err);
	}
#endif

	return (0);
}

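/*
 * Map the calling CPU's portal CE/CI regions into TLB1 if this CPU has not
 * mapped them yet.  The thread is pinned while the CPU id is read and the
 * TLB entries are programmed.
 */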
void
dpaa_portal_map_registers(struct dpaa_portals_softc *sc)
{
	unsigned int cpu;

	sched_pin();
	cpu = PCPU_GET(cpuid);
	if (sc->sc_dp[cpu].dp_regs_mapped)
		goto out;

	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
	    sc->sc_dp[cpu].dp_ce_pa, sc->sc_dp[cpu].dp_ce_size,
	    _TLB_ENTRY_MEM);
	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
	    sc->sc_dp[cpu].dp_ci_pa, sc->sc_dp[cpu].dp_ci_size,
	    _TLB_ENTRY_IO);

	sc->sc_dp[cpu].dp_regs_mapped = 1;

out:
	sched_unpin();
}
171