/*-
 * Copyright (c) 2012 Semihalf.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/resource.h>
#include <machine/tlb.h>

#include <contrib/ncsw/inc/error_ext.h>
#include <contrib/ncsw/inc/xx_ext.h>

#include "portals.h"

/*
 * Allocate the shared CE/CI memory resources and the per-CPU interrupt
 * for one DPAA portal.
 */
int
dpaa_portal_alloc_res(device_t dev, struct dpaa_portals_devinfo *di, int cpu)
{
	struct dpaa_portals_softc *sc = device_get_softc(dev);
	struct resource_list_entry *rle;
	int err;
	struct resource_list *res;

	/* Check if MallocSmart allocator is ready */
	if (XX_MallocSmartInit() != E_OK)
		return (ENXIO);

	res = &di->di_res;

	/*
	 * Allocate memory.
	 * Reserve only one pair of CE/CI virtual memory regions
	 * for all CPUs, in order to save the space.
	 */
	if (sc->sc_rres[0] == NULL) {
		/* Cache enabled area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 0);
		sc->sc_rrid[0] = 0;
		sc->sc_rres[0] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[0], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[0] == NULL) {
			device_printf(dev,
			    "Could not allocate cache enabled memory.\n");
			return (ENXIO);
		}
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_MEM);
		/* Cache inhibited area */
		rle = resource_list_find(res, SYS_RES_MEMORY, 1);
		sc->sc_rrid[1] = 1;
		sc->sc_rres[1] = bus_alloc_resource(dev,
		    SYS_RES_MEMORY, &sc->sc_rrid[1], rle->start + sc->sc_dp_pa,
		    rle->end + sc->sc_dp_pa, rle->count, RF_ACTIVE);
		if (sc->sc_rres[1] == NULL) {
			device_printf(dev,
			    "Could not allocate cache inhibited memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY,
			    sc->sc_rrid[0], sc->sc_rres[0]);
			return (ENXIO);
		}
		tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
		    rle->start + sc->sc_dp_pa, rle->count, _TLB_ENTRY_IO);
		sc->sc_dp[cpu].dp_regs_mapped = 1;
	}
	/* Acquire portal's CE_PA and CI_PA */
	rle = resource_list_find(res, SYS_RES_MEMORY, 0);
	sc->sc_dp[cpu].dp_ce_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ce_size = rle->count;
	rle = resource_list_find(res, SYS_RES_MEMORY, 1);
	sc->sc_dp[cpu].dp_ci_pa = rle->start + sc->sc_dp_pa;
	sc->sc_dp[cpu].dp_ci_size = rle->count;

	/* Allocate interrupts */
	rle = resource_list_find(res, SYS_RES_IRQ, 0);
	sc->sc_dp[cpu].dp_irid = 0;
	sc->sc_dp[cpu].dp_ires = bus_alloc_resource(dev,
	    SYS_RES_IRQ, &sc->sc_dp[cpu].dp_irid, rle->start, rle->end,
	    rle->count, RF_ACTIVE);
	/* Save interrupt number for later use */
	sc->sc_dp[cpu].dp_intr_num = rle->start;

	if (sc->sc_dp[cpu].dp_ires == NULL) {
		device_printf(dev, "Could not allocate irq.\n");
		return (ENXIO);
	}
	err = XX_PreallocAndBindIntr(dev, (uintptr_t)sc->sc_dp[cpu].dp_ires,
	    cpu);

	if (err != E_OK) {
		device_printf(dev, "Could not prealloc and bind interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (ENXIO);
	}

#if 0
	err = bus_generic_config_intr(dev, rle->start, di->di_intr_trig,
	    di->di_intr_pol);
	if (err != 0) {
		device_printf(dev, "Could not configure interrupt\n");
		bus_release_resource(dev, SYS_RES_IRQ,
		    sc->sc_dp[cpu].dp_irid, sc->sc_dp[cpu].dp_ires);
		sc->sc_dp[cpu].dp_ires = NULL;
		return (err);
	}
#endif

	return (0);
}

/*
 * Map the shared CE/CI portal regions into TLB1 on the current CPU.
 * The mapping is created at most once per CPU.
 */
void
dpaa_portal_map_registers(struct dpaa_portals_softc *sc)
{
	unsigned int cpu;

	/* Pin the thread so the CPU id stays stable while programming TLB1. */
	sched_pin();
	cpu = PCPU_GET(cpuid);
	if (sc->sc_dp[cpu].dp_regs_mapped)
		goto out;

	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[0]),
	    sc->sc_dp[cpu].dp_ce_pa, sc->sc_dp[cpu].dp_ce_size,
	    _TLB_ENTRY_MEM);
	tlb1_set_entry(rman_get_bushandle(sc->sc_rres[1]),
	    sc->sc_dp[cpu].dp_ci_pa, sc->sc_dp[cpu].dp_ci_size,
	    _TLB_ENTRY_IO);

	sc->sc_dp[cpu].dp_regs_mapped = 1;

out:
	sched_unpin();
}
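
#if 0
/*
 * Usage sketch (illustrative, not part of the original file): an attach
 * path could reserve a portal for every CPU by calling
 * dpaa_portal_alloc_res() once per core. The CE/CI bus resources are only
 * allocated on the first call; later calls just record the per-CPU
 * physical addresses and set up that CPU's interrupt. The function name
 * below is hypothetical; CPU_FOREACH() comes from <sys/smp.h>.
 */
static int
dpaa_portals_attach_example(device_t dev, struct dpaa_portals_devinfo *di)
{
	int cpu, err;

	CPU_FOREACH(cpu) {
		err = dpaa_portal_alloc_res(dev, di, cpu);
		if (err != 0)
			return (err);
	}

	/*
	 * Each CPU still has to call dpaa_portal_map_registers() on itself
	 * (e.g. from a thread bound to that CPU) so that the portal's TLB1
	 * entries get installed locally.
	 */
	return (0);
}
#endif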