/*-
 * Copyright (c) 2014 Ganbold Tsagaankhuu <ganbold@freebsd.org>
 * Copyright (c) 2016 Emmanuel Vadot <manu@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/platformvar.h>

#include <arm/allwinner/aw_mp.h>
#include <arm/allwinner/aw_machdep.h>

/* Register for all dual-core SoC */
#define	A20_CPUCFG_BASE		0x01c25c00
/* Register for all quad-core SoC */
#define	CPUCFG_BASE		0x01f01c00
#define	CPUCFG_SIZE		0x400
#define	PRCM_BASE		0x01f01400
#define	PRCM_SIZE		0x800
/* Register for multi-cluster SoC */
#define	CPUXCFG_BASE		0x01700000
#define	CPUXCFG_SIZE		0x400

#define	CPU_OFFSET		0x40
#define	CPU_OFFSET_CTL		0x04
#define	CPU_OFFSET_STATUS	0x08
#define	CPU_RST_CTL(cpuid)	((cpuid + 1) * CPU_OFFSET)
#define	CPU_CTL(cpuid)		(((cpuid + 1) * CPU_OFFSET) + CPU_OFFSET_CTL)
#define	CPU_STATUS(cpuid)	(((cpuid + 1) * CPU_OFFSET) + CPU_OFFSET_STATUS)

#define	CPU_RESET		(1 << 0)
#define	CPU_CORE_RESET		(1 << 1)

#define	CPUCFG_GENCTL		0x184
#define	CPUCFG_P_REG0		0x1a4

#define	A20_CPU1_PWR_CLAMP	0x1b0
#define	CPU_PWR_CLAMP_REG	0x140
#define	CPU_PWR_CLAMP(cpu)	((cpu * 4) + CPU_PWR_CLAMP_REG)
#define	CPU_PWR_CLAMP_STEPS	8

#define	A20_CPU1_PWROFF_REG	0x1b4
#define	CPU_PWROFF		0x100

#define	CPUCFG_DBGCTL0		0x1e0
#define	CPUCFG_DBGCTL1		0x1e4

#define	CPUS_CL_RST(cl)		(0x30 + (cl) * 0x4)
#define	CPUX_CL_CTRL0(cl)	(0x0 + (cl) * 0x10)
#define	CPUX_CL_CTRL1(cl)	(0x4 + (cl) * 0x10)
#define	CPUX_CL_CPU_STATUS(cl)	(0x30 + (cl) * 0x4)
#define	CPUX_CL_RST(cl)		(0x80 + (cl) * 0x4)
#define	PRCM_CL_PWROFF(cl)	(0x100 + (cl) * 0x4)
#define	PRCM_CL_PWR_CLAMP(cl, cpu)	(0x140 + (cl) * 0x4 + (cpu) * 0x4)

/*
 * Derive the core count from the L2 control register and publish it
 * through mp_ncpus/mp_maxid.
 */
void
aw_mp_setmaxid(platform_t plat)
{
	int ncpu;
	uint32_t reg;

	if (mp_ncpus != 0)
		return;

	reg = cp15_l2ctlr_get();
	ncpu = CPUV7_L2CTLR_NPROC(reg);

	mp_ncpus = ncpu;
	mp_maxid = ncpu - 1;
}
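/*
 * Bring the secondary cores out of reset on single-cluster SoCs.  The
 * secondary entry point (mpentry) is written to CPUCFG_P_REG0; each core
 * is then held in reset with debug access disabled, its power clamp is
 * released in steps, power-off gating is cleared, and the core is finally
 * released from reset and woken with SEV.  The A20 (sun7i) uses different
 * power-clamp/power-off registers than the quad-core parts, which go
 * through the PRCM.
 */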
void
aw_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpucfg;
	bus_space_handle_t prcm;
	int i, j, soc_family;
	uint32_t val;

	soc_family = allwinner_soc_family();
	if (soc_family == ALLWINNERSOC_SUN7I) {
		if (bus_space_map(fdtbus_bs_tag, A20_CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
	} else {
		if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
		if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
		    &prcm) != 0)
			panic("Couldn't map the PRCM\n");
	}

	dcache_wbinv_poc_all();

	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	/*
	 * Assert nCOREPORESET low and set L1RSTDISABLE low.
	 * Ensure DBGPWRDUP is set to LOW to prevent any external
	 * debug access to the processor.
	 */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i), 0);

	/* Set L1RSTDISABLE low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL, val);

	/* Set DBGPWRDUP low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	/* Release power clamp */
	for (i = 1; i < mp_ncpus; i++)
		for (j = 0; j <= CPU_PWR_CLAMP_STEPS; j++) {
			if (soc_family != ALLWINNERSOC_SUN7I) {
				bus_space_write_4(fdtbus_bs_tag, prcm,
				    CPU_PWR_CLAMP(i), 0xff >> j);
			} else {
				bus_space_write_4(fdtbus_bs_tag,
				    cpucfg, A20_CPU1_PWR_CLAMP, 0xff >> j);
			}
		}
	DELAY(10000);

	/* Clear power-off gating */
	if (soc_family != ALLWINNERSOC_SUN7I) {
		val = bus_space_read_4(fdtbus_bs_tag, prcm, CPU_PWROFF);
		for (i = 0; i < mp_ncpus; i++)
			val &= ~(1 << i);
		bus_space_write_4(fdtbus_bs_tag, prcm, CPU_PWROFF, val);
	} else {
		val = bus_space_read_4(fdtbus_bs_tag,
		    cpucfg, A20_CPU1_PWROFF_REG);
		val &= ~(1 << 0);
		bus_space_write_4(fdtbus_bs_tag, cpucfg,
		    A20_CPU1_PWROFF_REG, val);
	}
	DELAY(1000);

	/* De-assert cpu core reset */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i),
		    CPU_RESET | CPU_CORE_RESET);

	/* Assert DBGPWRDUP signal */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val |= (1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, cpucfg, CPUCFG_SIZE);
	if (soc_family != ALLWINNERSOC_SUN7I)
		bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}
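/*
 * Power up one core of a multi-cluster SoC: assert the core and power-on
 * resets, disable automatic L1 invalidation at reset, step the power clamp
 * down until the PRCM reports it fully released, clear power-off gating
 * and de-assert both resets.
 */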
static void
aw_mc_mp_start_cpu(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm, int cluster, int cpu)
{
	uint32_t val;
	int i;

	/* Assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);

	/* Assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* Disable automatic L1 cache invalidate at reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster), val);

	/* Release power clamp */
	for (i = 0; i <= CPU_PWR_CLAMP_STEPS; i++)
		bus_space_write_4(fdtbus_bs_tag, prcm,
		    PRCM_CL_PWR_CLAMP(cluster, cpu), 0xff >> i);
	while (bus_space_read_4(fdtbus_bs_tag, prcm,
	    PRCM_CL_PWR_CLAMP(cluster, cpu)) != 0)
		;

	/* Clear power-off gating */
	val = bus_space_read_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster), val);

	/* De-assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* De-assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);
}

static void
aw_mc_mp_start_ap(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm)
{
	int cluster, cpu;

	KASSERT(mp_ncpus <= 4, ("multiple clusters not yet supported"));

	dcache_wbinv_poc_all();

	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	cluster = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++)
		aw_mc_mp_start_cpu(cpuscfg, cpuxcfg, prcm, cluster, cpu);
}

void
a83t_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpuscfg, cpuxcfg, prcm;

	if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
	    0, &cpuscfg) != 0)
		panic("Couldn't map the CPUCFG\n");
	if (bus_space_map(fdtbus_bs_tag, CPUXCFG_BASE, CPUXCFG_SIZE,
	    0, &cpuxcfg) != 0)
		panic("Couldn't map the CPUXCFG\n");
	if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
	    &prcm) != 0)
		panic("Couldn't map the PRCM\n");

	aw_mc_mp_start_ap(cpuscfg, cpuxcfg, prcm);
	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, cpuxcfg, CPUXCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, cpuscfg, CPUCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}