/*
 * Copyright 2022 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/memory.h>
#include <subdev/mc.h>
#include <subdev/timer.h>

/* Dump the falcon's PC trace buffer to the error log, for debugging
 * falcon hangs.  The number of valid entries is read from bits 23:16
 * of the trace-index register (0x148); each entry is selected by
 * writing its index back to 0x148 and read out of 0x14c.
 */
void
gm200_flcn_tracepc(struct nvkm_falcon *falcon)
{
	u32 sctl = nvkm_falcon_rd32(falcon, 0x240);
	u32 tidx = nvkm_falcon_rd32(falcon, 0x148);
	int nr = (tidx & 0x00ff0000) >> 16, sp, ip;

	FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx);
	for (sp = 0; sp < nr; sp++) {
		nvkm_falcon_wr32(falcon, 0x148, sp);
		ip = nvkm_falcon_rd32(falcon, 0x14c);
		FLCN_ERR(falcon, "TRACEPC: %08x", ip);
	}
}

/* PIO-read 'len' bytes of falcon DMEM through the port's data register
 * (0x1c4 + port*8).  Repeated reads of the same register return
 * consecutive words, so the access port set up by _rd_init() appears to
 * auto-increment — TODO confirm against the falcon register docs.
 *
 * NOTE(review): 'img' is declared const to match the shared PIO op
 * signature, but the read path writes through it via the u32 cast.
 */
static void
gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len)
{
	while (len >= 4) {
		*(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
		img += 4;
		len -= 4;
	}

	/* Sigh. Tegra PMU FW's init message...
	 *
	 * DMEM reads are word-granular; a trailing partial word is
	 * fetched whole and its low-order bytes copied out one by one.
	 */
	if (len) {
		u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

		while (len--) {
			*(u8 *)img++ = data & 0xff;
			data >>= 8;
		}
	}
}

/* Point the DMEM access port at dmem_base before a PIO read.
 * BIT(25) presumably selects auto-increment-on-read — TODO confirm.
 */
static void
gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base)
{
	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base);
}

/* PIO-write whole 32-bit words of DMEM through the port's data register.
 * A trailing partial word is unsupported (WARN_ON).  'tag' is unused for
 * DMEM transfers; it exists to match the shared PIO op signature.
 */
static void
gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
	while (len >= 4) {
		nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img);
		img += 4;
		len -= 4;
	}

	WARN_ON(len);
}

/* Point the DMEM access port at dmem_base before a PIO write.
 * BIT(24) presumably selects auto-increment-on-write — TODO confirm.
 * 'sec' is accepted but unused here (no secure DMEM load on this path).
 */
static void
gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base)
{
	nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base);
}

/* DMEM PIO ops: transfers of 1 byte up to 0x100 bytes per call. */
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
	.min = 1,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_dmem_wr_init,
	.wr = gm200_flcn_pio_dmem_wr,
	.rd_init = gm200_flcn_pio_dmem_rd_init,
	.rd = gm200_flcn_pio_dmem_rd,
};

/* Point the IMEM access port (0x180 + port*0x10) at imem_base.
 * BIT(28) marks the upload as secure when 'sec' is set; BIT(24)
 * presumably selects auto-increment-on-write — TODO confirm.
 */
static void
gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base)
{
	nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? BIT(28) : 0) | BIT(24) | imem_base);
}

/* PIO-write one IMEM block: write its tag (0x188), then stream the code
 * words into the data register (0x184).  'len' is expected to be a
 * multiple of 4 — the PIO min/max of 0x100 below guarantees it.
 */
static void
gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag)
{
	nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag);
	while (len >= 4) {
		nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img);
		img += 4;
		len -= 4;
	}
}

/* IMEM PIO ops: write-only, fixed 0x100-byte (one tagged page) chunks. */
const struct nvkm_falcon_func_pio
gm200_flcn_imem_pio = {
	.min = 0x100,
	.max = 0x100,
	.wr_init = gm200_flcn_pio_imem_wr_init,
	.wr = gm200_flcn_pio_imem_wr,
};

/* Read the context-bind state from bits 14:12 of register 0x0dc.
 * When 'intr' is set, first require bit 3 of 0x008 (presumably the
 * ctxsw interrupt status — TODO confirm) to be pending; if it is not,
 * report -1 rather than a state value.
 */
int
gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr)
{
	if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008))
		return -1;

	return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12;
}

/* Bind an instance block to the falcon: 'target' selects the memory
 * aperture, 'addr' is the instance block address (written 4KiB-aligned,
 * with bit 30 presumably a "valid" flag — TODO confirm).  The two mask
 * writes that follow look like context-switch enable bits; verify
 * against the register documentation.
 */
void
gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr)
{
	nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */
	nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12));
	nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000);
	nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008);
}

/* Wait (up to 10ms) for the falcon's post-reset IMEM/DMEM scrubbing to
 * complete, i.e. for bits 2:1 of 0x10c to clear.  Returns 0 on success,
 * -ETIMEDOUT otherwise.
 *
 * The leading mask(0x040, 0, 0) modifies nothing — presumably it is a
 * deliberate read/write flush of the register interface; do not remove
 * without confirming.
 */
int
gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	if (nvkm_msec(falcon->owner->device, 10,
		if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}

/* Bring the falcon out of reset: run the optional engine-specific reset
 * and core-select hooks, release the PMC reset if this falcon uses one,
 * then wait for memory scrubbing to finish.  Finally register 0x084 is
 * seeded with the chip's register 0x000000 (presumably PMC_BOOT_0 —
 * TODO confirm why the firmware wants it there).
 * Returns 0 on success or a negative error code from any hook/wait.
 */
int
gm200_flcn_enable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	if (falcon->func->reset_pmc)
		nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst);

	ret = falcon->func->reset_wait_mem_scrubbing(falcon);
	if (ret)
		return ret;

	nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000));
	return 0;
}

/* Put the falcon into reset: select the core, clear bits 1:0 of 0x048
 * and write all-ones to 0x014 (presumably disabling/acknowledging
 * interrupts — TODO confirm register names), then assert the PMC reset
 * (with the optional reset_prep hook first) and/or the engine-specific
 * reset.  Returns 0 on success or the first hook error.
 */
int
gm200_flcn_disable(struct nvkm_falcon *falcon)
{
	struct nvkm_device *device = falcon->owner->device;
	int ret;

	if (falcon->func->select) {
		ret = falcon->func->select(falcon);
		if (ret)
			return ret;
	}

	nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000);
	nvkm_falcon_wr32(falcon, 0x014, 0xffffffff);

	if (falcon->func->reset_pmc) {
		if (falcon->func->reset_prep) {
			ret = falcon->func->reset_prep(falcon);
			if (ret)
				return ret;
		}

		nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst);
	}

	if (falcon->func->reset_eng) {
		ret = falcon->func->reset_eng(falcon);
		if (ret)
			return ret;
	}

	return 0;
}

/* Boot previously-loaded firmware and wait for it to report back.
 *
 * Mailbox 0 (0x040) is seeded from *pmbox0, or with the 0xcafebeef
 * sentinel so a stale value can't be mistaken for success; mailbox 1
 * (0x044) is seeded only if supplied.  The boot vector (0x104) is set
 * and the CPU started via 0x100 (value 2 — presumably STARTCPU), then
 * we poll up to 2s for bit 4 of 0x100 (presumably HALTED) to assert.
 *
 * After the wait, mailbox 0 must equal mbox0_ok or the boot is treated
 * as failed (-EIO, unless a timeout already occurred).  If 'irqsclr' is
 * non-zero those interrupt bits are cleared via 0x004 before returning.
 * Returns 0 on success, -ETIMEDOUT or -EIO on failure.
 */
int
gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 mbox0, mbox1;
	int ret = 0;

	nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? *pmbox0 : 0xcafebeef);
	if (pmbox1)
		nvkm_falcon_wr32(falcon, 0x044, *pmbox1);

	nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr);
	nvkm_falcon_wr32(falcon, 0x100, 0x00000002);

	if (nvkm_msec(falcon->owner->device, 2000,
		if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010)
			break;
	) < 0)
		ret = -ETIMEDOUT;

	mbox0 = nvkm_falcon_rd32(falcon, 0x040);
	mbox1 = nvkm_falcon_rd32(falcon, 0x044);
	if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1))
		ret = ret ?: -EIO;

	if (irqsclr)
		nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr);

	return ret;
}

/* Load firmware into the falcon.
 *
 * If the firmware provides an instance block (fw->inst), bind it via
 * the bind_inst hook and wait (10ms each) for the bind state machine
 * to reach state 5 and then, after setting what look like completion
 * ack bits in 0x004/0x058, to return to state 0.  Otherwise, take the
 * no-context path (0x624 bit 7, clear 0x10c).
 *
 * If a separate bootloader image is present (fw->boot), it is PIO'd
 * into the top of IMEM (code.limit - boot_size) and the descriptor
 * handed over via the firmware's load_bld hook.  Otherwise the
 * non-secure IMEM, secure IMEM and DMEM sections of the image are
 * PIO-written individually (the 'true' on the imem section marks it
 * secure).  Returns 0 on success or a negative error code.
 */
int
gm200_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int ret;

	if (fw->inst) {
		int target;

		nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001);

		/* Translate the backing memory type to the hardware aperture id. */
		switch (nvkm_memory_target(fw->inst)) {
		case NVKM_MEM_TARGET_VRAM: target = 0; break;
		case NVKM_MEM_TARGET_HOST: target = 2; break;
		case NVKM_MEM_TARGET_NCOH: target = 3; break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst));

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5)
				break;
		) < 0)
			return -ETIMEDOUT;

		nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
		nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

		if (nvkm_msec(falcon->owner->device, 10,
			if (falcon->func->bind_stat(falcon, false) == 0)
				break;
		) < 0)
			return -ETIMEDOUT;
	} else {
		nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
		nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	}

	if (fw->boot) {
		ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0,
					 IMEM, falcon->code.limit - fw->boot_size, fw->boot_size,
					 fw->boot_addr >> 8, false);
		if (ret)
			return ret;

		return fw->func->load_bld(fw);
	}

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0,
				 IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false);
	if (ret)
		return ret;

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0,
				 IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true);
	if (ret)
		return ret;

	ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0,
				 DMEM, fw->dmem_base, fw->dmem_size, 0, false);
	if (ret)
		return ret;

	return 0;
}

/* Firmware "reset" op: just a full reset of the owning falcon. */
int
gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw)
{
	return nvkm_falcon_reset(fw->falcon);
}

/* Select which firmware signature to use.  If the falcon exposes a
 * debug register, enable the falcon and test bit 20 of it: when set
 * (presumably a debug-fused board — TODO confirm), point *sig_base_src
 * at the debug signature and return 1; otherwise return 0 (production
 * signature) or a negative error from nvkm_falcon_enable().
 */
int
gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src)
{
	struct nvkm_falcon *falcon = fw->falcon;
	u32 addr = falcon->func->debug;
	int ret = 0;

	if (addr) {
		ret = nvkm_falcon_enable(falcon);
		if (ret)
			return ret;

		if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) {
			*sig_base_src = fw->sig_base_dbg;
			return 1;
		}
	}

	return ret;
}

/* Default GM200 firmware ops. */
const struct nvkm_falcon_fw_func
gm200_flcn_fw = {
	.signature = gm200_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = gm200_flcn_fw_load,
	.boot = gm200_flcn_fw_boot,
};