/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <engine/falcon.h>

#include <core/gpuobj.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>

static int
nvkm_falcon_oclass_get(struct nvkm_oclass *oclass, int index)
{
	struct nvkm_falcon *falcon = nvkm_falcon(oclass->engine);
	int c = 0;

	while (falcon->func->sclass[c].oclass) {
		if (c++ == index) {
			oclass->base = falcon->func->sclass[index];
			return index;
		}
	}

	return c;
}

static int
nvkm_falcon_cclass_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
			int align, struct nvkm_gpuobj **pgpuobj)
{
	return nvkm_gpuobj_new(object->engine->subdev.device, 256,
			       align, true, parent, pgpuobj);
}

static const struct nvkm_object_func
nvkm_falcon_cclass = {
	.bind = nvkm_falcon_cclass_bind,
};

static void
nvkm_falcon_intr(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 dest = nvkm_rd32(device, base + 0x01c);
	u32 intr = nvkm_rd32(device, base + 0x008) & dest & ~(dest >> 16);
	u32 inst = nvkm_rd32(device, base + 0x050) & 0x3fffffff;
	struct nvkm_chan *chan;
	unsigned long flags;

	chan = nvkm_chan_get_inst(engine, (u64)inst << 12, &flags);

	if (intr & 0x00000040) {
		if (falcon->func->intr) {
			falcon->func->intr(falcon, chan);
			nvkm_wr32(device, base + 0x004, 0x00000040);
			intr &= ~0x00000040;
		}
	}

	if (intr & 0x00000010) {
		nvkm_debug(subdev, "ucode halted\n");
		nvkm_wr32(device, base + 0x004, 0x00000010);
		intr &= ~0x00000010;
	}

	if (intr) {
		nvkm_error(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, base + 0x004, intr);
	}

	nvkm_chan_put(&chan, flags);
}
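/* engine fini: on full teardown (rather than suspend), release the VRAM
 * copy of the boot image and any externally loaded ucode, then, if the
 * falcon is still enabled in PMC, stop FIFO/context-switch processing
 * and mask all interrupts.
 */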
static int
nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_device *device = falcon->engine.subdev.device;
	const u32 base = falcon->addr;

	if (!suspend) {
		nvkm_memory_unref(&falcon->core);
		if (falcon->external) {
			vfree(falcon->data.data);
			vfree(falcon->code.data);
			falcon->code.data = NULL;
		}
	}

	if (nvkm_mc_enabled(device, engine->subdev.type, engine->subdev.inst)) {
		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
		nvkm_wr32(device, base + 0x014, 0xffffffff);
	}
	return 0;
}

static void *
vmemdup(const void *src, size_t len)
{
	void *p = vmalloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static int
nvkm_falcon_oneinit(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 base = falcon->addr;
	u32 caps;

	/* determine falcon capabilities */
	if (device->chipset < 0xa3 ||
	    device->chipset == 0xaa || device->chipset == 0xac) {
		falcon->version = 0;
		falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
	} else {
		caps = nvkm_rd32(device, base + 0x12c);
		falcon->version = (caps & 0x0000000f);
		falcon->secret = (caps & 0x00000030) >> 4;
	}

	caps = nvkm_rd32(device, base + 0x108);
	falcon->code.limit = (caps & 0x000001ff) << 8;
	falcon->data.limit = (caps & 0x0003fe00) >> 1;

	nvkm_debug(subdev, "falcon version: %d\n", falcon->version);
	nvkm_debug(subdev, "secret level: %d\n", falcon->secret);
	nvkm_debug(subdev, "code limit: %d\n", falcon->code.limit);
	nvkm_debug(subdev, "data limit: %d\n", falcon->data.limit);
	return 0;
}
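/* engine init: upload ucode and start the falcon.  built-in code/data from
 * the engine implementation is preferred; failing that, a self-bootstrapping
 * image ("nouveau/nv%02x_fuc%03x") is tried, then separate data/code images
 * ("...d"/"...c").  a self-bootstrapping image is placed in VRAM and handed
 * to the falcon via its DMA interface, otherwise the segments are written
 * directly through the code/data upload ports.
 */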
static int
nvkm_falcon_init(struct nvkm_engine *engine)
{
	struct nvkm_falcon *falcon = nvkm_falcon(engine);
	struct nvkm_subdev *subdev = &falcon->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct firmware *fw;
	char name[32] = "internal";
	const u32 base = falcon->addr;
	int ret, i;

	/* wait for 'uc halted' to be signalled before continuing */
	if (falcon->secret && falcon->version < 4) {
		if (!falcon->version) {
			nvkm_msec(device, 2000,
				if (nvkm_rd32(device, base + 0x008) & 0x00000010)
					break;
			);
		} else {
			nvkm_msec(device, 2000,
				if (!(nvkm_rd32(device, base + 0x180) & 0x80000000))
					break;
			);
		}
		nvkm_wr32(device, base + 0x004, 0x00000010);
	}

	/* disable all interrupts */
	nvkm_wr32(device, base + 0x014, 0xffffffff);

	/* no default ucode provided by the engine implementation, try and
	 * locate a "self-bootstrapping" firmware image for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret == 0) {
			falcon->code.data = vmemdup(fw->data, fw->size);
			falcon->code.size = fw->size;
			falcon->data.data = NULL;
			falcon->data.size = 0;
			release_firmware(fw);
		}

		falcon->external = true;
	}

	/* next step is to try and load "static code/data segment" firmware
	 * images for the engine
	 */
	if (!falcon->code.data) {
		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware data\n");
			return -ENODEV;
		}

		falcon->data.data = vmemdup(fw->data, fw->size);
		falcon->data.size = fw->size;
		release_firmware(fw);
		if (!falcon->data.data)
			return -ENOMEM;

		snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
			 device->chipset, falcon->addr >> 12);

		ret = request_firmware(&fw, name, device->dev);
		if (ret) {
			nvkm_error(subdev, "unable to load firmware code\n");
			return -ENODEV;
		}

		falcon->code.data = vmemdup(fw->data, fw->size);
		falcon->code.size = fw->size;
		release_firmware(fw);
		if (!falcon->code.data)
			return -ENOMEM;
	}

	nvkm_debug(subdev, "firmware: %s (%s)\n", name, falcon->data.data ?
		   "static code/data segments" : "self-bootstrapping");

	/* ensure any "self-bootstrapping" firmware image is in vram */
	if (!falcon->data.data && !falcon->core) {
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				      falcon->code.size, 256, false,
				      &falcon->core);
		if (ret) {
			nvkm_error(subdev, "core allocation failed, %d\n", ret);
			return ret;
		}

		nvkm_kmap(falcon->core);
		for (i = 0; i < falcon->code.size; i += 4)
			nvkm_wo32(falcon->core, i, falcon->code.data[i / 4]);
		nvkm_done(falcon->core);
	}

	/* upload firmware bootloader (or the full code segments) */
	if (falcon->core) {
		u64 addr = nvkm_memory_addr(falcon->core);
		if (device->card_type < NV_C0)
			nvkm_wr32(device, base + 0x618, 0x04000000);
		else
			nvkm_wr32(device, base + 0x618, 0x00000114);
		nvkm_wr32(device, base + 0x11c, 0);
		nvkm_wr32(device, base + 0x110, addr >> 8);
		nvkm_wr32(device, base + 0x114, 0);
		nvkm_wr32(device, base + 0x118, 0x00006610);
	} else {
		if (falcon->code.size > falcon->code.limit ||
		    falcon->data.size > falcon->data.limit) {
			nvkm_error(subdev, "ucode exceeds falcon limit(s)\n");
			return -EINVAL;
		}

		if (falcon->version < 3) {
			nvkm_wr32(device, base + 0xff8, 0x00100000);
			for (i = 0; i < falcon->code.size / 4; i++)
				nvkm_wr32(device, base + 0xff4, falcon->code.data[i]);
		} else {
			nvkm_wr32(device, base + 0x180, 0x01000000);
			for (i = 0; i < falcon->code.size / 4; i++) {
				if ((i & 0x3f) == 0)
					nvkm_wr32(device, base + 0x188, i >> 6);
				nvkm_wr32(device, base + 0x184, falcon->code.data[i]);
			}
		}
	}

	/* upload data segment (if necessary), zeroing the remainder */
	if (falcon->version < 3) {
		nvkm_wr32(device, base + 0xff8, 0x00000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0xff4, falcon->data.data[i]);
		for (; i < falcon->data.limit; i += 4)
			nvkm_wr32(device, base + 0xff4, 0x00000000);
	} else {
		nvkm_wr32(device, base + 0x1c0, 0x01000000);
		for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
			nvkm_wr32(device, base + 0x1c4, falcon->data.data[i]);
		for (; i < falcon->data.limit / 4; i++)
			nvkm_wr32(device, base + 0x1c4, 0x00000000);
	}

	/* start it running */
	nvkm_wr32(device, base + 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
	nvkm_wr32(device, base + 0x104, 0x00000000); /* ENTRY */
	nvkm_wr32(device, base + 0x100, 0x00000002); /* TRIGGER */
	nvkm_wr32(device, base + 0x048, 0x00000003); /* FIFO | CHSW */

	if (falcon->func->init)
		falcon->func->init(falcon);
	return 0;
}

static void *
nvkm_falcon_dtor(struct nvkm_engine *engine)
{
	return nvkm_falcon(engine);
}

static const struct nvkm_engine_func
nvkm_falcon = {
	.dtor = nvkm_falcon_dtor,
	.oneinit = nvkm_falcon_oneinit,
	.init = nvkm_falcon_init,
	.fini = nvkm_falcon_fini,
	.intr = nvkm_falcon_intr,
	.fifo.sclass = nvkm_falcon_oclass_get,
	.cclass = &nvkm_falcon_cclass,
};
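/* common constructor for falcon-based engines: allocate the nvkm_falcon,
 * take any built-in code/data segments from the implementation's function
 * table, and initialise the base engine.
 */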
int
nvkm_falcon_new_(const struct nvkm_falcon_func *func, struct nvkm_device *device,
		 enum nvkm_subdev_type type, int inst, bool enable, u32 addr,
		 struct nvkm_engine **pengine)
{
	struct nvkm_falcon *falcon;

	if (!(falcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
		return -ENOMEM;
	falcon->func = func;
	falcon->addr = addr;
	falcon->code.data = func->code.data;
	falcon->code.size = func->code.size;
	falcon->data.data = func->data.data;
	falcon->data.size = func->data.size;
	*pengine = &falcon->engine;

	return nvkm_engine_ctor(&nvkm_falcon, device, type, inst, enable, &falcon->engine);
}