1 /*
2 * Copyright 2022 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #include "priv.h"
23
24 #include <subdev/mc.h>
25 #include <subdev/timer.h>
26
27 bool
ga102_flcn_riscv_active(struct nvkm_falcon * falcon)28 ga102_flcn_riscv_active(struct nvkm_falcon *falcon)
29 {
30 return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0;
31 }
32
33 static bool
ga102_flcn_dma_done(struct nvkm_falcon * falcon)34 ga102_flcn_dma_done(struct nvkm_falcon *falcon)
35 {
36 return !!(nvkm_falcon_rd32(falcon, 0x118) & 0x00000002);
37 }
38
/* Kick off a single falcon DMA transfer.
 *
 * NOTE(review): the write to 0x118 appears to trigger the transfer
 * (ga102_flcn_dma_done polls the same register), so it must remain the
 * last write in this sequence.
 */
static void
ga102_flcn_dma_xfer(struct nvkm_falcon *falcon, u32 mem_base, u32 dma_base, u32 cmd)
{
	nvkm_falcon_wr32(falcon, 0x114, mem_base);	/* destination offset in falcon memory */
	nvkm_falcon_wr32(falcon, 0x11c, dma_base);	/* source offset within the DMA region */
	nvkm_falcon_wr32(falcon, 0x118, cmd);		/* command word — starts the transfer */
}
46
/* Prepare the falcon DMA engine for a series of transfers and build the
 * per-transfer command word for ga102_flcn_dma_xfer.
 *
 * The command word encodes:
 *   - bits 10:8 : log2(xfer_len) - 2, i.e. the transfer size
 *                 (xfer_len is assumed to be a power of two >= 4 —
 *                  ilog2() would otherwise mis-encode it)
 *   - bit 4     : destination is IMEM rather than DMEM
 *   - bit 2     : secure transfer
 *
 * dma_addr is programmed into 0x110 shifted right by 8, so the DMA base
 * must be 256-byte aligned.  The purpose of clearing 0x128 is not
 * evident from this file — presumably an upper-address or config
 * register; verify against hardware documentation.
 *
 * Always returns 0.
 */
static int
ga102_flcn_dma_init(struct nvkm_falcon *falcon, u64 dma_addr, int xfer_len,
		    enum nvkm_falcon_mem mem_type, bool sec, u32 *cmd)
{
	*cmd = (ilog2(xfer_len) - 2) << 8;
	if (mem_type == IMEM)
		*cmd |= 0x00000010;
	if (sec)
		*cmd |= 0x00000004;

	nvkm_falcon_wr32(falcon, 0x110, dma_addr >> 8);
	nvkm_falcon_wr32(falcon, 0x128, 0x00000000);
	return 0;
}
61
/* GA102 falcon DMA operations: init programs the base address and
 * builds the command word, xfer starts one transfer, done polls for
 * its completion. */
const struct nvkm_falcon_func_dma
ga102_flcn_dma = {
	.init = ga102_flcn_dma_init,
	.xfer = ga102_flcn_dma_xfer,
	.done = ga102_flcn_dma_done,
};
68
/* Wait (up to 20ms) for the falcon's post-reset memory scrubbing to
 * complete, indicated by bit 12 of register 0x0f4 going clear.
 *
 * Returns 0 on success, -ETIMEDOUT if scrubbing does not finish.
 */
int
ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon)
{
	/* NOTE(review): mask of 0 with value 0 modifies no bits — this is
	 * effectively a read-modify-write of 0x040 that rewrites the same
	 * value; presumably done for its side effect (posting/ordering) —
	 * confirm against other flcn implementations. */
	nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000);

	if (nvkm_msec(falcon->owner->device, 20,
		if (!(nvkm_falcon_rd32(falcon, 0x0f4) & 0x00001000))
			break;
	) < 0)
		return -ETIMEDOUT;

	return 0;
}
82
/* Prepare the falcon for reset: poll (up to 150us) for bit 31 of
 * register 0x0f4 to assert.  Best-effort — always returns 0, even on
 * timeout.
 */
int
ga102_flcn_reset_prep(struct nvkm_falcon *falcon)
{
	/* Read and discard 0x0f4 — presumably to flush/latch state before
	 * polling; TODO confirm intent. */
	nvkm_falcon_rd32(falcon, 0x0f4);

	nvkm_usec(falcon->owner->device, 150,
		if (nvkm_falcon_rd32(falcon, 0x0f4) & 0x80000000)
			break;
		/* _warn is internal to the nvkm_usec macro; clearing it
		 * suppresses the timeout warning, making this wait
		 * deliberately best-effort. */
		_warn = false;
	);

	return 0;
}
96
/* Select the falcon core (as opposed to the RISC-V core, presumably —
 * see ga102_flcn_riscv_active): if bit 4 of addr2 + 0x668 is set,
 * write 0 to that register and wait (up to 10ms) for bit 0 to assert,
 * acknowledging the switch.
 *
 * Returns 0 on success (or if no switch was needed), -ETIMEDOUT if the
 * acknowledgement never arrives.
 */
int
ga102_flcn_select(struct nvkm_falcon *falcon)
{
	if ((nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000010) != 0x00000000) {
		nvkm_falcon_wr32(falcon, falcon->addr2 + 0x668, 0x00000000);
		if (nvkm_msec(falcon->owner->device, 10,
			if (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000001)
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	return 0;
}
111
/* GA102 firmware boot: program the DMEM signature, engine ID, and
 * ucode ID registers, then write 1 to addr2 + 0x180 (which appears to
 * commit/trigger the setup — keep it last; TODO confirm), and delegate
 * the actual boot/mailbox handshake to the GM200 implementation.
 *
 * Returns whatever gm200_flcn_fw_boot returns.
 */
int
ga102_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *mbox0, u32 *mbox1, u32 mbox0_ok, u32 irqsclr)
{
	struct nvkm_falcon *falcon = fw->falcon;

	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x210, fw->dmem_sign);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x19c, fw->engine_id);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x198, fw->ucode_id);
	nvkm_falcon_wr32(falcon, falcon->addr2 + 0x180, 0x00000001);

	return gm200_flcn_fw_boot(fw, mbox0, mbox1, mbox0_ok, irqsclr);
}
124
/* GA102 firmware load: configure the falcon, then DMA the firmware
 * image's IMEM section (as a secure transfer) and DMEM section (non-
 * secure) into falcon memory via nvkm_falcon_dma_wr.
 *
 * Returns 0 on success, or the first failing DMA transfer's error code.
 */
int
ga102_flcn_fw_load(struct nvkm_falcon_fw *fw)
{
	struct nvkm_falcon *falcon = fw->falcon;
	int ret = 0;

	/* Pre-load register setup; exact bit meanings are not evident from
	 * this file — 0x624 bit 7 is set, 0x10c is cleared, and the
	 * 0x00010007 field of 0x600 is programmed to (0 << 16)|(1 << 2)|1.
	 * Verify against hardware documentation before changing. */
	nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080);
	nvkm_falcon_wr32(falcon, 0x10c, 0x00000000);
	nvkm_falcon_mask(falcon, 0x600, 0x00010007, (0 << 16) | (1 << 2) | 1);

	ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->imem_base_img,
				 IMEM, fw->imem_base, fw->imem_size, true);
	if (ret)
		return ret;

	ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->dmem_base_img,
				 DMEM, fw->dmem_base, fw->dmem_size, false);
	if (ret)
		return ret;

	return 0;
}
147
/* GA102 firmware operations: GA100 signature handling and GM200 reset
 * are reused; load and boot are GA102-specific (defined above). */
const struct nvkm_falcon_fw_func
ga102_flcn_fw = {
	.signature = ga100_flcn_fw_signature,
	.reset = gm200_flcn_fw_reset,
	.load = ga102_flcn_fw_load,
	.boot = ga102_flcn_fw_boot,
};
155