/*
 * Copyright 2023 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "gf100.h"

#include <core/mm.h>
#include <subdev/fb.h>
#include <subdev/gsp.h>
#include <subdev/instmem.h>
#include <subdev/mmu/vmm.h>

#include <nvrm/nvtypes.h>
#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>

/* Flush posted writes by reading back through the current BAR2 window. */
static void
r535_bar_flush(struct nvkm_bar *bar)
{
	ioread32_native(bar->flushBAR2);
}

/* Nothing to wait for: the flush above is a synchronous read. */
static void
r535_bar_bar2_wait(struct nvkm_bar *base)
{
}

/*
 * Ask GSP-RM to (re)point the BAR2 page directory at the given physical
 * address; addr == 0 tears the mapping down.
 */
static int
r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
{
	rpc_update_bar_pde_v15_00 *rpc;

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
	rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
	rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
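	/*
	 * Illustrative note (an inference, not from the original source):
	 * the entry value above appears to follow nouveau's GP100+ PDE
	 * encoding (see gp100_vmm_pde()): the 4KiB-aligned physical address
	 * shifted into the PDE address field, ORed with aperture bits where
	 * 2 selects video memory.  entryLevelShift = 47 would then mark
	 * this as a top-level (PD3) entry, one spanning 1ULL << 47 bytes of
	 * VA space; the XXX above already flags that this should ideally be
	 * queried from the MMU code rather than hard-coded.
	 */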
	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}

/*
 * Tear down the RM-managed BAR2 mapping and fall back to flushing via the
 * physical-mode mapping of BAR2 set up at construction time.
 */
static void
r535_bar_bar2_fini(struct nvkm_bar *bar)
{
	struct nvkm_gsp *gsp = bar->subdev.device->gsp;

	bar->flushBAR2 = bar->flushBAR2PhysMode;
	nvkm_done(bar->flushFBZero);

	WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
}

/*
 * Point GSP-RM's BAR2 at nouveau's page directory, remember RM's own BAR2
 * PDB for the VMM code, and map the first page of VRAM to use as the
 * read-back target for flushes.
 */
static void
r535_bar_bar2_init(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
	struct nvkm_gsp *gsp = device->gsp;

	WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
	vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;

	if (!bar->flushFBZero) {
		struct nvkm_memory *fbZero;
		int ret;

		ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
		if (ret == 0) {
			ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
			nvkm_memory_unref(&fbZero);
		}
		WARN_ON(ret);
	}

	bar->bar2 = true;
	bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
	WARN_ON(!bar->flushBAR2);
}

static void
r535_bar_bar1_wait(struct nvkm_bar *base)
{
}

static void
r535_bar_bar1_fini(struct nvkm_bar *base)
{
}

/*
 * BAR1 is owned by GSP-RM: wrap RM's BAR1 page-directory base and install
 * it as the backing memory for nouveau's BAR1 VMM, so PTE updates land in
 * the page tables RM is actually using.
 */
static void
r535_bar_bar1_init(struct nvkm_bar *bar)
{
	struct nvkm_device *device = bar->subdev.device;
	struct nvkm_gsp *gsp = device->gsp;
	struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
	struct nvkm_memory *pd3;
	int ret;

	ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
	if (WARN_ON(ret))
		return;

	nvkm_memory_unref(&vmm->pd->pt[0]->memory);

	ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
	nvkm_memory_unref(&pd3);
	if (WARN_ON(ret))
		return;

	vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
}

static void *
r535_bar_dtor(struct nvkm_bar *bar)
{
	void *data = gf100_bar_dtor(bar);

	nvkm_memory_unref(&bar->flushFBZero);

	if (bar->flushBAR2PhysMode)
		iounmap(bar->flushBAR2PhysMode);

	kfree(bar->func);
	return data;
}

/*
 * Construct a BAR subdev that proxies BAR1/BAR2 management through GSP-RM,
 * reusing the hardware implementation's oneinit and VMM hooks.
 */
int
r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
	      enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
{
	struct nvkm_bar_func *rm;
	struct nvkm_bar *bar;
	int ret;

	if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
		return -ENOMEM;

	rm->dtor = r535_bar_dtor;
	rm->oneinit = hw->oneinit;
	rm->bar1.init = r535_bar_bar1_init;
	rm->bar1.fini = r535_bar_bar1_fini;
	rm->bar1.wait = r535_bar_bar1_wait;
	rm->bar1.vmm = hw->bar1.vmm;
	rm->bar2.init = r535_bar_bar2_init;
	rm->bar2.fini = r535_bar_bar2_fini;
	rm->bar2.wait = r535_bar_bar2_wait;
	rm->bar2.vmm = hw->bar2.vmm;
	rm->flush = r535_bar_flush;

	ret = gf100_bar_new_(rm, device, type, inst, &bar);
	*pbar = bar;
	if (ret) {
		if (!bar)
			kfree(rm);
		return ret;
	}

	/* Map BAR2 in physical mode (PCI resource 3), used as the flush
	 * target before bar2_init()/after bar2_fini(). */
	bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
	if (!bar->flushBAR2PhysMode)
		return -ENOMEM;

	bar->flushBAR2 = bar->flushBAR2PhysMode;

	gf100_bar(*pbar)->bar2_halve = true;
	return 0;
}
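/*
 * Usage sketch (illustrative, not part of the original file): a
 * chip-specific BAR constructor is expected to route through
 * r535_bar_new_() when GSP-RM owns the GPU, and fall back to the
 * traditional hardware path otherwise, in the style of tu102_bar_new().
 * "example_bar" and "example_bar_new" are hypothetical stand-ins for a
 * real chip's nvkm_bar_func table and constructor.
 */
#if 0
static const struct nvkm_bar_func example_bar;	/* chip's HW implementation */

static int
example_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type,
		int inst, struct nvkm_bar **pbar)
{
	/* nvkm_gsp_rm() reports whether GSP-RM firmware is driving the GPU. */
	if (nvkm_gsp_rm(device->gsp))
		return r535_bar_new_(&example_bar, device, type, inst, pbar);

	return gf100_bar_new_(&example_bar, device, type, inst, pbar);
}
#endif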