/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The offset of @gpa relative to the GTTMMIO BAR base programmed in the
 * vGPU's virtual PCI configuration space.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
        /* Mask out the low BAR flag bits to get the GTTMMIO BAR base. */
        u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
                          ~GENMASK(3, 0);
        return gpa - gttmmio_gpa;
}

#define reg_is_mmio(gvt, reg)  \
        (reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
        (reg >= gvt->device_info.gtt_start_offset \
         && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
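/*
 * Illustrative sketch (not part of the driver): a trapped guest access at
 * physical address pa is first rebased against the GTTMMIO BAR, then
 * classified by the two range checks above. Approximate dispatch flow:
 *
 *      unsigned int offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 *
 *      if (reg_is_gtt(gvt, offset))
 *              ...GGTT entry emulation...
 *      else if (reg_is_mmio(gvt, offset))
 *              ...register emulation via intel_gvt_find_mmio_info()...
 *      else
 *              ...pass through via intel_gvt_hypervisor_read_gpa()...
 */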
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_vgpu *vgpu = __vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        int ret = -EINVAL;

        mutex_lock(&gvt->lock);

        /*
         * A read that hits a write-protected guest page (e.g. a shadowed
         * page table) is not an MMIO access; fetch it through the
         * hypervisor instead.
         */
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
                                        p_data, bytes);
                        if (ret) {
                                gvt_err("vgpu%d: guest page read error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                                        vgpu->id, ret,
                                        gp->gfn, pa, *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

        /* Accesses to the GGTT range of BAR0 go to the GTT emulation path. */
        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;

        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (!mmio && !vgpu->mmio.disable_warn_untrack) {
                gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
                        vgpu->id, offset, bytes, *(u32 *)p_data);

                if (offset == 0x206c) {
                        gvt_err("------------------------------------------\n");
                        gvt_err("vgpu%d: likely triggers a gfx reset\n",
                                vgpu->id);
                        gvt_err("------------------------------------------\n");
                        vgpu->mmio.disable_warn_untrack = true;
                }
        }

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
        } else
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

        if (ret)
                goto err;

        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
                        vgpu->id, offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}
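/*
 * Illustrative sketch (hypothetical, not part of the driver): a hypervisor
 * backend that traps a 4-byte MMIO read from the guest could forward it to
 * the emulation entry point above. The function example_trap_read and its
 * calling context are invented for illustration only.
 *
 *      static int example_trap_read(struct intel_vgpu *vgpu, u64 pa)
 *      {
 *              u32 val = 0;
 *              int ret;
 *
 *              ret = intel_vgpu_emulate_mmio_read(vgpu, pa, &val,
 *                                                 sizeof(val));
 *              if (ret)
 *                      return ret;
 *              ...inject val into the guest's register state...
 *              return 0;
 *      }
 */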
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
        struct intel_vgpu *vgpu = __vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
        u32 old_vreg = 0, old_sreg = 0;
        int ret = -EINVAL;

        mutex_lock(&gvt->lock);

        /*
         * A write that hits a write-protected guest page is routed to the
         * page's write handler (e.g. shadow page table maintenance).
         */
        if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
                struct intel_vgpu_guest_page *gp;

                gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
                if (gp) {
                        ret = gp->handler(gp, pa, p_data, bytes);
                        if (ret) {
                                gvt_err("vgpu%d: guest page write error %d, "
                                        "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
                                        vgpu->id, ret,
                                        gp->gfn, pa, *(u32 *)p_data, bytes);
                        }
                        mutex_unlock(&gvt->lock);
                        return ret;
                }
        }

        offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

        if (WARN_ON(bytes > 8))
                goto err;

        if (reg_is_gtt(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
                        goto err;
                if (WARN_ON(bytes != 4 && bytes != 8))
                        goto err;
                if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
                        goto err;

                ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
                                p_data, bytes);
                if (ret)
                        goto err;
                mutex_unlock(&gvt->lock);
                return ret;
        }

        if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
                ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
                mutex_unlock(&gvt->lock);
                return ret;
        }

        mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (!mmio && !vgpu->mmio.disable_warn_untrack)
                gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
                        vgpu->id, offset, bytes, *(u32 *)p_data);

        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }

        if (mmio) {
                u64 ro_mask = mmio->ro_mask;

                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
                                goto err;
                        if (WARN_ON(mmio->offset != offset))
                                goto err;
                }

                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        old_vreg = vgpu_vreg(vgpu, offset);
                        old_sreg = vgpu_sreg(vgpu, offset);
                }

                if (!ro_mask) {
                        ret = mmio->write(vgpu, offset, p_data, bytes);
                } else {
                        /* Protect RO bits like HW */
                        u64 data = 0;

                        /* all register bits are RO. */
                        if (ro_mask == ~(u64)0) {
                                gvt_err("vgpu%d: try to write RO reg %x\n",
                                        vgpu->id, offset);
                                ret = 0;
                                goto out;
                        }
                        /* keep the RO bits in the virtual register */
                        memcpy(&data, p_data, bytes);
                        data &= ~mmio->ro_mask;
                        data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
                        ret = mmio->write(vgpu, offset, &data, bytes);
                }

                /* higher 16bits of mode ctl regs are mask bits for change */
                if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
                        u32 mask = vgpu_vreg(vgpu, offset) >> 16;

                        vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
                                | (vgpu_vreg(vgpu, offset) & mask);
                        vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
                                | (vgpu_sreg(vgpu, offset) & mask);
                }
        } else
                ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                bytes);
        if (ret)
                goto err;
out:
        intel_gvt_mmio_set_accessed(gvt, offset);
        mutex_unlock(&gvt->lock);
        return 0;
err:
        gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
                        vgpu->id, offset, bytes);
        mutex_unlock(&gvt->lock);
        return ret;
}
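/*
 * Illustrative sketch (not part of the driver): how the RO-mask and
 * mode-mask logic above combine on a write. Register values here are
 * invented for the example.
 *
 * RO mask: with ro_mask = 0x0000ff00, a guest write of 0x12345678 is
 * merged as
 *
 *      data = (0x12345678 & ~0x0000ff00) | (vreg & 0x0000ff00);
 *
 * so bits 8..15 keep their current virtual-register value, the same way
 * hardware would ignore writes to read-only bits.
 *
 * Mode-control registers: the upper 16 bits of the written value form a
 * change mask for the lower 16 bits. Writing 0x00010001 sets bit 0 and
 * leaves every other bit untouched, because only mask bit 16 (covering
 * data bit 0) is asserted; writing 0x00010000 clears bit 0 instead.
 */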