/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
			     addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
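/*
 * Illustration for these index helpers (an example, not extra logic): with
 * I915_GTT_PAGE_SHIFT == 12, guest GGTT index 0x10 names guest gmadr 0x10000;
 * intel_gvt_ggtt_index_g2h() rebases that address into the host aperture or
 * hidden range and then shifts it back down to a host GGTT index.
 */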
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be found for a given type:
 * - type of the next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't carry such information, e.g. asking an l4 root
 * entry type for its PSE type, or asking a PTE page table type for its next
 * level page table type, GTT_TYPE_INVALID is returned, since an l4 root
 * entry has no PSE bit and a PTE page table has no next level. This is
 * useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
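/*
 * Example of how the table above is consumed by the helpers below (an
 * illustration, not extra behaviour): walking down from a PML4 entry,
 * get_next_pt_type(GTT_TYPE_PPGTT_PML4_ENTRY) yields GTT_TYPE_PPGTT_PDP_PT,
 * and a PDP entry with the PSE bit set is re-typed via
 * get_pse_type(GTT_TYPE_PPGTT_PDP_ENTRY) == GTT_TYPE_PPGTT_PTE_1G_ENTRY.
 * A PTE page table has no next level, so get_next_pt_type() returns
 * GTT_TYPE_INVALID for it.
 */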
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}

static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}
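/*
 * Illustration of the address masks above (descriptive only): with
 * GTT_HAW == 46, a 4K entry carries its PFN in bits [45:12], a 2M entry in
 * bits [45:21] and a 1G entry in bits [45:30]. For example, a 2M entry whose
 * val64 address field is 0x200000 decodes to pfn 0x200 after the PAGE_SHIFT
 * shift in gen8_gtt_get_pfn().
 */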
static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & _PAGE_PSE))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit and
	 * it still works, so root pointer entries have to be treated
	 * specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/*
 * MM helpers.
 */
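/*
 * Note on the root-entry helpers below (descriptive only): a PPGTT mm keeps
 * two sets of root pointers. guest_pdps mirrors the PDPs the vGPU wrote,
 * while shadow_pdps points at the host-built shadow page tables that the
 * hardware actually walks; the "guest" flag selects between them.
 */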
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);

	pte_ops->test_pse(entry);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}

/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			     spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			     spt->vgpu);
	if (ret)
		return ret;

	ops->test_pse(e);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			      spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			      spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.oos_page)
		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

	intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt;
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		ppgtt_free_spt(spt);
	}
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
	if (ret)
		return ret;

	return 0;
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	/*
	 * Init guest_page.
	 */
	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;

	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
			ppgtt_write_protection_handler, spt);
	if (ret)
		goto err_unmap_dma;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unreg_page_track;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;

err_unreg_page_track:
	intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
		    vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
			     ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 2M/1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
		     spt, e.val64, e.type);
	return ret;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt)
		ppgtt_get_spt(spt);
	else {
		int type = get_next_pt_type(we->type);

		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto fail;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto fail;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
		struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
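	/*
	 * Summary of the loop below (descriptive only): non-leaf guest
	 * entries recurse through ppgtt_populate_spt_by_guest_entry() so a
	 * whole shadow tree gets built, leaf entries are DMA-mapped and
	 * translated via ppgtt_populate_shadow_entry(), and guest frames
	 * that cannot be validated fall back to the scratch page.
	 */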
	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, ge.val64, ge.type);
	return ret;
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else
		ppgtt_invalidate_pte(spt, se);

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
			info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
		    && !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
			       spt, spt->guest_page.type,
			       new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}

static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
				struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
				struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that the
	 * ppgtt table stays valid during the window between the two
	 * operations.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &old_se, index);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
		     spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
		      &spt->vgpu->gtt.post_shadow_list_head);
}

/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				   post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				 GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	ops->test_pse(&we);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
			       false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}

/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The ppgtt mm object on success, ERR_PTR() encoding a negative error code
 * if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	return mm;
}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
				vgpu->gvt->device_info.gtt_entry_size);
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}
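/*
 * Lifetime note (descriptive only): vGPU mm objects are reference counted
 * through mm->ref and released by _intel_vgpu_mm_release() below. PPGTT mms
 * additionally sit on a per-vGPU list and on a global LRU list so that
 * reclaim_one_ppgtt_mm() can tear down unpinned shadow tables when shadow
 * page allocation runs short.
 */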
container_of(mm_ref, typeof(*mm), ref); 1608 1609 if (GEM_WARN_ON(atomic_read(&mm->pincount))) 1610 gvt_err("vgpu mm pin count bug detected\n"); 1611 1612 if (mm->type == INTEL_GVT_MM_PPGTT) { 1613 list_del(&mm->ppgtt_mm.list); 1614 list_del(&mm->ppgtt_mm.lru_list); 1615 invalidate_ppgtt_mm(mm); 1616 } else { 1617 vfree(mm->ggtt_mm.virtual_ggtt); 1618 } 1619 1620 vgpu_free_mm(mm); 1621 } 1622 1623 /** 1624 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object 1625 * @mm: a vGPU mm object 1626 * 1627 * This function is called when user doesn't want to use a vGPU mm object 1628 */ 1629 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1630 { 1631 atomic_dec(&mm->pincount); 1632 } 1633 1634 /** 1635 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object 1636 * @vgpu: a vGPU 1637 * 1638 * This function is called when user wants to use a vGPU mm object. If this 1639 * mm object hasn't been shadowed yet, the shadow will be populated at this 1640 * time. 1641 * 1642 * Returns: 1643 * Zero on success, negative error code if failed. 1644 */ 1645 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm) 1646 { 1647 int ret; 1648 1649 atomic_inc(&mm->pincount); 1650 1651 if (mm->type == INTEL_GVT_MM_PPGTT) { 1652 ret = shadow_ppgtt_mm(mm); 1653 if (ret) 1654 return ret; 1655 1656 list_move_tail(&mm->ppgtt_mm.lru_list, 1657 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1658 1659 } 1660 1661 return 0; 1662 } 1663 1664 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt) 1665 { 1666 struct intel_vgpu_mm *mm; 1667 struct list_head *pos, *n; 1668 1669 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1670 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1671 1672 if (atomic_read(&mm->pincount)) 1673 continue; 1674 1675 list_del_init(&mm->ppgtt_mm.lru_list); 1676 invalidate_ppgtt_mm(mm); 1677 return 1; 1678 } 1679 return 0; 1680 } 1681 1682 /* 1683 * GMA translation APIs. 1684 */ 1685 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, 1686 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) 1687 { 1688 struct intel_vgpu *vgpu = mm->vgpu; 1689 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1690 struct intel_vgpu_ppgtt_spt *s; 1691 1692 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); 1693 if (!s) 1694 return -ENXIO; 1695 1696 if (!guest) 1697 ppgtt_get_shadow_entry(s, e, index); 1698 else 1699 ppgtt_get_guest_entry(s, e, index); 1700 return 0; 1701 } 1702 1703 /** 1704 * intel_vgpu_gma_to_gpa - translate a gma to GPA 1705 * @mm: mm object. could be a PPGTT or GGTT mm object 1706 * @gma: graphics memory address in this mm object 1707 * 1708 * This function is used to translate a graphics memory address in specific 1709 * graphics memory space to guest physical address. 1710 * 1711 * Returns: 1712 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed. 
1713 */ 1714 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) 1715 { 1716 struct intel_vgpu *vgpu = mm->vgpu; 1717 struct intel_gvt *gvt = vgpu->gvt; 1718 struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; 1719 struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; 1720 unsigned long gpa = INTEL_GVT_INVALID_ADDR; 1721 unsigned long gma_index[4]; 1722 struct intel_gvt_gtt_entry e; 1723 int i, levels = 0; 1724 int ret; 1725 1726 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && 1727 mm->type != INTEL_GVT_MM_PPGTT); 1728 1729 if (mm->type == INTEL_GVT_MM_GGTT) { 1730 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1731 goto err; 1732 1733 ggtt_get_guest_entry(mm, &e, 1734 gma_ops->gma_to_ggtt_pte_index(gma)); 1735 1736 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) 1737 + (gma & ~I915_GTT_PAGE_MASK); 1738 1739 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); 1740 } else { 1741 switch (mm->ppgtt_mm.root_entry_type) { 1742 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY: 1743 ppgtt_get_shadow_root_entry(mm, &e, 0); 1744 1745 gma_index[0] = gma_ops->gma_to_pml4_index(gma); 1746 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); 1747 gma_index[2] = gma_ops->gma_to_pde_index(gma); 1748 gma_index[3] = gma_ops->gma_to_pte_index(gma); 1749 levels = 4; 1750 break; 1751 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY: 1752 ppgtt_get_shadow_root_entry(mm, &e, 1753 gma_ops->gma_to_l3_pdp_index(gma)); 1754 1755 gma_index[0] = gma_ops->gma_to_pde_index(gma); 1756 gma_index[1] = gma_ops->gma_to_pte_index(gma); 1757 levels = 2; 1758 break; 1759 default: 1760 GEM_BUG_ON(1); 1761 } 1762 1763 /* walk the shadow page table and get gpa from guest entry */ 1764 for (i = 0; i < levels; i++) { 1765 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i], 1766 (i == levels - 1)); 1767 if (ret) 1768 goto err; 1769 1770 if (!pte_ops->test_present(&e)) { 1771 gvt_dbg_core("GMA 0x%lx is not present\n", gma); 1772 goto err; 1773 } 1774 } 1775 1776 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + 1777 (gma & ~I915_GTT_PAGE_MASK); 1778 trace_gma_translate(vgpu->id, "ppgtt", 0, 1779 mm->ppgtt_mm.root_entry_type, gma, gpa); 1780 } 1781 1782 return gpa; 1783 err: 1784 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1785 return INTEL_GVT_INVALID_ADDR; 1786 } 1787 1788 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, 1789 unsigned int off, void *p_data, unsigned int bytes) 1790 { 1791 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1792 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1793 unsigned long index = off >> info->gtt_entry_size_shift; 1794 struct intel_gvt_gtt_entry e; 1795 1796 if (bytes != 4 && bytes != 8) 1797 return -EINVAL; 1798 1799 ggtt_get_guest_entry(ggtt_mm, &e, index); 1800 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), 1801 bytes); 1802 return 0; 1803 } 1804 1805 /** 1806 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read 1807 * @vgpu: a vGPU 1808 * @off: register offset 1809 * @p_data: data will be returned to guest 1810 * @bytes: data length 1811 * 1812 * This function is used to emulate the GTT MMIO register read 1813 * 1814 * Returns: 1815 * Zero on success, error code if failed. 
1816 */ 1817 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, 1818 void *p_data, unsigned int bytes) 1819 { 1820 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1821 int ret; 1822 1823 if (bytes != 4 && bytes != 8) 1824 return -EINVAL; 1825 1826 off -= info->gtt_start_offset; 1827 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes); 1828 return ret; 1829 } 1830 1831 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, 1832 struct intel_gvt_gtt_entry *entry) 1833 { 1834 struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; 1835 unsigned long pfn; 1836 1837 pfn = pte_ops->get_pfn(entry); 1838 if (pfn != vgpu->gvt->gtt.scratch_mfn) 1839 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, 1840 pfn << PAGE_SHIFT); 1841 } 1842 1843 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, 1844 void *p_data, unsigned int bytes) 1845 { 1846 struct intel_gvt *gvt = vgpu->gvt; 1847 const struct intel_gvt_device_info *info = &gvt->device_info; 1848 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; 1849 struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; 1850 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; 1851 unsigned long gma, gfn; 1852 struct intel_gvt_gtt_entry e, m; 1853 dma_addr_t dma_addr; 1854 int ret; 1855 1856 if (bytes != 4 && bytes != 8) 1857 return -EINVAL; 1858 1859 gma = g_gtt_index << I915_GTT_PAGE_SHIFT; 1860 1861 /* the VM may configure the whole GM space when ballooning is used */ 1862 if (!vgpu_gmadr_is_valid(vgpu, gma)) 1863 return 0; 1864 1865 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 1866 1867 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, 1868 bytes); 1869 1870 if (ops->test_present(&e)) { 1871 gfn = ops->get_pfn(&e); 1872 m = e; 1873 1874 /* one PTE update may be issued in multiple writes and the 1875 * first write may not construct a valid gfn 1876 */ 1877 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) { 1878 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1879 goto out; 1880 } 1881 1882 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, 1883 &dma_addr); 1884 if (ret) { 1885 gvt_vgpu_err("fail to populate guest ggtt entry\n"); 1886 /* guest driver may read/write the entry when partial 1887 * update the entry in this situation p2m will fail 1888 * settting the shadow entry to point to a scratch page 1889 */ 1890 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1891 } else 1892 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); 1893 } else { 1894 ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); 1895 ggtt_invalidate_pte(vgpu, &m); 1896 ops->set_pfn(&m, gvt->gtt.scratch_mfn); 1897 ops->clear_present(&m); 1898 } 1899 1900 out: 1901 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); 1902 ggtt_invalidate(gvt->dev_priv); 1903 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); 1904 return 0; 1905 } 1906 1907 /* 1908 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write 1909 * @vgpu: a vGPU 1910 * @off: register offset 1911 * @p_data: data from guest write 1912 * @bytes: data length 1913 * 1914 * This function is used to emulate the GTT MMIO register write 1915 * 1916 * Returns: 1917 * Zero on success, error code if failed. 
1918 */ 1919 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, 1920 unsigned int off, void *p_data, unsigned int bytes) 1921 { 1922 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 1923 int ret; 1924 1925 if (bytes != 4 && bytes != 8) 1926 return -EINVAL; 1927 1928 off -= info->gtt_start_offset; 1929 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes); 1930 return ret; 1931 } 1932 1933 static int alloc_scratch_pages(struct intel_vgpu *vgpu, 1934 intel_gvt_gtt_type_t type) 1935 { 1936 struct intel_vgpu_gtt *gtt = &vgpu->gtt; 1937 struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; 1938 int page_entry_num = I915_GTT_PAGE_SIZE >> 1939 vgpu->gvt->device_info.gtt_entry_size_shift; 1940 void *scratch_pt; 1941 int i; 1942 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 1943 dma_addr_t daddr; 1944 1945 if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) 1946 return -EINVAL; 1947 1948 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1949 if (!scratch_pt) { 1950 gvt_vgpu_err("fail to allocate scratch page\n"); 1951 return -ENOMEM; 1952 } 1953 1954 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1955 4096, PCI_DMA_BIDIRECTIONAL); 1956 if (dma_mapping_error(dev, daddr)) { 1957 gvt_vgpu_err("fail to dmamap scratch_pt\n"); 1958 __free_page(virt_to_page(scratch_pt)); 1959 return -ENOMEM; 1960 } 1961 gtt->scratch_pt[type].page_mfn = 1962 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); 1963 gtt->scratch_pt[type].page = virt_to_page(scratch_pt); 1964 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", 1965 vgpu->id, type, gtt->scratch_pt[type].page_mfn); 1966 1967 /* Build the tree by full filled the scratch pt with the entries which 1968 * point to the next level scratch pt or scratch page. The 1969 * scratch_pt[type] indicate the scratch pt/scratch page used by the 1970 * 'type' pt. 1971 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by 1972 * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self 1973 * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn. 1974 */ 1975 if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) { 1976 struct intel_gvt_gtt_entry se; 1977 1978 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); 1979 se.type = get_entry_type(type - 1); 1980 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); 1981 1982 /* The entry parameters like present/writeable/cache type 1983 * set to the same as i915's scratch page tree. 
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by the 'type' pt,
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt is
	 * itself a GTT_TYPE_PPGTT_PTE_PT table, filled with the scratch
	 * page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	return create_scratch_page_tree(vgpu);
}

static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("Why do we still have spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
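/*
 * Typical per-vGPU lifecycle of the pieces above, sketched for reference
 * only (the actual call sites live in the vGPU create/destroy paths outside
 * this file; 'vgpu' and 'ret' are hypothetical locals of such a caller):
 *
 *	ret = intel_vgpu_init_gtt(vgpu);	// GGTT mm + scratch page tree
 *	if (ret)
 *		goto err;
 *	...
 *	intel_vgpu_clean_gtt(vgpu);		// PPGTT mms, GGTT mm, scratch tree
 */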
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, error pointer if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
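/*
 * Illustrative pairing of the get/put helpers, assuming a caller that reacts
 * to guest PPGTT create/destroy notifications (the real notification
 * handling lives outside this file; 'pdps' stands for the guest's root
 * pointers as supplied by such a caller):
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *	...
 *	intel_vgpu_put_ppgtt_mm(vgpu, pdps);	// drops the reference taken above
 */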
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
		|| IS_KABYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}

/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			list_del_init(&mm->ppgtt_mm.lru_list);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}
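/*
 * Reader note for intel_vgpu_reset_ggtt() below: it walks the vGPU's
 * aperture and hidden GM ranges and points every GGTT entry at the device
 * scratch page. Within this file it is called with invalidate_old = false
 * from intel_vgpu_init_gtt(), where no old mappings exist yet, and with
 * invalidate_old = true from intel_vgpu_reset_gtt(), where stale DMA
 * mappings must first be released via ggtt_invalidate_pte().
 */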
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(dev_priv);
}

/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}
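/*
 * Device-level pairing, sketched for reference only (the actual call sites
 * are in the GVT device init/clean paths outside this file; 'gvt' and 'ret'
 * are hypothetical locals of such a caller):
 *
 *	ret = intel_gvt_init_gtt(gvt);		// pte/gma ops, scratch page, OOS pool
 *	if (ret)
 *		return ret;
 *	...
 *	intel_gvt_clean_gtt(gvt);		// unmap/free scratch page, OOS pool
 */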