/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#include "gt/intel_gt_regs.h"
#include <linux/vmalloc.h>

#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;

/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if (size == 0)
		return vgpu_gmadr_is_valid(vgpu, addr);

	if (vgpu_gmadr_is_aperture(vgpu, addr) &&
	    vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
		return true;
	else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
		 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
		return true;

	gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
		   addr, size);
	return false;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
		     "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;

	if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		     "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
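/*
 * Editor's note, an illustrative translation (the values are hypothetical):
 * if a vGPU's aperture is mapped at guest offset 0x0 while the host aperture
 * base is 0x8000000, intel_gvt_ggtt_gmadr_g2h() turns guest gmadr 0x1000 into
 * host gmadr 0x8000000 + (0x1000 - 0x0) = 0x8001000, and
 * intel_gvt_ggtt_index_g2h() performs the same translation on page indices
 * (gmadr >> I915_GTT_PAGE_SHIFT).
 */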
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - the type of the next-level page table
 * - the type of an entry inside this level of page table
 * - the type of the entry when its PSE bit is set
 *
 * If the given type does not carry a particular piece of information,
 * GTT_TYPE_INVALID is returned. For example, an L4 root entry has no PSE
 * bit, so asking for its PSE type returns GTT_TYPE_INVALID; likewise a PTE
 * page table has no next-level page table, so asking for its next level
 * returns GTT_TYPE_INVALID. This is useful when traversing a page table.
 */

struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static const struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
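/*
 * Editor's note, illustrative lookups: walking down one level with
 * get_next_pt_type(GTT_TYPE_PPGTT_PML4_PT) yields GTT_TYPE_PPGTT_PDP_PT,
 * while get_next_pt_type(GTT_TYPE_PPGTT_PTE_PT) yields GTT_TYPE_INVALID
 * because a PTE page table is the last level. Similarly,
 * get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY) yields GTT_TYPE_PPGTT_PTE_2M_ENTRY,
 * i.e. a PDE with PSE set maps a 2MB page.
 */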
static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct intel_gt *gt)
{
	mmio_hw_access_pre(gt);
	intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(gt);
}

static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;

	writeq(pte, addr);
}

static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}

#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
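/*
 * Editor's note, an illustrative breakdown (the value is hypothetical): with
 * GTT_HAW = 46, a 2MB PTE value of 0x0000000420000083 carries its flags in
 * the low bits (0x83 includes GEN8_PAGE_PRESENT and _PAGE_PSE) and its
 * address in bits [GTT_HAW-1:21], i.e. val & ADDR_2M_MASK = 0x420000000,
 * so gen8_gtt_get_pfn() below returns 0x420000000 >> PAGE_SHIFT = 0x420000.
 */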
#define GTT_SPTE_FLAG_64K_SPLITED	BIT(52) /* split 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
	if (gen8_gtt_test_pse(e)) {
		switch (e->type) {
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			e->val64 &= ~_PAGE_PSE;
			e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
			break;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
			e->val64 &= ~_PAGE_PSE;
			break;
		default:
			WARN_ON(1);
		}
	}
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes the PDP root pointer registers without the present
	 * bit set and that still works, so root pointer entries need to be
	 * treated specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & GEN8_PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GEN8_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GEN8_PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}

/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
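/*
 * Editor's note, an illustrative decomposition: for a 4-level PPGTT, a GMA
 * of 0x8080604005, i.e. (1 << 39) + (2 << 30) + (3 << 21) + (4 << 12) + 5,
 * decodes to gen8_gma_to_pml4_index() == 1, gen8_gma_to_l4_pdp_index() == 2,
 * gen8_gma_to_pde_index() == 3 and gen8_gma_to_pte_index() == 4, with 0x5
 * left over as the byte offset within the 4KB page.
 */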
static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_pse = gen8_gtt_clear_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
		struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}

/*
 * MM helpers.
 */
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}

static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
	unsigned long offset = index;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
		mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
		mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
	}

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
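/*
 * Editor's note, illustrative bookkeeping (the addresses are hypothetical):
 * if a vGPU's aperture starts at gmadr 0x10000000, a write through
 * ggtt_set_host_entry() for GGTT index 0x10001 (gmadr 0x10001000) is first
 * mirrored into mm->ggtt_mm.host_ggtt_aperture[0x10001 - 0x10000] and then
 * written to the hardware GGTT via pte_ops->set_entry(NULL, ...).
 */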
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.gfn) {
		if (spt->guest_page.oos_page)
			detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

		intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
	}

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}

static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt, *spn;
	struct radix_tree_iter iter;
	LIST_HEAD(all_spt);
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		list_move(&spt->post_shadow_list, &all_spt);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
		ppgtt_free_spt(spt);
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	return ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
}

/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
	struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unmap_dma;

	return spt;

err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}
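/*
 * Editor's note, an illustrative lookup: because each spt is indexed by the
 * mfn of its shadow page (its DMA address >> I915_GTT_PAGE_SHIFT), a shadow
 * entry that points at a lower-level page table can be resolved back to its
 * spt with intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(&se)); this is how
 * the invalidation and GMA-walk paths below recover the next-level spt.
 */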
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
		struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
		unsigned long gfn, bool guest_pde_ips)
{
	struct intel_vgpu_ppgtt_spt *spt;
	int ret;

	spt = ppgtt_alloc_spt(vgpu, type);
	if (IS_ERR(spt))
		return spt;

	/*
	 * Init guest_page.
	 */
	ret = intel_vgpu_register_page_track(vgpu, gfn,
			ppgtt_write_protection_handler, spt);
	if (ret) {
		ppgtt_free_spt(spt);
		return ERR_PTR(ret);
	}

	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

	return spt;
}

#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); \
	     i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
		if (!ppgtt_get_shadow_entry(spt, e, i))
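/*
 * Editor's note, an illustrative iteration: a page table holds
 * pt_entries(spt) == 4096 >> 3 == 512 entries. With pde_ips set (64K pages)
 * the iterators above step by GTT_64K_PTE_STRIDE and therefore only visit
 * slots 0, 16, 32, ... 496; with 4K pages they visit every slot 0..511.
 */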
static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
	return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	enum intel_gvt_gtt_type cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type);

		if (!gtt_type_is_pt(cur_pt_type) ||
		    !gtt_type_is_pt(cur_pt_type + 1)) {
			drm_WARN(&i915->drm, 1,
				 "Invalid page table type, cur_pt_type is: %d\n",
				 cur_pt_type);
			return -EINVAL;
		}

		cur_pt_type += 1;

		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}

static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	/* Uninitialized spte or unshadowed spte. */
	if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;

	trace_spt_change(spt->vgpu->id, "die", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	if (ppgtt_put_spt(spt) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
			/* We don't setup 64K shadow entry so far. */
			WARN(1, "suspicious 64K gtt entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
			gvt_vdbg_mm("invalidate 2M entry\n");
			continue;
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}
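/*
 * Editor's note, an illustrative refcount flow: an spt starts with refcount 1
 * from ppgtt_alloc_spt(). If a second guest PDE is shadowed with the same
 * guest page table, ppgtt_populate_spt_by_guest_entry() (below) only calls
 * ppgtt_get_spt() (refcount 2) instead of allocating again; each
 * ppgtt_invalidate_spt() then drops one reference, and the shadow page is
 * torn down and freed only when the count reaches zero.
 */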
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;

	if (GRAPHICS_VER(dev_priv) == 9) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (GRAPHICS_VER(dev_priv) >= 11) {
		/* 64K paging is now controlled only by the IPS bit in the PTE. */
		return true;
	} else
		return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt) {
		ppgtt_get_spt(spt);

		if (ips != spt->guest_page.pde_ips) {
			spt->guest_page.pde_ips = ips;

			gvt_dbg_mm("reshadow PDE since ips changed\n");
			clear_page(spt->shadow_page.vaddr);
			ret = ppgtt_populate_spt(spt);
			if (ret) {
				ppgtt_put_spt(spt);
				goto err;
			}
		}
	} else {
		int type = get_next_pt_type(we->type);

		if (!gtt_type_is_pt(type)) {
			ret = -EINVAL;
			goto err;
		}

		spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto err;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto err_free_spt;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto err_free_spt;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;

err_free_spt:
	ppgtt_free_spt(spt);
	spt = NULL;
err:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}

static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	/* We always split 64KB pages, so clear IPS in the shadow PDE. */
	if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
		ops->clear_ips(se);

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *sub_spt;
	struct intel_gvt_gtt_entry sub_se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	unsigned long sub_index;
	int ret;

	gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

	start_gfn = ops->get_pfn(se);

	sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
	if (IS_ERR(sub_spt))
		return PTR_ERR(sub_spt);

	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
						   PAGE_SIZE, &dma_addr);
		if (ret)
			goto err;
		sub_se.val64 = se->val64;

		/* Copy the PAT field from PDE. */
		sub_se.val64 &= ~_PAGE_PAT;
		sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

		ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
	}

	/* Clear dirty field. */
	se->val64 &= ~_PAGE_DIRTY;

	ops->clear_pse(se);
	ops->clear_ips(se);
	ops->set_pfn(se, sub_spt->shadow_page.mfn);
	ppgtt_set_shadow_entry(spt, se, index);
	return 0;
err:
	/* Cancel the existing address mappings of DMA addr. */
	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
		gvt_vdbg_mm("invalidate 4K entry\n");
		ppgtt_invalidate_pte(sub_spt, &sub_se);
	}
	/* Release the new allocated spt. */
	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
			 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
	ppgtt_free_spt(sub_spt);
	return ret;
}
static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *se)
{
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = *se;
	unsigned long start_gfn;
	dma_addr_t dma_addr;
	int i, ret;

	gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

	GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

	start_gfn = ops->get_pfn(se);

	entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
	ops->set_64k_splited(&entry);

	for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
		ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
						   PAGE_SIZE, &dma_addr);
		if (ret)
			return ret;

		ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
		ppgtt_set_shadow_entry(spt, &entry, index + i);
	}
	return 0;
}
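/*
 * Editor's note, an illustrative split: a guest 64K PTE at slot 16 covering
 * guest pages gfn..gfn+15 becomes 16 consecutive 4K shadow entries at slots
 * 16..31, each tagged with GTT_SPTE_FLAG_64K_SPLITED so that a later removal
 * of the guest entry can clear the whole group again.
 */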
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
		if (ret)
			return -ENXIO;
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		gvt_vdbg_mm("shadow 64K gtt entry\n");
		/*
		 * The layout of 64K pages is special: the page size is
		 * controlled by the upper PDE. To keep things simple, we
		 * always split a 64K page into smaller 4K pages in the
		 * shadow PT.
		 */
		return split_64KB_gtt_entry(vgpu, spt, index, &se);
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		gvt_vdbg_mm("shadow 2M gtt entry\n");
		if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
		    intel_gvt_dma_map_guest_page(vgpu, gfn,
						 I915_GTT_PAGE_SIZE_2M, &dma_addr))
			return split_2MB_gtt_entry(vgpu, spt, index, &se);
		break;
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
		return -EINVAL;
	}

	/* Successfully shadowed a 4K or 2M page (without splitting). */
	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else {
		/* We don't setup 64K shadow entry so far. */
		WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
		     "suspicious 64K entry\n");
		ppgtt_invalidate_pte(spt, se);
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}

static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}

static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				spt, spt->guest_page.type,
				new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}
static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}

static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}

/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}
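/*
 * Editor's note, a sketch of the out-of-sync (OOS) lifecycle as implemented
 * above: when a guest PTE page is written frequently, its write protection is
 * dropped and a snapshot of the page is kept in oos_page->mem
 * (attach_oos_page + ppgtt_set_guest_page_oos). Guest writes then land
 * without trapping. Before the next workload is submitted,
 * intel_vgpu_sync_oos_pages() re-enables write protection and re-shadows
 * only the entries that differ from the snapshot (sync_oos_page).
 */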
/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int i, ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one, so that the
	 * ppgtt table stays valid during the window between the two
	 * operations.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		/* For split 64KB entries, we need to clear all of them. */
		if (ops->test_64k_splited(&old_se) &&
		    !(index % GTT_64K_PTE_STRIDE)) {
			gvt_vdbg_mm("remove split 64K shadow entries\n");
			for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
				ops->clear_64k_splited(&old_se);
				ops->set_pfn(&old_se,
					vgpu->gtt.scratch_pt[type].page_mfn);
				ppgtt_set_shadow_entry(spt, &old_se, index + i);
			}
		} else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
			   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
			ops->clear_pse(&old_se);
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		} else {
			ops->set_pfn(&old_se,
				     vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &old_se, index);
		}
	}

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}


static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
		      &spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}

static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	/*
	 * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
	 * PTE#32, ... PTE#496 are used. Writes to the unused PTEs should be
	 * ignored.
	 */
	if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
	    (index % GTT_64K_PTE_STRIDE)) {
		gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
			    index);
		return 0;
	}

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}

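/*
 * Editor's note, an illustrative partial write: if a guest updates an 8-byte
 * PTE with two 4-byte trapped writes, the first write does not match
 * info->gtt_entry_size, so ppgtt_handle_guest_write_page_table_bytes() only
 * points the shadow entry at the scratch page and marks the slot in
 * post_shadow_bitmap; the complete entry is then (re)shadowed later by
 * intel_vgpu_flush_post_shadow() before the next workload runs.
 */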
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
		return -EINVAL;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}

static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}
/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * The ppgtt mm object on success, a negative error code wrapped in an
 * ERR_PTR if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.link);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);

	return mm;
}

static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt =
		vzalloc(array_size(nr_entries,
				   vgpu->gvt->device_info.gtt_entry_size));
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
	if (!mm->ggtt_mm.host_ggtt_aperture) {
		vfree(mm->ggtt_mm.virtual_ggtt);
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
	if (!mm->ggtt_mm.host_ggtt_hidden) {
		vfree(mm->ggtt_mm.host_ggtt_aperture);
		vfree(mm->ggtt_mm.virtual_ggtt);
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}

/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_del(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);

		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
		vfree(mm->ggtt_mm.host_ggtt_aperture);
		vfree(mm->ggtt_mm.host_ggtt_hidden);
	}

	vgpu_free_mm(mm);
}
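/*
 * Editor's note, an illustrative call (not taken from this file): a caller
 * handling a guest request for a 4-level PPGTT would pass the single guest
 * PML4 root in pdps[0], e.g.
 *
 *	mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 *					pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *
 * and later drop the reference with kref_put(&mm->ref, _intel_vgpu_mm_release)
 * so that the release path above tears down the shadow tables.
 */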
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user no longer needs a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec_if_positive(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: target vgpu mm
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
	}

	return 0;
}

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	mutex_lock(&gvt->gtt.ppgtt_mm_lock);

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
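/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * the expected pin/unpin pattern around a period in which the shadow page
 * table must stay resident, e.g. while a workload that uses @mm is in flight.
 * Pinning also re-populates the shadow if it had been reclaimed.
 */
static int __maybe_unused example_use_pinned_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	ret = intel_vgpu_pin_mm(mm);
	if (ret)
		return ret;

	/* ... submit and retire work that relies on the shadow PPGTT ... */

	intel_vgpu_unpin_mm(mm);
	return 0;
}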
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
				     gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
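/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * translating a graphics memory address through a vGPU mm object with
 * intel_vgpu_gma_to_gpa() above and checking for the failure sentinel.
 */
static int __maybe_unused example_gma_to_gpa(struct intel_vgpu_mm *mm,
					     unsigned long gma,
					     unsigned long *gpa)
{
	unsigned long addr;

	addr = intel_vgpu_gma_to_gpa(mm, gma);
	if (addr == INTEL_GVT_INVALID_ADDR)
		return -EFAULT;

	*gpa = addr;
	return 0;
}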
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = index << I915_GTT_PAGE_SHIFT;
	if (!intel_gvt_ggtt_validate_range(vgpu,
					   gma, 1 << I915_GTT_PAGE_SHIFT)) {
		gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
		memset(p_data, 0, bytes);
		return 0;
	}

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
	       bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
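/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * reading one guest GGTT PTE back through the MMIO emulation path above. The
 * offset arithmetic assumes the usual 8-byte GTT entries and uses the
 * device_info fields referenced by the emulation code.
 */
static int __maybe_unused example_read_guest_ggtt_pte(struct intel_vgpu *vgpu,
						      unsigned long index,
						      u64 *pte)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned int off = info->gtt_start_offset +
			   (index << info->gtt_entry_size_shift);

	/* the exported helper expects the raw GTT MMIO register offset */
	return intel_vgpu_emulate_ggtt_mmio_read(vgpu, off, pte, sizeof(*pte));
}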
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
	dma_addr_t dma_addr;
	int ret;
	struct intel_gvt_partial_pte *partial_pte, *pos, *n;
	bool partial_update = false;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	e.type = GTT_TYPE_GGTT_PTE;
	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
	       bytes);

	/* If the ggtt entry size is 8 bytes and the write is split into two
	 * 4-byte writes, save the first 4 bytes in a list and only update the
	 * virtual PTE. The shadow PTE is updated when the second 4 bytes
	 * arrive.
	 */
	if (bytes < info->gtt_entry_size) {
		bool found = false;

		list_for_each_entry_safe(pos, n,
				&ggtt_mm->ggtt_mm.partial_pte_list, list) {
			if (g_gtt_index == pos->offset >>
					info->gtt_entry_size_shift) {
				if (off != pos->offset) {
					/* the second partial part */
					int last_off = pos->offset &
						(info->gtt_entry_size - 1);

					memcpy((void *)&e.val64 + last_off,
					       (void *)&pos->data + last_off,
					       bytes);

					list_del(&pos->list);
					kfree(pos);
					found = true;
					break;
				}

				/* update of the first partial part */
				pos->data = e.val64;
				ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
				return 0;
			}
		}

		if (!found) {
			/* the first partial part */
			partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
			if (!partial_pte)
				return -ENOMEM;
			partial_pte->offset = off;
			partial_pte->data = e.val64;
			list_add_tail(&partial_pte->list,
				      &ggtt_mm->ggtt_mm.partial_pte_list);
			partial_update = true;
		}
	}

	if (!partial_update && (ops->test_present(&e))) {
		gfn = ops->get_pfn(&e);
		m.val64 = e.val64;
		m.type = e.type;

		ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
						   &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* The guest driver may read/write the entry while a
			 * partial update is pending; mapping the guest page
			 * can fail in that situation, so point the shadow
			 * entry at a scratch page instead.
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);

	ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
	ggtt_invalidate_pte(vgpu, &e);

	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->gt);
	return 0;
}
/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_engine_cs *engine;
	int i;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);

	/* if the ggtt entry of the last submitted context is written,
	 * that context was probably unpinned.
	 * Set the last shadowed ctx to invalid.
	 */
	for_each_engine(engine, vgpu->gvt->gt, i) {
		if (!s->last_ctx[i].valid)
			continue;

		if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
			s->last_ctx[i].valid = false;
	}
	return ret;
}
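/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * a guest that writes an 8-byte GGTT PTE as two 4-byte MMIO writes, exercising
 * the partial-update path above. The first half only updates the virtual PTE
 * and is parked on partial_pte_list; the shadow PTE is populated when the
 * second half arrives. @off is assumed to be the raw GTT MMIO register offset.
 */
static int __maybe_unused example_split_pte_write(struct intel_vgpu *vgpu,
						  unsigned int off, u64 pte)
{
	u32 lo = lower_32_bits(pte);
	u32 hi = upper_32_bits(pte);
	int ret;

	/* first partial write: virtual PTE only */
	ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, off, &lo, sizeof(lo));
	if (ret)
		return ret;

	/* second partial write: shadow PTE is populated here */
	return intel_vgpu_emulate_ggtt_mmio_write(vgpu, off + 4, &hi,
						  sizeof(hi));
}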
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type type)
{
	struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
	dma_addr_t daddr;

	if (drm_WARN_ON(&i915->drm,
			type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096,
			     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
		   vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries which point
	 * to the next level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by page tables of level
	 * 'type'. E.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
	 * is of type GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch
	 * page mfn.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters (present/writeable/cache type) are
		 * set to the same values as i915's scratch page tree.
		 */
		se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}

static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = vgpu->gvt->gt->i915->drm.dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}

/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);

	return create_scratch_page_tree(vgpu);
}

void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("why do we still have SPTs not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_gvt_partial_pte *pos, *next;

	list_for_each_entry_safe(pos, next,
				 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
				 list) {
		gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
			   pos->offset, pos->data);
		kfree(pos);
	}
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
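/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * the expected pairing of per-vGPU GTT setup and teardown around the vGPU
 * lifecycle, using intel_vgpu_init_gtt() above and intel_vgpu_clean_gtt()
 * below.
 */
static int __maybe_unused example_vgpu_gtt_lifecycle(struct intel_vgpu *vgpu)
{
	int ret;

	ret = intel_vgpu_init_gtt(vgpu);	/* at vGPU creation */
	if (ret)
		return ret;

	/* ... vGPU in service ... */

	intel_vgpu_clean_gtt(vgpu);		/* at vGPU destruction */
	return 0;
}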
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}

static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
	     "someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		free_page((unsigned long)oos_page->mem);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}
		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
		if (!oos_page->mem) {
			ret = -ENOMEM;
			kfree(oos_page);
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}

/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: pdp root array
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}

/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * pointer to mm object on success, ERR_PTR() encoded error code if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
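/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * how guest notifications of PPGTT creation/destruction might map onto the
 * get/put helpers, assuming a 4-level guest page table. The actual
 * notification plumbing lives outside this file.
 */
static int __maybe_unused example_handle_guest_ppgtt(struct intel_vgpu *vgpu,
						     u64 pdps[], bool create)
{
	struct intel_vgpu_mm *mm;

	if (create) {
		/* takes a reference on an existing mm, or shadows a new one */
		mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
					     pdps);
		return PTR_ERR_OR_ZERO(mm);
	}

	/* drops the reference taken at creation time */
	return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
}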
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find the PPGTT mm object that matches a guest's
 * pdps and drop a reference to it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}

/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = gvt->gt->i915->drm.dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			     4096, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	mutex_init(&gvt->gtt.ppgtt_mm_lock);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = gvt->gt->i915->drm.dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called to invalidate all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			list_del_init(&mm->ppgtt_mm.lru_list);
			mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}

/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(gvt->gt);
}

/**
 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
 * @gvt: intel gvt device
 *
 * This function is called at the driver resume stage to restore
 * GGTT entries of every vGPU.
 *
 */
void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	struct intel_vgpu_mm *mm;
	int id;
	gen8_pte_t pte;
	u32 idx, num_low, num_hi, offset;

	/* Restore dirty host ggtt for all vGPUs */
	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mm = vgpu->gtt.ggtt_mm;

		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
		for (idx = 0; idx < num_low; idx++) {
			pte = mm->ggtt_mm.host_ggtt_aperture[idx];
			if (pte & GEN8_PAGE_PRESENT)
				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
		}

		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
		for (idx = 0; idx < num_hi; idx++) {
			pte = mm->ggtt_mm.host_ggtt_hidden[idx];
			if (pte & GEN8_PAGE_PRESENT)
				write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
		}
	}
}
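/*
 * Illustrative sketch (hypothetical example_* helper, not part of the driver):
 * a rough picture of when the two GGTT maintenance entry points above are
 * expected to run. On a full vGPU reset the guest view of the GGTT is pointed
 * at the scratch page and stale mappings are dropped; on host resume the
 * present host PTEs saved per vGPU are replayed into the hardware GGTT.
 */
static void __maybe_unused example_reset_and_resume(struct intel_vgpu *vgpu)
{
	/* during vGPU reset: scrub all GGTT entries and invalidate old ones */
	intel_vgpu_reset_ggtt(vgpu, true);

	/* during host resume: rewrite the dirty host GGTT entries of all vGPUs */
	intel_gvt_restore_ggtt(vgpu->gvt);
}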