// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo.h>

#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
#include "xe_vram_types.h"

static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
		  u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u32 column, row;
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);

	/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
	 * by writing dpt/ggtt in a different order?
	 */

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

			iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
			*dpt_ofs += 8;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*dpt_ofs += (dst_stride - height) * 8;
	}

	/* Align to next page */
	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}

static void
write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
		   u32 bo_ofs, u32 width, u32 height, u32 src_stride,
		   u32 dst_stride)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
	u32 column, row;
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);

	for (row = 0; row < height; row++) {
		u32 src_idx = src_stride * row + bo_ofs;

		for (column = 0; column < width; column++) {
			u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

			iosys_map_wr(map, *dpt_ofs, u64, pte | addr);

			*dpt_ofs += 8;
			src_idx++;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*dpt_ofs += (dst_stride - width) * 8;
	}

	/* Align to next page */
	*dpt_ofs = ALIGN(*dpt_ofs, 4096);
}

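/*
 * Map a framebuffer through a DPT (display page table): allocate a
 * page-table bo in VRAM or stolen memory (falling back to system memory),
 * write one 8-byte PTE per 4K tile according to the view type, and hand
 * the DPT's own GGTT node to the vma.
 */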
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
			       const struct i915_gtt_view *view,
			       struct i915_vma *vma,
			       unsigned int alignment)
{
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_ggtt *ggtt = tile0->mem.ggtt;
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
	u32 dpt_size, size = bo->ttm.base.size;

	if (view->type == I915_GTT_VIEW_NORMAL)
		dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE);
	else if (view->type == I915_GTT_VIEW_REMAPPED)
		dpt_size = ALIGN(intel_remapped_info_size(&fb->remapped_view.gtt.remapped) * 8,
				 XE_PAGE_SIZE);
	else
		/* display uses 4K tiles instead of bytes here, convert to entries.. */
		dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8,
				 XE_PAGE_SIZE);

	if (IS_DGFX(xe))
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_VRAM0 |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE,
						   alignment, false);
	else
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_STOLEN |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE,
						   alignment, false);
	if (IS_ERR(dpt))
		dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
						   dpt_size, ~0ull,
						   ttm_bo_type_kernel,
						   XE_BO_FLAG_SYSTEM |
						   XE_BO_FLAG_GGTT |
						   XE_BO_FLAG_PAGETABLE,
						   alignment, false);
	if (IS_ERR(dpt))
		return PTR_ERR(dpt);

	if (view->type == I915_GTT_VIEW_NORMAL) {
		u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
		u32 x;

		for (x = 0; x < size / XE_PAGE_SIZE; x++) {
			u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);

			iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
		}
	} else if (view->type == I915_GTT_VIEW_REMAPPED) {
		const struct intel_remapped_info *remap_info = &view->remapped;
		u32 i, dpt_ofs = 0;

		for (i = 0; i < ARRAY_SIZE(remap_info->plane); i++)
			write_dpt_remapped(bo, &dpt->vmap, &dpt_ofs,
					   remap_info->plane[i].offset,
					   remap_info->plane[i].width,
					   remap_info->plane[i].height,
					   remap_info->plane[i].src_stride,
					   remap_info->plane[i].dst_stride);

	} else {
		const struct intel_rotation_info *rot_info = &view->rotated;
		u32 i, dpt_ofs = 0;

		for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
			write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs,
					  rot_info->plane[i].offset,
					  rot_info->plane[i].width,
					  rot_info->plane[i].height,
					  rot_info->plane[i].src_stride,
					  rot_info->plane[i].dst_stride);
	}

	vma->dpt = dpt;
	vma->node = dpt->ggtt_node[tile0->id];

	/* Ensure DPT writes are flushed */
	xe_device_l2_flush(xe);
	return 0;
}

static void
write_ggtt_rotated(struct xe_ggtt *ggtt, u32 *ggtt_ofs,
		   u64 pte_flags,
		   xe_ggtt_set_pte_fn write_pte,
		   struct xe_bo *bo, u32 bo_ofs,
		   u32 width, u32 height, u32 src_stride, u32 dst_stride)
{
	u32 column, row;

	for (column = 0; column < width; column++) {
		u32 src_idx = src_stride * (height - 1) + column + bo_ofs;

		for (row = 0; row < height; row++) {
			u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);

			write_pte(ggtt, *ggtt_ofs, pte_flags | addr);
			*ggtt_ofs += XE_PAGE_SIZE;
			src_idx -= src_stride;
		}

		/* The DE ignores the PTEs for the padding tiles */
		*ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE;
	}
}

struct fb_rotate_args {
	const struct i915_gtt_view *view;
	struct xe_bo *bo;
};

static void write_ggtt_rotated_node(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
				    u64 pte_flags, xe_ggtt_set_pte_fn write_pte, void *data)
{
	struct fb_rotate_args *args = data;
	struct xe_bo *bo = args->bo;
	const struct intel_rotation_info *rot_info = &args->view->rotated;
	u32 ggtt_ofs = xe_ggtt_node_addr(node);

	for (u32 i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		write_ggtt_rotated(ggtt, &ggtt_ofs, pte_flags, write_pte,
				   bo, rot_info->plane[i].offset,
				   rot_info->plane[i].width,
				   rot_info->plane[i].height,
				   rot_info->plane[i].src_stride,
				   rot_info->plane[i].dst_stride);
}

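/*
 * Map a framebuffer directly in the GGTT. A normal view of an already
 * bound bo reuses the existing GGTT node; anything else gets a fresh
 * node, with rotated views laid out by write_ggtt_rotated_node().
 */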
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
				const struct i915_gtt_view *view,
				struct i915_vma *vma,
				unsigned int alignment)
{
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_ggtt *ggtt = tile0->mem.ggtt;
	u64 pte, size;
	u32 align;
	int ret = 0;

	/* TODO: Consider sharing framebuffer mapping?
	 * embed i915_vma inside intel_framebuffer
	 */
	guard(xe_pm_runtime_noresume)(xe);

	align = XE_PAGE_SIZE;
	if (xe_bo_is_vram(bo) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		align = max(align, SZ_64K);

	/* Fast case, preallocated GGTT view? */
	if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
		vma->node = bo->ggtt_node[tile0->id];
		return 0;
	}

	if (view->type == I915_GTT_VIEW_NORMAL)
		size = xe_bo_size(bo);
	else
		/* display uses tiles instead of bytes here, so convert it back.. */
		size = intel_rotation_info_size(&view->rotated) * XE_PAGE_SIZE;

	pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
	vma->node = xe_ggtt_node_insert_transform(ggtt, bo, pte,
						  ALIGN(size, align), align,
						  view->type == I915_GTT_VIEW_NORMAL ?
						  NULL : write_ggtt_rotated_node,
						  &(struct fb_rotate_args){view, bo});
	if (IS_ERR(vma->node))
		ret = PTR_ERR(vma->node);

	return ret;
}

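/*
 * Common pin path: validate and pin the backing bo (migrating it to VRAM
 * on discrete devices), then bind it through either the DPT or the GGTT
 * path above.
 */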
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
					const struct i915_gtt_view *view,
					unsigned int alignment)
{
	struct drm_device *dev = fb->base.dev;
	struct xe_device *xe = to_xe_device(dev);
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	struct drm_gem_object *obj = intel_fb_bo(&fb->base);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	if (!vma)
		return ERR_PTR(-ENOMEM);

	refcount_set(&vma->ref, 1);
	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
		struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;

		/*
		 * If we need to be able to access the clear-color value stored
		 * in the buffer, then we require that such buffers are also
		 * CPU accessible. This is important on small-bar systems where
		 * only some subset of VRAM is CPU accessible.
		 */
		if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
			ret = -EINVAL;
			goto err;
		}
	}

	/*
	 * Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
	 * assumptions are incorrect for framebuffers
	 */
	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
			    ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		if (IS_DGFX(xe))
			ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec);
		else
			ret = xe_bo_validate(bo, NULL, true, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
		if (!ret)
			ttm_bo_pin(&bo->ttm);
	}
	if (ret)
		goto err;

	vma->bo = bo;
	if (intel_fb_uses_dpt(&fb->base))
		ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
	else
		ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
	if (ret)
		goto err_unpin;

	return vma;

err_unpin:
	ttm_bo_reserve(&bo->ttm, false, false, NULL);
	ttm_bo_unpin(&bo->ttm);
	ttm_bo_unreserve(&bo->ttm);
err:
	kfree(vma);
	return ERR_PTR(ret);
}

static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
	u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;

	if (!refcount_dec_and_test(&vma->ref))
		return;

	if (vma->dpt)
		xe_bo_unpin_map_no_vm(vma->dpt);
	else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
		 vma->bo->ggtt_node[tile_id] != vma->node)
		xe_ggtt_node_remove(vma->node, false);

	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
	ttm_bo_unpin(&vma->bo->ttm);
	ttm_bo_unreserve(&vma->bo->ttm);
	kfree(vma);
}

struct i915_vma *
intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
		     const struct i915_gtt_view *view,
		     unsigned int alignment,
		     unsigned int phys_alignment,
		     unsigned int vtd_guard,
		     bool uses_fence,
		     unsigned long *out_flags)
{
	*out_flags = 0;

	return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, alignment);
}

void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
{
	__xe_unpin_fb_vma(vma);
}

static bool reuse_vma(struct intel_plane_state *new_plane_state,
		      const struct intel_plane_state *old_plane_state)
{
	struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	struct xe_device *xe = to_xe_device(fb->base.dev);
	struct intel_display *display = xe->display;
	struct i915_vma *vma;

	if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
	    !memcmp(&old_plane_state->view.gtt,
		    &new_plane_state->view.gtt,
		    sizeof(new_plane_state->view.gtt))) {
		vma = old_plane_state->ggtt_vma;
		goto found;
	}

	if (fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) {
		vma = intel_fbdev_vma_pointer(display->fbdev.fbdev);
		if (vma)
			goto found;
	}

	return false;

found:
	refcount_inc(&vma->ref);
	new_plane_state->ggtt_vma = vma;

	new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
		plane->surf_offset(new_plane_state);

	return true;
}

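/*
 * Pin a plane's fb for scanout, reusing the vma from the old plane state
 * (or from fbdev) when the fb and its GTT view are unchanged.
 */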
int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
		       const struct intel_plane_state *old_plane_state)
{
	struct drm_framebuffer *fb = new_plane_state->hw.fb;
	struct drm_gem_object *obj = intel_fb_bo(fb);
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct i915_vma *vma;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
	unsigned int alignment = plane->min_alignment(plane, fb, 0);

	if (reuse_vma(new_plane_state, old_plane_state))
		return 0;

	/* We reject creating !SCANOUT fb's, so this is weird.. */
	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));

	vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	new_plane_state->ggtt_vma = vma;

	new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
		plane->surf_offset(new_plane_state);

	return 0;
}

void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
{
	__xe_unpin_fb_vma(old_plane_state->ggtt_vma);
	old_plane_state->ggtt_vma = NULL;
}

/*
 * For Xe introduce dummy intel_dpt_create which just returns NULL,
 * intel_dpt_destroy which does nothing, and a fake intel_dpt_offset
 * returning 0.
 */
struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
{
	return NULL;
}

void intel_dpt_destroy(struct i915_address_space *vm)
{
}

u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
	return 0;
}

void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
{
	*map = vma->bo->vmap;
}