1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2021 Intel Corporation
4 */
5
6 #include "xe_bo.h"
7
8 #include <linux/dma-buf.h>
9 #include <linux/nospec.h>
10
11 #include <drm/drm_drv.h>
12 #include <drm/drm_dumb_buffers.h>
13 #include <drm/drm_gem_ttm_helper.h>
14 #include <drm/drm_managed.h>
15 #include <drm/ttm/ttm_backup.h>
16 #include <drm/ttm/ttm_device.h>
17 #include <drm/ttm/ttm_placement.h>
18 #include <drm/ttm/ttm_tt.h>
19 #include <uapi/drm/xe_drm.h>
20
21 #include <kunit/static_stub.h>
22
23 #include <trace/events/gpu_mem.h>
24
25 #include "xe_device.h"
26 #include "xe_dma_buf.h"
27 #include "xe_drm_client.h"
28 #include "xe_ggtt.h"
29 #include "xe_gt.h"
30 #include "xe_map.h"
31 #include "xe_migrate.h"
32 #include "xe_pm.h"
33 #include "xe_preempt_fence.h"
34 #include "xe_pxp.h"
35 #include "xe_res_cursor.h"
36 #include "xe_shrinker.h"
37 #include "xe_sriov_vf_ccs.h"
38 #include "xe_tile.h"
39 #include "xe_trace_bo.h"
40 #include "xe_ttm_stolen_mgr.h"
41 #include "xe_vm.h"
42 #include "xe_vram_types.h"
43
44 const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
45 [XE_PL_SYSTEM] = "system",
46 [XE_PL_TT] = "gtt",
47 [XE_PL_VRAM0] = "vram0",
48 [XE_PL_VRAM1] = "vram1",
49 [XE_PL_STOLEN] = "stolen"
50 };
51
52 static const struct ttm_place sys_placement_flags = {
53 .fpfn = 0,
54 .lpfn = 0,
55 .mem_type = XE_PL_SYSTEM,
56 .flags = 0,
57 };
58
59 static struct ttm_placement sys_placement = {
60 .num_placement = 1,
61 .placement = &sys_placement_flags,
62 };
63
64 static struct ttm_placement purge_placement;
65
66 static const struct ttm_place tt_placement_flags[] = {
67 {
68 .fpfn = 0,
69 .lpfn = 0,
70 .mem_type = XE_PL_TT,
71 .flags = TTM_PL_FLAG_DESIRED,
72 },
73 {
74 .fpfn = 0,
75 .lpfn = 0,
76 .mem_type = XE_PL_SYSTEM,
77 .flags = TTM_PL_FLAG_FALLBACK,
78 }
79 };
80
81 static struct ttm_placement tt_placement = {
82 .num_placement = 2,
83 .placement = tt_placement_flags,
84 };
85
86 #define for_each_set_bo_vram_flag(bit__, bo_flags__) \
87 for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
88 for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK)
89
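/*
 * Illustrative sketch only (not used by the driver): how the iterator above
 * walks the per-tile VRAM bits of a bo_flags word. The helper name
 * xe_bo_count_vram_placements() is hypothetical.
 */
static inline unsigned int xe_bo_count_vram_placements(u32 bo_flags)
{
	u32 vram_flag;
	unsigned int count = 0;

	/* Visits each XE_BO_FLAG_VRAMn bit that is set in bo_flags */
	for_each_set_bo_vram_flag(vram_flag, bo_flags)
		count++;

	return count;
}
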
90 bool mem_type_is_vram(u32 mem_type)
91 {
92 return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
93 }
94
95 static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
96 {
97 return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
98 }
99
100 static bool resource_is_vram(struct ttm_resource *res)
101 {
102 return mem_type_is_vram(res->mem_type);
103 }
104
105 bool xe_bo_is_vram(struct xe_bo *bo)
106 {
107 return resource_is_vram(bo->ttm.resource) ||
108 resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
109 }
110
111 bool xe_bo_is_stolen(struct xe_bo *bo)
112 {
113 return bo->ttm.resource->mem_type == XE_PL_STOLEN;
114 }
115
116 /**
117 * xe_bo_has_single_placement - check if BO is placed only in one memory location
118 * @bo: The BO
119 *
120 * This function checks whether a given BO is placed in only one memory location.
121 *
122 * Returns: true if the BO is placed in a single memory location, false otherwise.
123 *
124 */
125 bool xe_bo_has_single_placement(struct xe_bo *bo)
126 {
127 return bo->placement.num_placement == 1;
128 }
129
130 /**
131 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
132 * @bo: The BO
133 *
134 * The stolen memory is accessed through the PCI BAR for both DGFX and some
135 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
136 *
137 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
138 */
139 bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
140 {
141 return xe_bo_is_stolen(bo) &&
142 GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
143 }
144
145 /**
146 * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
147 * @bo: The BO
148 *
149 * Check if a given bo is bound through VM_BIND. This requires the
150 * reservation lock for the BO to be held.
151 *
152 * Returns: boolean
153 */
154 bool xe_bo_is_vm_bound(struct xe_bo *bo)
155 {
156 xe_bo_assert_held(bo);
157
158 return !list_empty(&bo->ttm.base.gpuva.list);
159 }
160
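/*
 * Illustrative sketch only: xe_bo_is_vm_bound() asserts that the bo
 * reservation is held, so a caller without the lock would typically bracket
 * it with xe_bo_lock()/xe_bo_unlock() as below. bo_has_vm_bindings() is a
 * hypothetical helper, not part of the driver.
 */
static inline bool bo_has_vm_bindings(struct xe_bo *bo)
{
	bool bound;

	xe_bo_lock(bo, false);
	bound = xe_bo_is_vm_bound(bo);
	xe_bo_unlock(bo);

	return bound;
}
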
161 static bool xe_bo_is_user(struct xe_bo *bo)
162 {
163 return bo->flags & XE_BO_FLAG_USER;
164 }
165
166 static struct xe_migrate *
167 mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
168 {
169 struct xe_tile *tile;
170
171 xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
172 tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
173 return tile->migrate;
174 }
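
/*
 * Worked example for mem_type_to_migrate() above: XE_PL_VRAM0 resolves to
 * tiles[0], XE_PL_VRAM1 to tiles[1], and XE_PL_STOLEN always uses tiles[0],
 * i.e. stolen memory is handled by the root tile's migrate context.
 */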
175
176 static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
177 {
178 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
179 struct ttm_resource_manager *mgr;
180 struct xe_ttm_vram_mgr *vram_mgr;
181
182 xe_assert(xe, resource_is_vram(res));
183 mgr = ttm_manager_type(&xe->ttm, res->mem_type);
184 vram_mgr = to_xe_ttm_vram_mgr(mgr);
185
186 return container_of(vram_mgr, struct xe_vram_region, ttm);
187 }
188
189 static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
190 u32 bo_flags, u32 *c)
191 {
192 if (bo_flags & XE_BO_FLAG_SYSTEM) {
193 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
194
195 bo->placements[*c] = (struct ttm_place) {
196 .mem_type = XE_PL_TT,
197 .flags = (bo_flags & XE_BO_FLAG_VRAM_MASK) ?
198 TTM_PL_FLAG_FALLBACK : 0,
199 };
200 *c += 1;
201 }
202 }
203
204 static bool force_contiguous(u32 bo_flags)
205 {
206 if (bo_flags & XE_BO_FLAG_STOLEN)
207 return true; /* users expect this */
208 else if (bo_flags & XE_BO_FLAG_PINNED &&
209 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
210 return true; /* needs vmap */
211 else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
212 return true;
213
214 /*
215 * For eviction / restore on suspend / resume, objects pinned in VRAM
216 * must be contiguous. Also, only contiguous BOs support xe_bo_vmap.
217 */
218 return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
219 bo_flags & XE_BO_FLAG_PINNED;
220 }
221
222 static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag)
223 {
224 xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK);
225 xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0);
226
227 return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1;
228 }
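
/*
 * Worked example for the bit arithmetic above, assuming the per-tile VRAM
 * flags occupy consecutive bits starting at XE_BO_FLAG_VRAM0 (which the
 * shift relies on): with XE_BO_FLAG_VRAM0 == BIT(n), shifting right by
 * (__ffs(XE_BO_FLAG_VRAM0) - 1) == n - 1 maps VRAM0 to BIT(1) and VRAM1 to
 * BIT(2), so __ffs() - 1 yields tile id 0 and 1 respectively.
 */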
229
230 static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag,
231 enum ttm_bo_type type)
232 {
233 u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag);
234
235 xe_assert(xe, tile_id < xe->info.tile_count);
236
237 if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM))
238 return xe->tiles[tile_id].mem.kernel_vram->placement;
239 else
240 return xe->tiles[tile_id].mem.vram->placement;
241 }
242
243 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
244 struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
245 {
246 struct ttm_place place = { .mem_type = mem_type };
247 struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
248 struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
249
250 struct xe_vram_region *vram;
251 u64 io_size;
252
253 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
254
255 vram = container_of(vram_mgr, struct xe_vram_region, ttm);
256 xe_assert(xe, vram && vram->usable_size);
257 io_size = vram->io_size;
258
259 if (force_contiguous(bo_flags))
260 place.flags |= TTM_PL_FLAG_CONTIGUOUS;
261
262 if (io_size < vram->usable_size) {
263 if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
264 place.fpfn = 0;
265 place.lpfn = io_size >> PAGE_SHIFT;
266 } else {
267 place.flags |= TTM_PL_FLAG_TOPDOWN;
268 }
269 }
270 places[*c] = place;
271 *c += 1;
272 }
273
274 static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
275 u32 bo_flags, enum ttm_bo_type type, u32 *c)
276 {
277 u32 vram_flag;
278
279 for_each_set_bo_vram_flag(vram_flag, bo_flags) {
280 u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type);
281
282 add_vram(xe, bo, bo->placements, bo_flags, pl, c);
283 }
284 }
285
286 static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
287 u32 bo_flags, u32 *c)
288 {
289 if (bo_flags & XE_BO_FLAG_STOLEN) {
290 xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
291
292 bo->placements[*c] = (struct ttm_place) {
293 .mem_type = XE_PL_STOLEN,
294 .flags = force_contiguous(bo_flags) ?
295 TTM_PL_FLAG_CONTIGUOUS : 0,
296 };
297 *c += 1;
298 }
299 }
300
301 static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
302 u32 bo_flags, enum ttm_bo_type type)
303 {
304 u32 c = 0;
305
306 try_add_vram(xe, bo, bo_flags, type, &c);
307 try_add_system(xe, bo, bo_flags, &c);
308 try_add_stolen(xe, bo, bo_flags, &c);
309
310 if (!c)
311 return -EINVAL;
312
313 bo->placement = (struct ttm_placement) {
314 .num_placement = c,
315 .placement = bo->placements,
316 };
317
318 return 0;
319 }
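
/*
 * Worked example for __xe_bo_placement_for_flags() above: a user bo created
 * with XE_BO_FLAG_VRAM0 | XE_BO_FLAG_SYSTEM ends up with two placements,
 * tile 0 VRAM first and XE_PL_TT second, the latter marked
 * TTM_PL_FLAG_FALLBACK by try_add_system() because a VRAM flag is also set.
 */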
320
321 int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
322 u32 bo_flags, enum ttm_bo_type type)
323 {
324 xe_bo_assert_held(bo);
325 return __xe_bo_placement_for_flags(xe, bo, bo_flags, type);
326 }
327
328 static void xe_evict_flags(struct ttm_buffer_object *tbo,
329 struct ttm_placement *placement)
330 {
331 struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
332 bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
333 struct xe_bo *bo;
334
335 if (!xe_bo_is_xe_bo(tbo)) {
336 /* Don't handle scatter gather BOs */
337 if (tbo->type == ttm_bo_type_sg) {
338 placement->num_placement = 0;
339 return;
340 }
341
342 *placement = device_unplugged ? purge_placement : sys_placement;
343 return;
344 }
345
346 bo = ttm_to_xe_bo(tbo);
347 if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
348 *placement = sys_placement;
349 return;
350 }
351
352 if (device_unplugged && !tbo->base.dma_buf) {
353 *placement = purge_placement;
354 return;
355 }
356
357 /*
358 * For xe, sg bos that are evicted to system just trigger a
359 * rebind of the sg list upon subsequent validation to XE_PL_TT.
360 */
361 switch (tbo->resource->mem_type) {
362 case XE_PL_VRAM0:
363 case XE_PL_VRAM1:
364 case XE_PL_STOLEN:
365 *placement = tt_placement;
366 break;
367 case XE_PL_TT:
368 default:
369 *placement = sys_placement;
370 break;
371 }
372 }
373
374 /* struct xe_ttm_tt - Subclassed ttm_tt for xe */
375 struct xe_ttm_tt {
376 struct ttm_tt ttm;
377 struct sg_table sgt;
378 struct sg_table *sg;
379 /** @purgeable: Whether the content of the pages of @ttm is purgeable. */
380 bool purgeable;
381 };
382
383 static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
384 {
385 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
386 unsigned long num_pages = tt->num_pages;
387 int ret;
388
389 XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
390 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
391
392 if (xe_tt->sg)
393 return 0;
394
395 ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
396 num_pages, 0,
397 (u64)num_pages << PAGE_SHIFT,
398 xe_sg_segment_size(xe->drm.dev),
399 GFP_KERNEL);
400 if (ret)
401 return ret;
402
403 xe_tt->sg = &xe_tt->sgt;
404 ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
405 DMA_ATTR_SKIP_CPU_SYNC);
406 if (ret) {
407 sg_free_table(xe_tt->sg);
408 xe_tt->sg = NULL;
409 return ret;
410 }
411
412 return 0;
413 }
414
415 static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
416 {
417 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
418
419 if (xe_tt->sg) {
420 dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
421 DMA_BIDIRECTIONAL, 0);
422 sg_free_table(xe_tt->sg);
423 xe_tt->sg = NULL;
424 }
425 }
426
427 struct sg_table *xe_bo_sg(struct xe_bo *bo)
428 {
429 struct ttm_tt *tt = bo->ttm.ttm;
430 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
431
432 return xe_tt->sg;
433 }
434
435 /*
436 * Account ttm pages against the device shrinker's shrinkable and
437 * purgeable counts.
438 */
439 static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
440 {
441 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
442
443 if (xe_tt->purgeable)
444 xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
445 else
446 xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
447 }
448
449 static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
450 {
451 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
452
453 if (xe_tt->purgeable)
454 xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
455 else
456 xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
457 }
458
459 static void update_global_total_pages(struct ttm_device *ttm_dev,
460 long num_pages)
461 {
462 #if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
463 struct xe_device *xe = ttm_to_xe_device(ttm_dev);
464 u64 global_total_pages =
465 atomic64_add_return(num_pages, &xe->global_total_pages);
466
467 trace_gpu_mem_total(xe->drm.primary->index, 0,
468 global_total_pages << PAGE_SHIFT);
469 #endif
470 }
471
472 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
473 u32 page_flags)
474 {
475 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
476 struct xe_device *xe = xe_bo_device(bo);
477 struct xe_ttm_tt *xe_tt;
478 struct ttm_tt *tt;
479 unsigned long extra_pages;
480 enum ttm_caching caching = ttm_cached;
481 int err;
482
483 xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
484 if (!xe_tt)
485 return NULL;
486
487 tt = &xe_tt->ttm;
488
489 extra_pages = 0;
490 if (xe_bo_needs_ccs_pages(bo))
491 extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
492 PAGE_SIZE);
493
494 /*
495 * DGFX system memory is always WB / ttm_cached, since
496 * other caching modes are only supported on x86. DGFX
497 * GPU system memory accesses are always coherent with the
498 * CPU.
499 */
500 if (!IS_DGFX(xe)) {
501 switch (bo->cpu_caching) {
502 case DRM_XE_GEM_CPU_CACHING_WC:
503 caching = ttm_write_combined;
504 break;
505 default:
506 caching = ttm_cached;
507 break;
508 }
509
510 WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);
511
512 /*
513 * Display scanout is always non-coherent with the CPU cache.
514 *
515 * For Xe_LPG and beyond, PPGTT PTE lookups are also
516 * non-coherent and require a CPU:WC mapping.
517 */
518 if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
519 (xe->info.graphics_verx100 >= 1270 &&
520 bo->flags & XE_BO_FLAG_PAGETABLE))
521 caching = ttm_write_combined;
522 }
523
524 if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
525 /*
526 * Valid only for internally-created buffers, for
527 * which cpu_caching is never initialized.
528 */
529 xe_assert(xe, bo->cpu_caching == 0);
530 caching = ttm_uncached;
531 }
532
533 if (ttm_bo->type != ttm_bo_type_sg)
534 page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
535
536 err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
537 if (err) {
538 kfree(xe_tt);
539 return NULL;
540 }
541
542 if (ttm_bo->type != ttm_bo_type_sg) {
543 err = ttm_tt_setup_backup(tt);
544 if (err) {
545 ttm_tt_fini(tt);
546 kfree(xe_tt);
547 return NULL;
548 }
549 }
550
551 return tt;
552 }
553
554 static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
555 struct ttm_operation_ctx *ctx)
556 {
557 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
558 int err;
559
560 /*
561 * dma-bufs are not populated with pages, and the dma-
562 * addresses are set up when moved to XE_PL_TT.
563 */
564 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
565 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
566 return 0;
567
568 if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
569 err = ttm_tt_restore(ttm_dev, tt, ctx);
570 } else {
571 ttm_tt_clear_backed_up(tt);
572 err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
573 }
574 if (err)
575 return err;
576
577 xe_tt->purgeable = false;
578 xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
579 update_global_total_pages(ttm_dev, tt->num_pages);
580
581 return 0;
582 }
583
584 static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
585 {
586 struct xe_device *xe = ttm_to_xe_device(ttm_dev);
587
588 if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
589 !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
590 return;
591
592 xe_tt_unmap_sg(xe, tt);
593
594 ttm_pool_free(&ttm_dev->pool, tt);
595 xe_ttm_tt_account_subtract(xe, tt);
596 update_global_total_pages(ttm_dev, -(long)tt->num_pages);
597 }
598
599 static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
600 {
601 ttm_tt_fini(tt);
602 kfree(tt);
603 }
604
605 static bool xe_ttm_resource_visible(struct ttm_resource *mem)
606 {
607 struct xe_ttm_vram_mgr_resource *vres =
608 to_xe_ttm_vram_mgr_resource(mem);
609
610 return vres->used_visible_size == mem->size;
611 }
612
613 /**
614 * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM.
615 * @bo: The BO
616 *
617 * This function checks whether a given BO resides entirely in memory visible from the CPU.
618 *
619 * Returns: true if the BO is entirely visible, false otherwise.
620 *
621 */
622 bool xe_bo_is_visible_vram(struct xe_bo *bo)
623 {
624 if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo)))
625 return false;
626
627 return xe_ttm_resource_visible(bo->ttm.resource);
628 }
629
630 static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
631 struct ttm_resource *mem)
632 {
633 struct xe_device *xe = ttm_to_xe_device(bdev);
634
635 switch (mem->mem_type) {
636 case XE_PL_SYSTEM:
637 case XE_PL_TT:
638 return 0;
639 case XE_PL_VRAM0:
640 case XE_PL_VRAM1: {
641 struct xe_vram_region *vram = res_to_mem_region(mem);
642
643 if (!xe_ttm_resource_visible(mem))
644 return -EINVAL;
645
646 mem->bus.offset = mem->start << PAGE_SHIFT;
647
648 if (vram->mapping &&
649 mem->placement & TTM_PL_FLAG_CONTIGUOUS)
650 mem->bus.addr = (u8 __force *)vram->mapping +
651 mem->bus.offset;
652
653 mem->bus.offset += vram->io_start;
654 mem->bus.is_iomem = true;
655
656 #if !IS_ENABLED(CONFIG_X86)
657 mem->bus.caching = ttm_write_combined;
658 #endif
659 return 0;
660 } case XE_PL_STOLEN:
661 return xe_ttm_stolen_io_mem_reserve(xe, mem);
662 default:
663 return -EINVAL;
664 }
665 }
666
667 static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
668 const struct ttm_operation_ctx *ctx)
669 {
670 struct dma_resv_iter cursor;
671 struct dma_fence *fence;
672 struct drm_gem_object *obj = &bo->ttm.base;
673 struct drm_gpuvm_bo *vm_bo;
674 bool idle = false;
675 int ret = 0;
676
677 dma_resv_assert_held(bo->ttm.base.resv);
678
679 if (!list_empty(&bo->ttm.base.gpuva.list)) {
680 dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
681 DMA_RESV_USAGE_BOOKKEEP);
682 dma_resv_for_each_fence_unlocked(&cursor, fence)
683 dma_fence_enable_sw_signaling(fence);
684 dma_resv_iter_end(&cursor);
685 }
686
687 drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
688 struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
689 struct drm_gpuva *gpuva;
690
691 if (!xe_vm_in_fault_mode(vm)) {
692 drm_gpuvm_bo_evict(vm_bo, true);
693 continue;
694 }
695
696 if (!idle) {
697 long timeout;
698
699 if (ctx->no_wait_gpu &&
700 !dma_resv_test_signaled(bo->ttm.base.resv,
701 DMA_RESV_USAGE_BOOKKEEP))
702 return -EBUSY;
703
704 timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
705 DMA_RESV_USAGE_BOOKKEEP,
706 ctx->interruptible,
707 MAX_SCHEDULE_TIMEOUT);
708 if (!timeout)
709 return -ETIME;
710 if (timeout < 0)
711 return timeout;
712
713 idle = true;
714 }
715
716 drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
717 struct xe_vma *vma = gpuva_to_vma(gpuva);
718
719 trace_xe_vma_evict(vma);
720 ret = xe_vm_invalidate_vma(vma);
721 if (XE_WARN_ON(ret))
722 return ret;
723 }
724 }
725
726 return ret;
727 }
728
729 /*
730 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
731 * Note that unmapping the attachment is deferred to the next
732 * map_attachment time, or to bo destroy (after idling) whichever comes first.
733 * This is to avoid syncing before unmap_attachment(), assuming that the
734 * caller relies on idling the reservation object before moving the
735 * backing store out. Should that assumption not hold, then we will be able
736 * to unconditionally call unmap_attachment() when moving out to system.
737 */
738 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
739 struct ttm_resource *new_res)
740 {
741 struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
742 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
743 ttm);
744 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
745 bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
746 struct sg_table *sg;
747
748 xe_assert(xe, attach);
749 xe_assert(xe, ttm_bo->ttm);
750
751 if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
752 ttm_bo->sg) {
753 dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
754 false, MAX_SCHEDULE_TIMEOUT);
755 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
756 ttm_bo->sg = NULL;
757 }
758
759 if (new_res->mem_type == XE_PL_SYSTEM)
760 goto out;
761
762 if (ttm_bo->sg) {
763 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
764 ttm_bo->sg = NULL;
765 }
766
767 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
768 if (IS_ERR(sg))
769 return PTR_ERR(sg);
770
771 ttm_bo->sg = sg;
772 xe_tt->sg = sg;
773
774 out:
775 ttm_bo_move_null(ttm_bo, new_res);
776
777 return 0;
778 }
779
780 /**
781 * xe_bo_move_notify - Notify subsystems of a pending move
782 * @bo: The buffer object
783 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
784 *
785 * This function notifies subsystems of an upcoming buffer move.
786 * Upon receiving such a notification, subsystems should schedule
787 * halting access to the underlying pages and optionally add a fence
788 * to the buffer object's dma_resv object, that signals when access is
789 * stopped. The caller will wait on all dma_resv fences before
790 * starting the move.
791 *
792 * A subsystem may commence access to the object after obtaining
793 * bindings to the new backing memory under the object lock.
794 *
795 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
796 * negative error code on error.
797 */
798 static int xe_bo_move_notify(struct xe_bo *bo,
799 const struct ttm_operation_ctx *ctx)
800 {
801 struct ttm_buffer_object *ttm_bo = &bo->ttm;
802 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
803 struct ttm_resource *old_mem = ttm_bo->resource;
804 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
805 int ret;
806
807 /*
808 * If this starts to call into many components, consider
809 * using a notification chain here.
810 */
811
812 if (xe_bo_is_pinned(bo))
813 return -EINVAL;
814
815 xe_bo_vunmap(bo);
816 ret = xe_bo_trigger_rebind(xe, bo, ctx);
817 if (ret)
818 return ret;
819
820 /* Don't call move_notify() for imported dma-bufs. */
821 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
822 dma_buf_move_notify(ttm_bo->base.dma_buf);
823
824 /*
825 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
826 * so if we moved from VRAM make sure to unlink this from the userfault
827 * tracking.
828 */
829 if (mem_type_is_vram(old_mem_type)) {
830 mutex_lock(&xe->mem_access.vram_userfault.lock);
831 if (!list_empty(&bo->vram_userfault_link))
832 list_del_init(&bo->vram_userfault_link);
833 mutex_unlock(&xe->mem_access.vram_userfault.lock);
834 }
835
836 return 0;
837 }
838
839 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
840 struct ttm_operation_ctx *ctx,
841 struct ttm_resource *new_mem,
842 struct ttm_place *hop)
843 {
844 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
845 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
846 struct ttm_resource *old_mem = ttm_bo->resource;
847 u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
848 struct ttm_tt *ttm = ttm_bo->ttm;
849 struct xe_migrate *migrate = NULL;
850 struct dma_fence *fence;
851 bool move_lacks_source;
852 bool tt_has_data;
853 bool needs_clear;
854 bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
855 ttm && ttm_tt_is_populated(ttm)) ? true : false;
856 int ret = 0;
857
858 /* Bo creation path, moving to system or TT. */
859 if ((!old_mem && ttm) && !handle_system_ccs) {
860 if (new_mem->mem_type == XE_PL_TT)
861 ret = xe_tt_map_sg(xe, ttm);
862 if (!ret)
863 ttm_bo_move_null(ttm_bo, new_mem);
864 goto out;
865 }
866
867 if (ttm_bo->type == ttm_bo_type_sg) {
868 if (new_mem->mem_type == XE_PL_SYSTEM)
869 ret = xe_bo_move_notify(bo, ctx);
870 if (!ret)
871 ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
872 return ret;
873 }
874
875 tt_has_data = ttm && (ttm_tt_is_populated(ttm) || ttm_tt_is_swapped(ttm));
876
877 move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
878 (!mem_type_is_vram(old_mem_type) && !tt_has_data));
879
880 needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
881 (!ttm && ttm_bo->type == ttm_bo_type_device);
882
883 if (new_mem->mem_type == XE_PL_TT) {
884 ret = xe_tt_map_sg(xe, ttm);
885 if (ret)
886 goto out;
887 }
888
889 if ((move_lacks_source && !needs_clear)) {
890 ttm_bo_move_null(ttm_bo, new_mem);
891 goto out;
892 }
893
894 if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
895 new_mem->mem_type == XE_PL_SYSTEM) {
896 ret = xe_svm_bo_evict(bo);
897 if (!ret) {
898 drm_dbg(&xe->drm, "Evict system allocator BO success\n");
899 ttm_bo_move_null(ttm_bo, new_mem);
900 } else {
901 drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
902 ERR_PTR(ret));
903 }
904
905 goto out;
906 }
907
908 if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
909 ttm_bo_move_null(ttm_bo, new_mem);
910 goto out;
911 }
912
913 /*
914 * A failed multi-hop, where the old_mem is still marked as
915 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
916 */
917 if (old_mem_type == XE_PL_TT &&
918 new_mem->mem_type == XE_PL_TT) {
919 ttm_bo_move_null(ttm_bo, new_mem);
920 goto out;
921 }
922
923 if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
924 ret = xe_bo_move_notify(bo, ctx);
925 if (ret)
926 goto out;
927 }
928
929 if (old_mem_type == XE_PL_TT &&
930 new_mem->mem_type == XE_PL_SYSTEM) {
931 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
932 DMA_RESV_USAGE_BOOKKEEP,
933 false,
934 MAX_SCHEDULE_TIMEOUT);
935 if (timeout < 0) {
936 ret = timeout;
937 goto out;
938 }
939
940 if (!handle_system_ccs) {
941 ttm_bo_move_null(ttm_bo, new_mem);
942 goto out;
943 }
944 }
945
946 if (!move_lacks_source &&
947 ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
948 (mem_type_is_vram(old_mem_type) &&
949 new_mem->mem_type == XE_PL_SYSTEM))) {
950 hop->fpfn = 0;
951 hop->lpfn = 0;
952 hop->mem_type = XE_PL_TT;
953 hop->flags = TTM_PL_FLAG_TEMPORARY;
954 ret = -EMULTIHOP;
955 goto out;
956 }
957
958 if (bo->tile)
959 migrate = bo->tile->migrate;
960 else if (resource_is_vram(new_mem))
961 migrate = mem_type_to_migrate(xe, new_mem->mem_type);
962 else if (mem_type_is_vram(old_mem_type))
963 migrate = mem_type_to_migrate(xe, old_mem_type);
964 else
965 migrate = xe->tiles[0].migrate;
966
967 xe_assert(xe, migrate);
968 trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
969 if (xe_rpm_reclaim_safe(xe)) {
970 /*
971 * We might be called through swapout in the validation path of
972 * another TTM device, so acquire rpm here.
973 */
974 xe_pm_runtime_get(xe);
975 } else {
976 drm_WARN_ON(&xe->drm, handle_system_ccs);
977 xe_pm_runtime_get_noresume(xe);
978 }
979
980 if (move_lacks_source) {
981 u32 flags = 0;
982
983 if (mem_type_is_vram(new_mem->mem_type))
984 flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
985 else if (handle_system_ccs)
986 flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
987
988 fence = xe_migrate_clear(migrate, bo, new_mem, flags);
989 } else {
990 fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
991 handle_system_ccs);
992 }
993 if (IS_ERR(fence)) {
994 ret = PTR_ERR(fence);
995 xe_pm_runtime_put(xe);
996 goto out;
997 }
998 if (!move_lacks_source) {
999 ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
1000 new_mem);
1001 if (ret) {
1002 dma_fence_wait(fence, false);
1003 ttm_bo_move_null(ttm_bo, new_mem);
1004 ret = 0;
1005 }
1006 } else {
1007 /*
1008 * ttm_bo_move_accel_cleanup() may blow up if
1009 * bo->resource == NULL, so just attach the
1010 * fence and set the new resource.
1011 */
1012 dma_resv_add_fence(ttm_bo->base.resv, fence,
1013 DMA_RESV_USAGE_KERNEL);
1014 ttm_bo_move_null(ttm_bo, new_mem);
1015 }
1016
1017 dma_fence_put(fence);
1018 xe_pm_runtime_put(xe);
1019
1020 /*
1021 * CCS metadata is migrated from TT -> SMEM, so detach the
1022 * BBs from the BO as they are no longer needed.
1023 */
1024 if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
1025 new_mem->mem_type == XE_PL_SYSTEM)
1026 xe_sriov_vf_ccs_detach_bo(bo);
1027
1028 if (IS_VF_CCS_READY(xe) &&
1029 ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
1030 (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
1031 handle_system_ccs)
1032 ret = xe_sriov_vf_ccs_attach_bo(bo);
1033
1034 out:
1035 if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
1036 ttm_bo->ttm) {
1037 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
1038 DMA_RESV_USAGE_KERNEL,
1039 false,
1040 MAX_SCHEDULE_TIMEOUT);
1041 if (timeout < 0)
1042 ret = timeout;
1043
1044 if (IS_VF_CCS_READY(xe))
1045 xe_sriov_vf_ccs_detach_bo(bo);
1046
1047 xe_tt_unmap_sg(xe, ttm_bo->ttm);
1048 }
1049
1050 return ret;
1051 }
1052
1053 static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
1054 struct ttm_buffer_object *bo,
1055 unsigned long *scanned)
1056 {
1057 struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1058 long lret;
1059
1060 /* Fake move to system, without copying data. */
1061 if (bo->resource->mem_type != XE_PL_SYSTEM) {
1062 struct ttm_resource *new_resource;
1063
1064 lret = ttm_bo_wait_ctx(bo, ctx);
1065 if (lret)
1066 return lret;
1067
1068 lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
1069 if (lret)
1070 return lret;
1071
1072 xe_tt_unmap_sg(xe, bo->ttm);
1073 ttm_bo_move_null(bo, new_resource);
1074 }
1075
1076 *scanned += bo->ttm->num_pages;
1077 lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1078 {.purge = true,
1079 .writeback = false,
1080 .allow_move = false});
1081
1082 if (lret > 0)
1083 xe_ttm_tt_account_subtract(xe, bo->ttm);
1084
1085 return lret;
1086 }
1087
1088 static bool
1089 xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
1090 {
1091 struct drm_gpuvm_bo *vm_bo;
1092
1093 if (!ttm_bo_eviction_valuable(bo, place))
1094 return false;
1095
1096 if (!xe_bo_is_xe_bo(bo))
1097 return true;
1098
1099 drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
1100 if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
1101 return false;
1102 }
1103
1104 return true;
1105 }
1106
1107 /**
1108 * xe_bo_shrink() - Try to shrink an xe bo.
1109 * @ctx: The struct ttm_operation_ctx used for shrinking.
1110 * @bo: The TTM buffer object whose pages to shrink.
1111 * @flags: Flags governing the shrink behaviour.
1112 * @scanned: Pointer to a counter of the number of pages
1113 * attempted to shrink.
1114 *
1115 * Try to shrink or purge a bo, and if it succeeds, unmap its DMA.
1116 * Note that we also need to be able to handle non-xe bos
1117 * (ghost bos), but only if the struct ttm_tt is embedded in
1118 * a struct xe_ttm_tt. When the function attempts to shrink
1119 * the pages of a buffer object, the value pointed to by @scanned
1120 * is updated.
1121 *
1122 * Return: The number of pages shrunken or purged, or negative error
1123 * code on failure.
1124 */
1125 long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
1126 const struct xe_bo_shrink_flags flags,
1127 unsigned long *scanned)
1128 {
1129 struct ttm_tt *tt = bo->ttm;
1130 struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
1131 struct ttm_place place = {.mem_type = bo->resource->mem_type};
1132 struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
1133 struct xe_device *xe = ttm_to_xe_device(bo->bdev);
1134 bool needs_rpm;
1135 long lret = 0L;
1136
1137 if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
1138 (flags.purge && !xe_tt->purgeable))
1139 return -EBUSY;
1140
1141 if (!xe_bo_eviction_valuable(bo, &place))
1142 return -EBUSY;
1143
1144 if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
1145 return xe_bo_shrink_purge(ctx, bo, scanned);
1146
1147 if (xe_tt->purgeable) {
1148 if (bo->resource->mem_type != XE_PL_SYSTEM)
1149 lret = xe_bo_move_notify(xe_bo, ctx);
1150 if (!lret)
1151 lret = xe_bo_shrink_purge(ctx, bo, scanned);
1152 goto out_unref;
1153 }
1154
1155 /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
1156 needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
1157 xe_bo_needs_ccs_pages(xe_bo));
1158 if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1159 goto out_unref;
1160
1161 *scanned += tt->num_pages;
1162 lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
1163 {.purge = false,
1164 .writeback = flags.writeback,
1165 .allow_move = true});
1166 if (needs_rpm)
1167 xe_pm_runtime_put(xe);
1168
1169 if (lret > 0)
1170 xe_ttm_tt_account_subtract(xe, tt);
1171
1172 out_unref:
1173 xe_bo_put(xe_bo);
1174
1175 return lret;
1176 }
1177
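/*
 * Illustrative sketch only (the real scan loop lives in the xe shrinker):
 * how a caller holding the bo reservation might drive xe_bo_shrink().
 * shrink_one_bo_example() is a hypothetical helper, not part of the driver.
 */
static long __maybe_unused shrink_one_bo_example(struct ttm_buffer_object *bo,
						 unsigned long *scanned,
						 bool purge)
{
	struct ttm_operation_ctx tctx = {
		.interruptible = false,
		.no_wait_gpu = true,
		.gfp_retry_mayfail = true,
	};
	struct xe_bo_shrink_flags flags = {
		.purge = purge,
		.writeback = !purge,
	};

	/* Returns the number of pages shrunken or purged, or a negative errno. */
	return xe_bo_shrink(&tctx, bo, flags, scanned);
}
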
1178 /**
1179 * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
1180 * up in system memory.
1181 * @bo: The buffer object to prepare.
1182 *
1183 * On successful completion, the object backup pages are allocated. Expectation
1184 * is that this is called from the PM notifier, prior to suspend/hibernation.
1185 *
1186 * Return: 0 on success. Negative error code on failure.
1187 */
1188 int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
1189 {
1190 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1191 struct xe_validation_ctx ctx;
1192 struct drm_exec exec;
1193 struct xe_bo *backup;
1194 int ret = 0;
1195
1196 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1197 ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1198 drm_exec_retry_on_contention(&exec);
1199 xe_assert(xe, !ret);
1200 xe_assert(xe, !bo->backup_obj);
1201
1202 /*
1203 * Since this is called from the PM notifier we might have raced with
1204 * someone unpinning this after we dropped the pinned list lock and
1205 * before grabbing the above bo lock.
1206 */
1207 if (!xe_bo_is_pinned(bo))
1208 break;
1209
1210 if (!xe_bo_is_vram(bo))
1211 break;
1212
1213 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1214 break;
1215
1216 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
1217 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1218 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1219 XE_BO_FLAG_PINNED, &exec);
1220 if (IS_ERR(backup)) {
1221 drm_exec_retry_on_contention(&exec);
1222 ret = PTR_ERR(backup);
1223 xe_validation_retry_on_oom(&ctx, &ret);
1224 break;
1225 }
1226
1227 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1228 ttm_bo_pin(&backup->ttm);
1229 bo->backup_obj = backup;
1230 }
1231
1232 return ret;
1233 }
1234
1235 /**
1236 * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
1237 * @bo: The buffer object to undo the prepare for.
1238 *
1239 * Always returns 0. The backup object is removed, if still present. Expectation
1240 * is that this is called from the PM notifier when undoing the prepare step.
1241 *
1242 * Return: Always returns 0.
1243 */
1244 int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
1245 {
1246 xe_bo_lock(bo, false);
1247 if (bo->backup_obj) {
1248 ttm_bo_unpin(&bo->backup_obj->ttm);
1249 xe_bo_put(bo->backup_obj);
1250 bo->backup_obj = NULL;
1251 }
1252 xe_bo_unlock(bo);
1253
1254 return 0;
1255 }
1256
1257 static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
1258 {
1259 struct xe_device *xe = xe_bo_device(bo);
1260 bool unmap = false;
1261 int ret = 0;
1262
1263 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1264 struct xe_migrate *migrate;
1265 struct dma_fence *fence;
1266
1267 if (bo->tile)
1268 migrate = bo->tile->migrate;
1269 else
1270 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1271
1272 xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
1273 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1274 if (ret)
1275 goto out_backup;
1276
1277 fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
1278 backup->ttm.resource, false);
1279 if (IS_ERR(fence)) {
1280 ret = PTR_ERR(fence);
1281 goto out_backup;
1282 }
1283
1284 dma_resv_add_fence(bo->ttm.base.resv, fence,
1285 DMA_RESV_USAGE_KERNEL);
1286 dma_fence_put(fence);
1287 } else {
1288 ret = xe_bo_vmap(backup);
1289 if (ret)
1290 goto out_backup;
1291
1292 if (iosys_map_is_null(&bo->vmap)) {
1293 ret = xe_bo_vmap(bo);
1294 if (ret)
1295 goto out_vunmap;
1296 unmap = true;
1297 }
1298
1299 xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
1300 xe_bo_size(bo));
1301 }
1302
1303 if (!bo->backup_obj)
1304 bo->backup_obj = backup;
1305 out_vunmap:
1306 xe_bo_vunmap(backup);
1307 out_backup:
1308 if (unmap)
1309 xe_bo_vunmap(bo);
1310
1311 return ret;
1312 }
1313
1314 /**
1315 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
1316 * @bo: The buffer object to move.
1317 *
1318 * On successful completion, the object memory will be moved to system memory.
1319 *
1320 * This is needed for special handling of pinned VRAM objects during
1321 * suspend-resume.
1322 *
1323 * Return: 0 on success. Negative error code on failure.
1324 */
1325 int xe_bo_evict_pinned(struct xe_bo *bo)
1326 {
1327 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1328 struct xe_validation_ctx ctx;
1329 struct drm_exec exec;
1330 struct xe_bo *backup = bo->backup_obj;
1331 bool backup_created = false;
1332 int ret = 0;
1333
1334 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
1335 ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
1336 drm_exec_retry_on_contention(&exec);
1337 xe_assert(xe, !ret);
1338
1339 if (WARN_ON(!bo->ttm.resource)) {
1340 ret = -EINVAL;
1341 break;
1342 }
1343
1344 if (WARN_ON(!xe_bo_is_pinned(bo))) {
1345 ret = -EINVAL;
1346 break;
1347 }
1348
1349 if (!xe_bo_is_vram(bo))
1350 break;
1351
1352 if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
1353 break;
1354
1355 if (!backup) {
1356 backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
1357 xe_bo_size(bo),
1358 DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
1359 XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
1360 XE_BO_FLAG_PINNED, &exec);
1361 if (IS_ERR(backup)) {
1362 drm_exec_retry_on_contention(&exec);
1363 ret = PTR_ERR(backup);
1364 xe_validation_retry_on_oom(&ctx, &ret);
1365 break;
1366 }
1367 backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
1368 backup_created = true;
1369 }
1370
1371 ret = xe_bo_evict_pinned_copy(bo, backup);
1372 }
1373
1374 if (ret && backup_created)
1375 xe_bo_put(backup);
1376
1377 return ret;
1378 }
1379
1380 /**
1381 * xe_bo_restore_pinned() - Restore a pinned VRAM object
1382 * @bo: The buffer object to move.
1383 *
1384 * On successful completion, the object memory will be moved back to VRAM.
1385 *
1386 * This is needed for special handling of pinned VRAM objects during
1387 * suspend-resume.
1388 *
1389 * Return: 0 on success. Negative error code on failure.
1390 */
1391 int xe_bo_restore_pinned(struct xe_bo *bo)
1392 {
1393 struct ttm_operation_ctx ctx = {
1394 .interruptible = false,
1395 .gfp_retry_mayfail = false,
1396 };
1397 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
1398 struct xe_bo *backup = bo->backup_obj;
1399 bool unmap = false;
1400 int ret;
1401
1402 if (!backup)
1403 return 0;
1404
1405 xe_bo_lock(bo, false);
1406
1407 if (!xe_bo_is_pinned(backup)) {
1408 ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
1409 if (ret)
1410 goto out_unlock_bo;
1411 }
1412
1413 if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
1414 struct xe_migrate *migrate;
1415 struct dma_fence *fence;
1416
1417 if (bo->tile)
1418 migrate = bo->tile->migrate;
1419 else
1420 migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
1421
1422 ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
1423 if (ret)
1424 goto out_unlock_bo;
1425
1426 fence = xe_migrate_copy(migrate, backup, bo,
1427 backup->ttm.resource, bo->ttm.resource,
1428 false);
1429 if (IS_ERR(fence)) {
1430 ret = PTR_ERR(fence);
1431 goto out_unlock_bo;
1432 }
1433
1434 dma_resv_add_fence(bo->ttm.base.resv, fence,
1435 DMA_RESV_USAGE_KERNEL);
1436 dma_fence_put(fence);
1437 } else {
1438 ret = xe_bo_vmap(backup);
1439 if (ret)
1440 goto out_unlock_bo;
1441
1442 if (iosys_map_is_null(&bo->vmap)) {
1443 ret = xe_bo_vmap(bo);
1444 if (ret)
1445 goto out_backup;
1446 unmap = true;
1447 }
1448
1449 xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
1450 xe_bo_size(bo));
1451 }
1452
1453 bo->backup_obj = NULL;
1454
1455 out_backup:
1456 xe_bo_vunmap(backup);
1457 if (!bo->backup_obj) {
1458 if (xe_bo_is_pinned(backup))
1459 ttm_bo_unpin(&backup->ttm);
1460 xe_bo_put(backup);
1461 }
1462 out_unlock_bo:
1463 if (unmap)
1464 xe_bo_vunmap(bo);
1465 xe_bo_unlock(bo);
1466 return ret;
1467 }
1468
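/*
 * Illustrative sketch only: the intended pairing of xe_bo_evict_pinned() and
 * xe_bo_restore_pinned() around a suspend/resume cycle for one pinned VRAM
 * bo. The real flow walks the device's pinned-bo lists elsewhere in the
 * driver; pinned_bo_suspend_resume_example() is a hypothetical helper.
 */
static int __maybe_unused pinned_bo_suspend_resume_example(struct xe_bo *bo)
{
	int err;

	/* Back the pinned VRAM contents up to a system-memory backup object. */
	err = xe_bo_evict_pinned(bo);
	if (err)
		return err;

	/* ... power transition / device reset would happen here ... */

	/* Copy the backup back into VRAM and drop the backup object. */
	return xe_bo_restore_pinned(bo);
}
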
1469 int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
1470 {
1471 struct ttm_buffer_object *ttm_bo = &bo->ttm;
1472 struct ttm_tt *tt = ttm_bo->ttm;
1473
1474 if (tt) {
1475 struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
1476
1477 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1478 dma_buf_unmap_attachment(ttm_bo->base.import_attach,
1479 ttm_bo->sg,
1480 DMA_BIDIRECTIONAL);
1481 ttm_bo->sg = NULL;
1482 xe_tt->sg = NULL;
1483 } else if (xe_tt->sg) {
1484 dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
1485 xe_tt->sg,
1486 DMA_BIDIRECTIONAL, 0);
1487 sg_free_table(xe_tt->sg);
1488 xe_tt->sg = NULL;
1489 }
1490 }
1491
1492 return 0;
1493 }
1494
1495 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
1496 unsigned long page_offset)
1497 {
1498 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1499 struct xe_res_cursor cursor;
1500 struct xe_vram_region *vram;
1501
1502 if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
1503 return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
1504
1505 vram = res_to_mem_region(ttm_bo->resource);
1506 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
1507 return (vram->io_start + cursor.start) >> PAGE_SHIFT;
1508 }
1509
1510 static void __xe_bo_vunmap(struct xe_bo *bo);
1511
1512 /*
1513 * TODO: Move this function to TTM so we don't rely on how TTM does its
1514 * locking, thereby abusing TTM internals.
1515 */
1516 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
1517 {
1518 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1519 bool locked;
1520
1521 xe_assert(xe, !kref_read(&ttm_bo->kref));
1522
1523 /*
1524 * We can typically only race with TTM trylocking under the
1525 * lru_lock, which will immediately be unlocked again since
1526 * the ttm_bo refcount is zero at this point. So trylocking *should*
1527 * always succeed here, as long as we hold the lru lock.
1528 */
1529 spin_lock(&ttm_bo->bdev->lru_lock);
1530 locked = dma_resv_trylock(&ttm_bo->base._resv);
1531 spin_unlock(&ttm_bo->bdev->lru_lock);
1532 xe_assert(xe, locked);
1533
1534 return locked;
1535 }
1536
1537 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
1538 {
1539 struct dma_resv_iter cursor;
1540 struct dma_fence *fence;
1541 struct dma_fence *replacement = NULL;
1542 struct xe_bo *bo;
1543
1544 if (!xe_bo_is_xe_bo(ttm_bo))
1545 return;
1546
1547 bo = ttm_to_xe_bo(ttm_bo);
1548 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
1549
1550 if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
1551 return;
1552
1553 /*
1554 * Scrub the preempt fences if any. The unbind fence is already
1555 * attached to the resv.
1556 * TODO: Don't do this for external bos once we scrub them after
1557 * unbind.
1558 */
1559 dma_resv_for_each_fence(&cursor, &ttm_bo->base._resv,
1560 DMA_RESV_USAGE_BOOKKEEP, fence) {
1561 if (xe_fence_is_xe_preempt(fence) &&
1562 !dma_fence_is_signaled(fence)) {
1563 if (!replacement)
1564 replacement = dma_fence_get_stub();
1565
1566 dma_resv_replace_fences(&ttm_bo->base._resv,
1567 fence->context,
1568 replacement,
1569 DMA_RESV_USAGE_BOOKKEEP);
1570 }
1571 }
1572 dma_fence_put(replacement);
1573
1574 dma_resv_unlock(&ttm_bo->base._resv);
1575 }
1576
1577 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
1578 {
1579 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1580
1581 if (!xe_bo_is_xe_bo(ttm_bo))
1582 return;
1583
1584 if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
1585 xe_sriov_vf_ccs_detach_bo(bo);
1586
1587 /*
1588 * Object is idle and about to be destroyed. Release the
1589 * dma-buf attachment.
1590 */
1591 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1592 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1593 struct xe_ttm_tt, ttm);
1594
1595 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1596 DMA_BIDIRECTIONAL);
1597 ttm_bo->sg = NULL;
1598 xe_tt->sg = NULL;
1599 }
1600 }
1601
1602 static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
1603 {
1604 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1605
1606 if (ttm_bo->ttm) {
1607 struct ttm_placement place = {};
1608 int ret = ttm_bo_validate(ttm_bo, &place, ctx);
1609
1610 drm_WARN_ON(&xe->drm, ret);
1611 }
1612 }
1613
1614 static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
1615 {
1616 struct ttm_operation_ctx ctx = {
1617 .interruptible = false,
1618 .gfp_retry_mayfail = false,
1619 };
1620
1621 if (ttm_bo->ttm) {
1622 struct xe_ttm_tt *xe_tt =
1623 container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
1624
1625 if (xe_tt->purgeable)
1626 xe_ttm_bo_purge(ttm_bo, &ctx);
1627 }
1628 }
1629
1630 static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
1631 unsigned long offset, void *buf, int len,
1632 int write)
1633 {
1634 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1635 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1636 struct iosys_map vmap;
1637 struct xe_res_cursor cursor;
1638 struct xe_vram_region *vram;
1639 int bytes_left = len;
1640 int err = 0;
1641
1642 xe_bo_assert_held(bo);
1643 xe_device_assert_mem_access(xe);
1644
1645 if (!mem_type_is_vram(ttm_bo->resource->mem_type))
1646 return -EIO;
1647
1648 if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) {
1649 struct xe_migrate *migrate =
1650 mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
1651
1652 err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
1653 write);
1654 goto out;
1655 }
1656
1657 vram = res_to_mem_region(ttm_bo->resource);
1658 xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
1659 xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);
1660
1661 do {
1662 unsigned long page_offset = (offset & ~PAGE_MASK);
1663 int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
1664
1665 iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
1666 cursor.start);
1667 if (write)
1668 xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
1669 else
1670 xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
1671
1672 buf += byte_count;
1673 offset += byte_count;
1674 bytes_left -= byte_count;
1675 if (bytes_left)
1676 xe_res_next(&cursor, PAGE_SIZE);
1677 } while (bytes_left);
1678
1679 out:
1680 return err ?: len;
1681 }
1682
1683 const struct ttm_device_funcs xe_ttm_funcs = {
1684 .ttm_tt_create = xe_ttm_tt_create,
1685 .ttm_tt_populate = xe_ttm_tt_populate,
1686 .ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
1687 .ttm_tt_destroy = xe_ttm_tt_destroy,
1688 .evict_flags = xe_evict_flags,
1689 .move = xe_bo_move,
1690 .io_mem_reserve = xe_ttm_io_mem_reserve,
1691 .io_mem_pfn = xe_ttm_io_mem_pfn,
1692 .access_memory = xe_ttm_access_memory,
1693 .release_notify = xe_ttm_bo_release_notify,
1694 .eviction_valuable = xe_bo_eviction_valuable,
1695 .delete_mem_notify = xe_ttm_bo_delete_mem_notify,
1696 .swap_notify = xe_ttm_bo_swap_notify,
1697 };
1698
1699 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1700 {
1701 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1702 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1703 struct xe_tile *tile;
1704 u8 id;
1705
1706 if (bo->ttm.base.import_attach)
1707 drm_prime_gem_destroy(&bo->ttm.base, NULL);
1708 drm_gem_object_release(&bo->ttm.base);
1709
1710 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
1711
1712 for_each_tile(tile, xe, id)
1713 if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
1714 xe_ggtt_remove_bo(tile->mem.ggtt, bo);
1715
1716 #ifdef CONFIG_PROC_FS
1717 if (bo->client)
1718 xe_drm_client_remove_bo(bo);
1719 #endif
1720
1721 if (bo->vm && xe_bo_is_user(bo))
1722 xe_vm_put(bo->vm);
1723
1724 if (bo->parent_obj)
1725 xe_bo_put(bo->parent_obj);
1726
1727 mutex_lock(&xe->mem_access.vram_userfault.lock);
1728 if (!list_empty(&bo->vram_userfault_link))
1729 list_del(&bo->vram_userfault_link);
1730 mutex_unlock(&xe->mem_access.vram_userfault.lock);
1731
1732 kfree(bo);
1733 }
1734
1735 static void xe_gem_object_free(struct drm_gem_object *obj)
1736 {
1737 /* Our BO reference counting scheme works as follows:
1738 *
1739 * The gem object kref is typically used throughout the driver,
1740 * and the gem object holds a ttm_buffer_object refcount, so
1741 * that when the last gem object reference is put, which is when
1742 * we end up in this function, we put also that ttm_buffer_object
1743 * refcount. Anything using gem interfaces is then no longer
1744 * allowed to access the object in a way that requires a gem
1745 * refcount, including locking the object.
1746 *
1747 * Driver TTM callbacks are allowed to use the ttm_buffer_object
1748 * refcount directly if needed.
1749 */
1750 __xe_bo_vunmap(gem_to_xe_bo(obj));
1751 ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base));
1752 }
1753
1754 static void xe_gem_object_close(struct drm_gem_object *obj,
1755 struct drm_file *file_priv)
1756 {
1757 struct xe_bo *bo = gem_to_xe_bo(obj);
1758
1759 if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
1760 xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));
1761
1762 xe_bo_lock(bo, false);
1763 ttm_bo_set_bulk_move(&bo->ttm, NULL);
1764 xe_bo_unlock(bo);
1765 }
1766 }
1767
1768 static bool should_migrate_to_smem(struct xe_bo *bo)
1769 {
1770 /*
1771 * NOTE: The following atomic checks are platform-specific. For example,
1772 * if a device supports CXL atomics, these may not be necessary or
1773 * may behave differently.
1774 */
1775
1776 return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
1777 bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
1778 }
1779
1780 static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
1781 {
1782 long lerr;
1783
1784 if (ctx->no_wait_gpu)
1785 return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
1786 0 : -EBUSY;
1787
1788 lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
1789 ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
1790 if (lerr < 0)
1791 return lerr;
1792 if (lerr == 0)
1793 return -EBUSY;
1794
1795 return 0;
1796 }
1797
1798 /* Populate the bo if swapped out, or migrate if the access mode requires that. */
1799 static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
1800 struct drm_exec *exec)
1801 {
1802 struct ttm_buffer_object *tbo = &bo->ttm;
1803 int err = 0;
1804
1805 if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
1806 err = xe_bo_wait_usage_kernel(bo, ctx);
1807 if (!err)
1808 err = ttm_bo_populate(&bo->ttm, ctx);
1809 } else if (should_migrate_to_smem(bo)) {
1810 xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
1811 err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
1812 }
1813
1814 return err;
1815 }
1816
1817 /* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
1818 static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
1819 {
1820 vm_fault_t ret;
1821
1822 trace_xe_bo_cpu_fault(bo);
1823
1824 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1825 TTM_BO_VM_NUM_PREFAULT);
1826 /*
1827 * When TTM is actually called to insert PTEs, ensure no blocking conditions
1828 * remain, in which case TTM may drop locks and return VM_FAULT_RETRY.
1829 */
1830 xe_assert(xe, ret != VM_FAULT_RETRY);
1831
1832 if (ret == VM_FAULT_NOPAGE &&
1833 mem_type_is_vram(bo->ttm.resource->mem_type)) {
1834 mutex_lock(&xe->mem_access.vram_userfault.lock);
1835 if (list_empty(&bo->vram_userfault_link))
1836 list_add(&bo->vram_userfault_link,
1837 &xe->mem_access.vram_userfault.list);
1838 mutex_unlock(&xe->mem_access.vram_userfault.lock);
1839 }
1840
1841 return ret;
1842 }
1843
xe_err_to_fault_t(int err)1844 static vm_fault_t xe_err_to_fault_t(int err)
1845 {
1846 switch (err) {
1847 case 0:
1848 case -EINTR:
1849 case -ERESTARTSYS:
1850 case -EAGAIN:
1851 return VM_FAULT_NOPAGE;
1852 case -ENOMEM:
1853 case -ENOSPC:
1854 return VM_FAULT_OOM;
1855 default:
1856 break;
1857 }
1858 return VM_FAULT_SIGBUS;
1859 }
1860
xe_ttm_bo_is_imported(struct ttm_buffer_object * tbo)1861 static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
1862 {
1863 dma_resv_assert_held(tbo->base.resv);
1864
1865 return tbo->ttm &&
1866 (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
1867 TTM_TT_FLAG_EXTERNAL;
1868 }
1869
xe_bo_cpu_fault_fastpath(struct vm_fault * vmf,struct xe_device * xe,struct xe_bo * bo,bool needs_rpm)1870 static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
1871 struct xe_bo *bo, bool needs_rpm)
1872 {
1873 struct ttm_buffer_object *tbo = &bo->ttm;
1874 vm_fault_t ret = VM_FAULT_RETRY;
1875 struct xe_validation_ctx ctx;
1876 struct ttm_operation_ctx tctx = {
1877 .interruptible = true,
1878 .no_wait_gpu = true,
1879 .gfp_retry_mayfail = true,
1880
1881 };
1882 int err;
1883
1884 if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
1885 return VM_FAULT_RETRY;
1886
1887 err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
1888 (struct xe_val_flags) {
1889 .interruptible = true,
1890 .no_block = true
1891 });
1892 if (err)
1893 goto out_pm;
1894
1895 if (!dma_resv_trylock(tbo->base.resv))
1896 goto out_validation;
1897
1898 if (xe_ttm_bo_is_imported(tbo)) {
1899 ret = VM_FAULT_SIGBUS;
1900 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1901 goto out_unlock;
1902 }
1903
1904 err = xe_bo_fault_migrate(bo, &tctx, NULL);
1905 if (err) {
1906 /* Keep the VM_FAULT_RETRY default for -ENOMEM, -ENOSPC and -EBUSY. */
1907 if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
1908 ret = xe_err_to_fault_t(err);
1909 goto out_unlock;
1910 }
1911
1912 if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
1913 ret = __xe_bo_cpu_fault(vmf, xe, bo);
1914
1915 out_unlock:
1916 dma_resv_unlock(tbo->base.resv);
1917 out_validation:
1918 xe_validation_ctx_fini(&ctx);
1919 out_pm:
1920 if (needs_rpm)
1921 xe_pm_runtime_put(xe);
1922
1923 return ret;
1924 }
1925
xe_bo_cpu_fault(struct vm_fault * vmf)1926 static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
1927 {
1928 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1929 struct drm_device *ddev = tbo->base.dev;
1930 struct xe_device *xe = to_xe_device(ddev);
1931 struct xe_bo *bo = ttm_to_xe_bo(tbo);
1932 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
1933 bool retry_after_wait = false;
1934 struct xe_validation_ctx ctx;
1935 struct drm_exec exec;
1936 vm_fault_t ret;
1937 int err = 0;
1938 int idx;
1939
1940 if (!drm_dev_enter(&xe->drm, &idx))
1941 return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1942
1943 ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
1944 if (ret != VM_FAULT_RETRY)
1945 goto out;
1946
1947 if (fault_flag_allow_retry_first(vmf->flags)) {
1948 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
1949 goto out;
1950 retry_after_wait = true;
1951 xe_bo_get(bo);
1952 mmap_read_unlock(vmf->vma->vm_mm);
1953 } else {
1954 ret = VM_FAULT_NOPAGE;
1955 }
1956
1957 /*
1958 * The fastpath failed and we were not required to return and retry immediately.
1959 * We're now running in one of two modes:
1960 *
1961 * 1) retry_after_wait == true: The mmap_read_lock() has been dropped, so we can
1962 * resolve blocking waits here, but we can't resolve the fault itself since the
1963 * lock is gone. The aim is that the fastpath succeeds once the fault is retried,
1964 * although it may still fail since we drop the bo lock in between.
1965 *
1966 * 2) retry_after_wait == false: The fastpath failed, typically even after
1967 * a retry. Do whatever's necessary to resolve the fault.
1968 *
1969 * This construct is recommended to avoid excessive waits under the mmap_lock.
1970 */
1971
1972 if (needs_rpm)
1973 xe_pm_runtime_get(xe);
1974
1975 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
1976 err) {
1977 struct ttm_operation_ctx tctx = {
1978 .interruptible = true,
1979 .no_wait_gpu = false,
1980 .gfp_retry_mayfail = retry_after_wait,
1981 };
1982
1983 err = drm_exec_lock_obj(&exec, &tbo->base);
1984 drm_exec_retry_on_contention(&exec);
1985 if (err)
1986 break;
1987
1988 if (xe_ttm_bo_is_imported(tbo)) {
1989 err = -EFAULT;
1990 drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
1991 break;
1992 }
1993
1994 err = xe_bo_fault_migrate(bo, &tctx, &exec);
1995 if (err) {
1996 drm_exec_retry_on_contention(&exec);
1997 xe_validation_retry_on_oom(&ctx, &err);
1998 break;
1999 }
2000
2001 err = xe_bo_wait_usage_kernel(bo, &tctx);
2002 if (err)
2003 break;
2004
2005 if (!retry_after_wait)
2006 ret = __xe_bo_cpu_fault(vmf, xe, bo);
2007 }
2008 /* if retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
2009 if (err && !retry_after_wait)
2010 ret = xe_err_to_fault_t(err);
2011
2012 if (needs_rpm)
2013 xe_pm_runtime_put(xe);
2014
2015 if (retry_after_wait)
2016 xe_bo_put(bo);
2017 out:
2018 drm_dev_exit(idx);
2019
2020 return ret;
2021 }
2022
xe_bo_vm_access(struct vm_area_struct * vma,unsigned long addr,void * buf,int len,int write)2023 static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
2024 void *buf, int len, int write)
2025 {
2026 struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
2027 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
2028 struct xe_device *xe = xe_bo_device(bo);
2029 int ret;
2030
2031 xe_pm_runtime_get(xe);
2032 ret = ttm_bo_vm_access(vma, addr, buf, len, write);
2033 xe_pm_runtime_put(xe);
2034
2035 return ret;
2036 }
2037
2038 /**
2039 * xe_bo_read() - Read from an xe_bo
2040 * @bo: The buffer object to read from.
2041 * @offset: The byte offset to start reading from.
2042 * @dst: Location to store the read.
2043 * @size: Size in bytes for the read.
2044 *
2045 * Read @size bytes from the @bo, starting from @offset, storing into @dst.
2046 *
2047 * Return: Zero on success, or negative error.
2048 */
xe_bo_read(struct xe_bo * bo,u64 offset,void * dst,int size)2049 int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
2050 {
2051 int ret;
2052
2053 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
2054 if (ret >= 0 && ret != size)
2055 ret = -EIO;
2056 else if (ret == size)
2057 ret = 0;
2058
2059 return ret;
2060 }
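
/*
 * Usage sketch (illustrative only, not part of the driver): read back the
 * first bytes of a buffer object, e.g. for debug dumping. "bo" and "xe" are
 * assumed to exist in the caller; a short access is reported as -EIO.
 *
 *	u32 header[4];
 *	int err;
 *
 *	err = xe_bo_read(bo, 0, header, sizeof(header));
 *	if (err)
 *		drm_warn(&xe->drm, "BO readback failed: %d\n", err);
 */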
2061
2062 static const struct vm_operations_struct xe_gem_vm_ops = {
2063 .fault = xe_bo_cpu_fault,
2064 .open = ttm_bo_vm_open,
2065 .close = ttm_bo_vm_close,
2066 .access = xe_bo_vm_access,
2067 };
2068
2069 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
2070 .free = xe_gem_object_free,
2071 .close = xe_gem_object_close,
2072 .mmap = drm_gem_ttm_mmap,
2073 .export = xe_gem_prime_export,
2074 .vm_ops = &xe_gem_vm_ops,
2075 };
2076
2077 /**
2078 * xe_bo_alloc - Allocate storage for a struct xe_bo
2079 *
2080 * This function is intended to allocate storage to be used for input
2081 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
2082 * created is needed before the call to __xe_bo_create_locked().
2083 * If __xe_bo_create_locked() ends up never being called, then the
2084 * storage allocated with this function needs to be freed using
2085 * xe_bo_free().
2086 *
2087 * Return: A pointer to an uninitialized struct xe_bo on success,
2088 * ERR_PTR(-ENOMEM) on error.
2089 */
xe_bo_alloc(void)2090 struct xe_bo *xe_bo_alloc(void)
2091 {
2092 struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
2093
2094 if (!bo)
2095 return ERR_PTR(-ENOMEM);
2096
2097 return bo;
2098 }
2099
2100 /**
2101 * xe_bo_free - Free storage allocated using xe_bo_alloc()
2102 * @bo: The buffer object storage.
2103 *
2104 * Refer to xe_bo_alloc() documentation for valid use-cases.
2105 */
xe_bo_free(struct xe_bo * bo)2106 void xe_bo_free(struct xe_bo *bo)
2107 {
2108 kfree(bo);
2109 }
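
/*
 * Usage sketch (illustrative only): pre-allocate the bo storage when the
 * pointer is needed before initialization, and release it again with
 * xe_bo_free() if __xe_bo_create_locked() is never reached. The bail-out
 * condition below is hypothetical.
 *
 *	struct xe_bo *bo = xe_bo_alloc();
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	if (setup_failed) {
 *		xe_bo_free(bo);
 *		return -ENODEV;
 *	}
 *	... otherwise hand bo to __xe_bo_create_locked() / xe_bo_init_locked() ...
 */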
2110
2111 /**
2112 * xe_bo_init_locked() - Initialize or create an xe_bo.
2113 * @xe: The xe device.
2114 * @bo: An already allocated buffer object or NULL
2115 * if the function should allocate a new one.
2116 * @tile: The tile to select for migration of this bo, and the tile used for
2117 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2118 * @resv: Pointer to a locked shared reservation object to use for this bo,
2119 * or NULL for the xe_bo to use its own.
2120 * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
2121 * @size: The storage size to use for the bo.
2122 * @cpu_caching: The cpu caching used for system memory backing store.
2123 * @type: The TTM buffer object type.
2124 * @flags: XE_BO_FLAG_ flags.
2125 * @exec: The drm_exec transaction to use for exhaustive eviction.
2126 *
2127 * Initialize or create an xe buffer object. On failure, any allocated buffer
2128 * object passed in @bo will have been unreferenced.
2129 *
2130 * Return: The buffer object on success. Negative error pointer on failure.
2131 */
xe_bo_init_locked(struct xe_device * xe,struct xe_bo * bo,struct xe_tile * tile,struct dma_resv * resv,struct ttm_lru_bulk_move * bulk,size_t size,u16 cpu_caching,enum ttm_bo_type type,u32 flags,struct drm_exec * exec)2132 struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
2133 struct xe_tile *tile, struct dma_resv *resv,
2134 struct ttm_lru_bulk_move *bulk, size_t size,
2135 u16 cpu_caching, enum ttm_bo_type type,
2136 u32 flags, struct drm_exec *exec)
2137 {
2138 struct ttm_operation_ctx ctx = {
2139 .interruptible = true,
2140 .no_wait_gpu = false,
2141 .gfp_retry_mayfail = true,
2142 };
2143 struct ttm_placement *placement;
2144 uint32_t alignment;
2145 size_t aligned_size;
2146 int err;
2147
2148 /* Only kernel objects should set a tile */
2149 xe_assert(xe, !tile || type == ttm_bo_type_kernel);
2150
2151 if (XE_WARN_ON(!size)) {
2152 xe_bo_free(bo);
2153 return ERR_PTR(-EINVAL);
2154 }
2155
2156 /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
2157 if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
2158 return ERR_PTR(-EINVAL);
2159
2160 if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
2161 !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
2162 ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
2163 (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
2164 size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;
2165
2166 aligned_size = ALIGN(size, align);
2167 if (type != ttm_bo_type_device)
2168 size = ALIGN(size, align);
2169 flags |= XE_BO_FLAG_INTERNAL_64K;
2170 alignment = align >> PAGE_SHIFT;
2171 } else {
2172 aligned_size = ALIGN(size, SZ_4K);
2173 flags &= ~XE_BO_FLAG_INTERNAL_64K;
2174 alignment = SZ_4K >> PAGE_SHIFT;
2175 }
2176
2177 if (type == ttm_bo_type_device && aligned_size != size)
2178 return ERR_PTR(-EINVAL);
2179
2180 if (!bo) {
2181 bo = xe_bo_alloc();
2182 if (IS_ERR(bo))
2183 return bo;
2184 }
2185
2186 bo->ccs_cleared = false;
2187 bo->tile = tile;
2188 bo->flags = flags;
2189 bo->cpu_caching = cpu_caching;
2190 bo->ttm.base.funcs = &xe_gem_object_funcs;
2191 bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
2192 INIT_LIST_HEAD(&bo->pinned_link);
2193 #ifdef CONFIG_PROC_FS
2194 INIT_LIST_HEAD(&bo->client_link);
2195 #endif
2196 INIT_LIST_HEAD(&bo->vram_userfault_link);
2197
2198 drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);
2199
2200 if (resv) {
2201 ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
2202 ctx.resv = resv;
2203 }
2204
2205 xe_validation_assert_exec(xe, exec, &bo->ttm.base);
2206 if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
2207 err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type);
2208 if (WARN_ON(err)) {
2209 xe_ttm_bo_destroy(&bo->ttm);
2210 return ERR_PTR(err);
2211 }
2212 }
2213
2214 /* Defer populating type_sg bos */
2215 placement = (type == ttm_bo_type_sg ||
2216 bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
2217 &bo->placement;
2218 err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
2219 placement, alignment,
2220 &ctx, NULL, resv, xe_ttm_bo_destroy);
2221 if (err)
2222 return ERR_PTR(err);
2223
2224 /*
2225 * The VRAM pages underneath are potentially still being accessed by the
2226 * GPU, as per async GPU clearing and async evictions. However TTM makes
2227 * sure to add any corresponding move/clear fences into the object's
2228 * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
2229 *
2230 * For KMD internal buffers we don't care about GPU clearing, however we
2231 * still need to handle async evictions, where the VRAM is still being
2232 * accessed by the GPU. Most internal callers are not expecting this,
2233 * since they are missing the required synchronisation before accessing
2234 * the memory. To keep things simple just sync wait any kernel fences
2235 * here, if the buffer is designated KMD internal.
2236 *
2237 * For normal userspace objects we should already have the required
2238 * pipelining or sync waiting elsewhere, since we already have to deal
2239 * with things like async GPU clearing.
2240 */
2241 if (type == ttm_bo_type_kernel) {
2242 long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
2243 DMA_RESV_USAGE_KERNEL,
2244 ctx.interruptible,
2245 MAX_SCHEDULE_TIMEOUT);
2246
2247 if (timeout < 0) {
2248 if (!resv)
2249 dma_resv_unlock(bo->ttm.base.resv);
2250 xe_bo_put(bo);
2251 return ERR_PTR(timeout);
2252 }
2253 }
2254
2255 bo->created = true;
2256 if (bulk)
2257 ttm_bo_set_bulk_move(&bo->ttm, bulk);
2258 else
2259 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2260
2261 return bo;
2262 }
2263
__xe_bo_fixed_placement(struct xe_device * xe,struct xe_bo * bo,enum ttm_bo_type type,u32 flags,u64 start,u64 end,u64 size)2264 static int __xe_bo_fixed_placement(struct xe_device *xe,
2265 struct xe_bo *bo, enum ttm_bo_type type,
2266 u32 flags,
2267 u64 start, u64 end, u64 size)
2268 {
2269 struct ttm_place *place = bo->placements;
2270 u32 vram_flag, vram_stolen_flags;
2271
2272 /*
2273 * to allow fixed placement in GGTT of a VF, post-migration fixups would have to
2274 * include selecting a new fixed offset and shifting the page ranges for it
2275 */
2276 xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT));
2277
2278 if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
2279 return -EINVAL;
2280
2281 vram_flag = flags & XE_BO_FLAG_VRAM_MASK;
2282 vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag;
2283
2284 /* check if more than one VRAM/STOLEN flag is set */
2285 if (hweight32(vram_stolen_flags) > 1)
2286 return -EINVAL;
2287
2288 place->flags = TTM_PL_FLAG_CONTIGUOUS;
2289 place->fpfn = start >> PAGE_SHIFT;
2290 place->lpfn = end >> PAGE_SHIFT;
2291
2292 if (flags & XE_BO_FLAG_STOLEN)
2293 place->mem_type = XE_PL_STOLEN;
2294 else
2295 place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type);
2296
2297 bo->placement = (struct ttm_placement) {
2298 .num_placement = 1,
2299 .placement = place,
2300 };
2301
2302 return 0;
2303 }
2304
2305 static struct xe_bo *
__xe_bo_create_locked(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,u64 start,u64 end,u16 cpu_caching,enum ttm_bo_type type,u32 flags,u64 alignment,struct drm_exec * exec)2306 __xe_bo_create_locked(struct xe_device *xe,
2307 struct xe_tile *tile, struct xe_vm *vm,
2308 size_t size, u64 start, u64 end,
2309 u16 cpu_caching, enum ttm_bo_type type, u32 flags,
2310 u64 alignment, struct drm_exec *exec)
2311 {
2312 struct xe_bo *bo = NULL;
2313 int err;
2314
2315 if (vm)
2316 xe_vm_assert_held(vm);
2317
2318 if (start || end != ~0ULL) {
2319 bo = xe_bo_alloc();
2320 if (IS_ERR(bo))
2321 return bo;
2322
2323 flags |= XE_BO_FLAG_FIXED_PLACEMENT;
2324 err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size);
2325 if (err) {
2326 xe_bo_free(bo);
2327 return ERR_PTR(err);
2328 }
2329 }
2330
2331 bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
2332 vm && !xe_vm_in_fault_mode(vm) &&
2333 flags & XE_BO_FLAG_USER ?
2334 &vm->lru_bulk_move : NULL, size,
2335 cpu_caching, type, flags, exec);
2336 if (IS_ERR(bo))
2337 return bo;
2338
2339 bo->min_align = alignment;
2340
2341 /*
2342 * Note that instead of taking a reference to the drm_gpuvm_resv_bo(),
2343 * to ensure the shared resv doesn't disappear under the bo, the bo
2344 * will keep a reference to the vm, and avoid circular references
2345 * by having all the vm's bo references released at vm close
2346 * time.
2347 */
2348 if (vm && xe_bo_is_user(bo))
2349 xe_vm_get(vm);
2350 bo->vm = vm;
2351
2352 if (bo->flags & XE_BO_FLAG_GGTT) {
2353 struct xe_tile *t;
2354 u8 id;
2355
2356 if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
2357 if (!tile && flags & XE_BO_FLAG_STOLEN)
2358 tile = xe_device_get_root_tile(xe);
2359
2360 xe_assert(xe, tile);
2361 }
2362
2363 for_each_tile(t, xe, id) {
2364 if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
2365 continue;
2366
2367 if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
2368 err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
2369 start + xe_bo_size(bo), U64_MAX,
2370 exec);
2371 } else {
2372 err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
2373 }
2374 if (err)
2375 goto err_unlock_put_bo;
2376 }
2377 }
2378
2379 trace_xe_bo_create(bo);
2380 return bo;
2381
2382 err_unlock_put_bo:
2383 __xe_bo_unset_bulk_move(bo);
2384 xe_bo_unlock_vm_held(bo);
2385 xe_bo_put(bo);
2386 return ERR_PTR(err);
2387 }
2388
2389 /**
2390 * xe_bo_create_locked() - Create a BO
2391 * @xe: The xe device.
2392 * @tile: The tile to select for migration of this bo, and the tile used for
2393 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2394 * @vm: The local vm or NULL for external objects.
2395 * @size: The storage size to use for the bo.
2396 * @type: The TTM buffer object type.
2397 * @flags: XE_BO_FLAG_ flags.
2398 * @exec: The drm_exec transaction to use for exhaustive eviction.
2399 *
2400 * Create a locked xe BO with no range or alignment restrictions.
2401 *
2402 * Return: The buffer object on success. Negative error pointer on failure.
2403 */
xe_bo_create_locked(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,enum ttm_bo_type type,u32 flags,struct drm_exec * exec)2404 struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
2405 struct xe_vm *vm, size_t size,
2406 enum ttm_bo_type type, u32 flags,
2407 struct drm_exec *exec)
2408 {
2409 return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
2410 flags, 0, exec);
2411 }
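
/*
 * Usage sketch (illustrative only): create a locked kernel BO as part of a
 * drm_exec transaction and drop the lock once setup is done. The size and
 * flags here are arbitrary examples.
 *
 *	bo = xe_bo_create_locked(xe, tile, NULL, SZ_64K, ttm_bo_type_kernel,
 *				 XE_BO_FLAG_SYSTEM, exec);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	... pin, vmap or otherwise initialize while the BO is locked ...
 *	xe_bo_unlock(bo);
 */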
2412
xe_bo_create_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u16 cpu_caching,enum ttm_bo_type type,u32 flags,u64 alignment,bool intr)2413 static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
2414 size_t size, u16 cpu_caching,
2415 enum ttm_bo_type type, u32 flags,
2416 u64 alignment, bool intr)
2417 {
2418 struct xe_validation_ctx ctx;
2419 struct drm_exec exec;
2420 struct xe_bo *bo;
2421 int ret = 0;
2422
2423 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2424 ret) {
2425 bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
2426 cpu_caching, type, flags, alignment, &exec);
2427 drm_exec_retry_on_contention(&exec);
2428 if (IS_ERR(bo)) {
2429 ret = PTR_ERR(bo);
2430 xe_validation_retry_on_oom(&ctx, &ret);
2431 } else {
2432 xe_bo_unlock(bo);
2433 }
2434 }
2435
2436 return ret ? ERR_PTR(ret) : bo;
2437 }
2438
2439 /**
2440 * xe_bo_create_user() - Create a user BO
2441 * @xe: The xe device.
2442 * @vm: The local vm or NULL for external objects.
2443 * @size: The storage size to use for the bo.
2444 * @cpu_caching: The caching mode to be used for system backing store.
2445 * @flags: XE_BO_FLAG_ flags.
2446 * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
2447 * if such a transaction should be initiated by the call.
2448 *
2449 * Create a bo on behalf of user-space.
2450 *
2451 * Return: The buffer object on success. Negative error pointer on failure.
2452 */
xe_bo_create_user(struct xe_device * xe,struct xe_vm * vm,size_t size,u16 cpu_caching,u32 flags,struct drm_exec * exec)2453 struct xe_bo *xe_bo_create_user(struct xe_device *xe,
2454 struct xe_vm *vm, size_t size,
2455 u16 cpu_caching,
2456 u32 flags, struct drm_exec *exec)
2457 {
2458 struct xe_bo *bo;
2459
2460 flags |= XE_BO_FLAG_USER;
2461
2462 if (vm || exec) {
2463 xe_assert(xe, exec);
2464 bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
2465 cpu_caching, ttm_bo_type_device,
2466 flags, 0, exec);
2467 if (!IS_ERR(bo))
2468 xe_bo_unlock_vm_held(bo);
2469 } else {
2470 bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
2471 ttm_bo_type_device, flags, 0, true);
2472 }
2473
2474 return bo;
2475 }
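
/*
 * Usage sketch (illustrative only): create an external (VM-less) user BO in
 * system memory with write-combined CPU caching. With @vm and @exec both
 * NULL the function starts its own validation transaction, as in
 * xe_bo_dumb_create() below.
 *
 *	bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
 *			       XE_BO_FLAG_SYSTEM, NULL);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */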
2476
2477 /**
2478 * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
2479 * @xe: The xe device.
2480 * @tile: The tile to select for migration of this bo, and the tile used for
2481 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2482 * @size: The storage size to use for the bo.
2483 * @start: Start of fixed VRAM range or 0.
2484 * @end: End of fixed VRAM range or ~0ULL.
2485 * @type: The TTM buffer object type.
2486 * @flags: XE_BO_FLAG_ flags.
2487 *
2488 * Create and pin an xe BO, optionally at a fixed range. If @start and @end
2489 * indicate a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM
2490 * placement only.
2491 *
2492 * Return: The buffer object on success. Negative error pointer on failure.
2493 */
xe_bo_create_pin_range_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u64 start,u64 end,enum ttm_bo_type type,u32 flags)2494 struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
2495 size_t size, u64 start, u64 end,
2496 enum ttm_bo_type type, u32 flags)
2497 {
2498 struct xe_validation_ctx ctx;
2499 struct drm_exec exec;
2500 struct xe_bo *bo;
2501 int err = 0;
2502
2503 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
2504 bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
2505 0, type, flags, 0, &exec);
2506 if (IS_ERR(bo)) {
2507 drm_exec_retry_on_contention(&exec);
2508 err = PTR_ERR(bo);
2509 xe_validation_retry_on_oom(&ctx, &err);
2510 break;
2511 }
2512
2513 err = xe_bo_pin(bo, &exec);
2514 xe_bo_unlock(bo);
2515 if (err) {
2516 xe_bo_put(bo);
2517 drm_exec_retry_on_contention(&exec);
2518 xe_validation_retry_on_oom(&ctx, &err);
2519 break;
2520 }
2521 }
2522
2523 return err ? ERR_PTR(err) : bo;
2524 }
2525
xe_bo_create_pin_map_at_aligned(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,u64 offset,enum ttm_bo_type type,u32 flags,u64 alignment,struct drm_exec * exec)2526 static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
2527 struct xe_tile *tile,
2528 struct xe_vm *vm,
2529 size_t size, u64 offset,
2530 enum ttm_bo_type type, u32 flags,
2531 u64 alignment, struct drm_exec *exec)
2532 {
2533 struct xe_bo *bo;
2534 int err;
2535 u64 start = offset == ~0ull ? 0 : offset;
2536 u64 end = offset == ~0ull ? ~0ull : start + size;
2537
2538 if (flags & XE_BO_FLAG_STOLEN &&
2539 xe_ttm_stolen_cpu_access_needs_ggtt(xe))
2540 flags |= XE_BO_FLAG_GGTT;
2541
2542 bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
2543 flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
2544 alignment, exec);
2545 if (IS_ERR(bo))
2546 return bo;
2547
2548 err = xe_bo_pin(bo, exec);
2549 if (err)
2550 goto err_put;
2551
2552 err = xe_bo_vmap(bo);
2553 if (err)
2554 goto err_unpin;
2555
2556 xe_bo_unlock_vm_held(bo);
2557
2558 return bo;
2559
2560 err_unpin:
2561 xe_bo_unpin(bo);
2562 err_put:
2563 xe_bo_unlock_vm_held(bo);
2564 xe_bo_put(bo);
2565 return ERR_PTR(err);
2566 }
2567
2568 /**
2569 * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
2570 * @xe: The xe device.
2571 * @tile: The tile to select for migration of this bo, and the tile used for
2572 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2573 * @size: The storage size to use for the bo.
2574 * @offset: Optional VRAM offset or %~0ull for don't care.
2575 * @type: The TTM buffer object type.
2576 * @flags: XE_BO_FLAG_ flags.
2577 * @alignment: GGTT alignment.
2578 * @intr: Whether to execute any waits for backing store interruptible.
2579 *
2580 * Create a pinned and mapped bo at an optional fixed VRAM offset, with GGTT
2581 * alignment options. The bo will be external and not associated with a VM.
2582 *
2583 * Return: The buffer object on success. Negative error pointer on failure.
2584 * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2585 * to true on entry.
2586 */
2587 struct xe_bo *
xe_bo_create_pin_map_at_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,u64 offset,enum ttm_bo_type type,u32 flags,u64 alignment,bool intr)2588 xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
2589 size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
2590 u64 alignment, bool intr)
2591 {
2592 struct xe_validation_ctx ctx;
2593 struct drm_exec exec;
2594 struct xe_bo *bo;
2595 int ret = 0;
2596
2597 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
2598 ret) {
2599 bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
2600 type, flags, alignment, &exec);
2601 if (IS_ERR(bo)) {
2602 drm_exec_retry_on_contention(&exec);
2603 ret = PTR_ERR(bo);
2604 xe_validation_retry_on_oom(&ctx, &ret);
2605 }
2606 }
2607
2608 return ret ? ERR_PTR(ret) : bo;
2609 }
2610
2611 /**
2612 * xe_bo_create_pin_map() - Create pinned and mapped bo
2613 * @xe: The xe device.
2614 * @tile: The tile to select for migration of this bo, and the tile used for
2615 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2616 * @vm: The vm to associate the buffer object with. The vm's resv must be locked
2617 * with the transaction represented by @exec.
2618 * @size: The storage size to use for the bo.
2619 * @type: The TTM buffer object type.
2620 * @flags: XE_BO_FLAG_ flags.
2621 * @exec: The drm_exec transaction to use for exhaustive eviction, and
2622 * previously used for locking @vm's resv.
2623 *
2624 * Create a pinned and mapped bo, associated with @vm if @vm is non-NULL and
2625 * external otherwise.
2626 *
2627 * Return: The buffer object on success. Negative error pointer on failure.
2628 * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
2629 * configured for interruptible locking.
2630 */
xe_bo_create_pin_map(struct xe_device * xe,struct xe_tile * tile,struct xe_vm * vm,size_t size,enum ttm_bo_type type,u32 flags,struct drm_exec * exec)2631 struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2632 struct xe_vm *vm, size_t size,
2633 enum ttm_bo_type type, u32 flags,
2634 struct drm_exec *exec)
2635 {
2636 return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
2637 0, exec);
2638 }
2639
2640 /**
2641 * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
2642 * @xe: The xe device.
2643 * @tile: The tile to select for migration of this bo, and the tile used for
2644 * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
2645 * @size: The storage size to use for the bo.
2646 * @type: The TTM buffer object type.
2647 * @flags: XE_BO_FLAG_ flags.
2648 * @intr: Whether to execute any waits for backing store interruptible.
2649 *
2650 * Create a pinned and mapped bo. The bo will be external and not associated
2651 * with a VM.
2652 *
2653 * Return: The buffer object on success. Negative error pointer on failure.
2654 * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
2655 * to true on entry.
2656 */
xe_bo_create_pin_map_novm(struct xe_device * xe,struct xe_tile * tile,size_t size,enum ttm_bo_type type,u32 flags,bool intr)2657 struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
2658 size_t size, enum ttm_bo_type type, u32 flags,
2659 bool intr)
2660 {
2661 return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
2662 }
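
/*
 * Usage sketch (illustrative only): the common pattern for small kernel-owned
 * buffers is to create them pinned and CPU-mapped in one call and then write
 * through the iosys map, much like xe_managed_bo_create_from_data() below.
 * The flags are an example for a GGTT-bound, VRAM-preferred buffer.
 *
 *	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
 *				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *				       XE_BO_FLAG_GGTT, true);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, data_size);
 */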
2663
__xe_bo_unpin_map_no_vm(void * arg)2664 static void __xe_bo_unpin_map_no_vm(void *arg)
2665 {
2666 xe_bo_unpin_map_no_vm(arg);
2667 }
2668
xe_managed_bo_create_pin_map(struct xe_device * xe,struct xe_tile * tile,size_t size,u32 flags)2669 struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
2670 size_t size, u32 flags)
2671 {
2672 struct xe_bo *bo;
2673 int ret;
2674
2675 KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
2676 bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
2677 if (IS_ERR(bo))
2678 return bo;
2679
2680 ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2681 if (ret)
2682 return ERR_PTR(ret);
2683
2684 return bo;
2685 }
2686
xe_managed_bo_unpin_map_no_vm(struct xe_bo * bo)2687 void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
2688 {
2689 devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
2690 }
2691
xe_managed_bo_create_from_data(struct xe_device * xe,struct xe_tile * tile,const void * data,size_t size,u32 flags)2692 struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
2693 const void *data, size_t size, u32 flags)
2694 {
2695 struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);
2696
2697 if (IS_ERR(bo))
2698 return bo;
2699
2700 xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);
2701
2702 return bo;
2703 }
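
/*
 * Usage sketch (illustrative only): upload a firmware-style blob into a
 * device-managed BO; the pin/map is torn down automatically through the devm
 * action registered by xe_managed_bo_create_pin_map(). "blob" and its size
 * are assumed to come from the caller.
 *
 *	bo = xe_managed_bo_create_from_data(xe, tile, blob, blob_size,
 *					    XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *					    XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */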
2704
2705 /**
2706 * xe_managed_bo_reinit_in_vram() - Replace a managed system-memory bo with a VRAM copy
2707 * @xe: xe device
2708 * @tile: Tile where the new buffer will be created
2709 * @src: Managed buffer object allocated in system memory
2710 *
2711 * Replace a managed src buffer object allocated in system memory with a new
2712 * one allocated in vram, copying the data between them.
2713 * Buffer object in VRAM is not going to have the same GGTT address, the caller
2714 * is responsible for making sure that any old references to it are updated.
2715 *
2716 * Returns 0 for success, negative error code otherwise.
2717 */
xe_managed_bo_reinit_in_vram(struct xe_device * xe,struct xe_tile * tile,struct xe_bo ** src)2718 int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
2719 {
2720 struct xe_bo *bo;
2721 u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
2722
2723 dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
2724 XE_BO_FLAG_PINNED_NORESTORE);
2725
2726 xe_assert(xe, IS_DGFX(xe));
2727 xe_assert(xe, !(*src)->vmap.is_iomem);
2728
2729 bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
2730 xe_bo_size(*src), dst_flags);
2731 if (IS_ERR(bo))
2732 return PTR_ERR(bo);
2733
2734 devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
2735 *src = bo;
2736
2737 return 0;
2738 }
2739
2740 /*
2741 * XXX: This is in the VM bind data path, likely should calculate this once and
2742 * store, with a recalculation if the BO is moved.
2743 */
vram_region_gpu_offset(struct ttm_resource * res)2744 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
2745 {
2746 struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
2747
2748 switch (res->mem_type) {
2749 case XE_PL_STOLEN:
2750 return xe_ttm_stolen_gpu_offset(xe);
2751 case XE_PL_TT:
2752 case XE_PL_SYSTEM:
2753 return 0;
2754 default:
2755 return res_to_mem_region(res)->dpa_base;
2756 }
2757 return 0;
2758 }
2759
2760 /**
2761 * xe_bo_pin_external - pin an external BO
2762 * @bo: buffer object to be pinned
2763 * @in_place: Pin in current placement, don't attempt to migrate.
2764 * @exec: The drm_exec transaction to use for exhaustive eviction.
2765 *
2766 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2767 * BO. Unlike xe_bo_pin(), this function has its own set of asserts and code
2768 * to ensure evict / restore on suspend / resume.
2769 *
2770 * Returns 0 for success, negative error code otherwise.
2771 */
xe_bo_pin_external(struct xe_bo * bo,bool in_place,struct drm_exec * exec)2772 int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
2773 {
2774 struct xe_device *xe = xe_bo_device(bo);
2775 int err;
2776
2777 xe_assert(xe, !bo->vm);
2778 xe_assert(xe, xe_bo_is_user(bo));
2779
2780 if (!xe_bo_is_pinned(bo)) {
2781 if (!in_place) {
2782 err = xe_bo_validate(bo, NULL, false, exec);
2783 if (err)
2784 return err;
2785 }
2786
2787 spin_lock(&xe->pinned.lock);
2788 list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
2789 spin_unlock(&xe->pinned.lock);
2790 }
2791
2792 ttm_bo_pin(&bo->ttm);
2793 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2794 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2795
2796 /*
2797 * FIXME: If we always use the reserve / unreserve functions for locking
2798 * we do not need this.
2799 */
2800 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2801
2802 return 0;
2803 }
2804
2805 /**
2806 * xe_bo_pin() - Pin a kernel bo after potentially migrating it
2807 * @bo: The kernel bo to pin.
2808 * @exec: The drm_exec transaction to use for exhaustive eviction.
2809 *
2810 * Attempts to migrate a bo to @bo->placement. If that succeeds,
2811 * pins the bo.
2812 *
2813 * Return: %0 on success, negative error code on migration failure.
2814 */
xe_bo_pin(struct xe_bo * bo,struct drm_exec * exec)2815 int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
2816 {
2817 struct ttm_place *place = &bo->placements[0];
2818 struct xe_device *xe = xe_bo_device(bo);
2819 int err;
2820
2821 /* We currently don't expect user BO to be pinned */
2822 xe_assert(xe, !xe_bo_is_user(bo));
2823
2824 /* Pinned object must be in GGTT or have pinned flag */
2825 xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
2826 XE_BO_FLAG_GGTT));
2827
2828 /*
2829 * No reason we can't support pinning imported dma-bufs; we just don't
2830 * expect to pin an imported dma-buf.
2831 */
2832 xe_assert(xe, !bo->ttm.base.import_attach);
2833
2834 /* We only expect at most 1 pin */
2835 xe_assert(xe, !xe_bo_is_pinned(bo));
2836
2837 err = xe_bo_validate(bo, NULL, false, exec);
2838 if (err)
2839 return err;
2840
2841 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2842 spin_lock(&xe->pinned.lock);
2843 if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
2844 list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
2845 else
2846 list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
2847 spin_unlock(&xe->pinned.lock);
2848 }
2849
2850 ttm_bo_pin(&bo->ttm);
2851 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2852 xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
2853
2854 /*
2855 * FIXME: If we always use the reserve / unreserve functions for locking
2856 * we do not need this.
2857 */
2858 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2859
2860 return 0;
2861 }
2862
2863 /**
2864 * xe_bo_unpin_external - unpin an external BO
2865 * @bo: buffer object to be unpinned
2866 *
2867 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
2868 * BO. Unlike xe_bo_unpin(), this function has its own set of asserts and code
2869 * to ensure evict / restore on suspend / resume.
2870 *
2871 * Unlike xe_bo_pin_external(), this function returns void and cannot fail.
2872 */
xe_bo_unpin_external(struct xe_bo * bo)2873 void xe_bo_unpin_external(struct xe_bo *bo)
2874 {
2875 struct xe_device *xe = xe_bo_device(bo);
2876
2877 xe_assert(xe, !bo->vm);
2878 xe_assert(xe, xe_bo_is_pinned(bo));
2879 xe_assert(xe, xe_bo_is_user(bo));
2880
2881 spin_lock(&xe->pinned.lock);
2882 if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link))
2883 list_del_init(&bo->pinned_link);
2884 spin_unlock(&xe->pinned.lock);
2885
2886 ttm_bo_unpin(&bo->ttm);
2887 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2888 xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2889
2890 /*
2891 * FIXME: If we always use the reserve / unreserve functions for locking
2892 * we do not need this.
2893 */
2894 ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
2895 }
2896
xe_bo_unpin(struct xe_bo * bo)2897 void xe_bo_unpin(struct xe_bo *bo)
2898 {
2899 struct ttm_place *place = &bo->placements[0];
2900 struct xe_device *xe = xe_bo_device(bo);
2901
2902 xe_assert(xe, !bo->ttm.base.import_attach);
2903 xe_assert(xe, xe_bo_is_pinned(bo));
2904
2905 if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
2906 spin_lock(&xe->pinned.lock);
2907 xe_assert(xe, !list_empty(&bo->pinned_link));
2908 list_del_init(&bo->pinned_link);
2909 spin_unlock(&xe->pinned.lock);
2910
2911 if (bo->backup_obj) {
2912 if (xe_bo_is_pinned(bo->backup_obj))
2913 ttm_bo_unpin(&bo->backup_obj->ttm);
2914 xe_bo_put(bo->backup_obj);
2915 bo->backup_obj = NULL;
2916 }
2917 }
2918 ttm_bo_unpin(&bo->ttm);
2919 if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
2920 xe_ttm_tt_account_add(xe, bo->ttm.ttm);
2921 }
2922
2923 /**
2924 * xe_bo_validate() - Make sure the bo is in an allowed placement
2925 * @bo: The bo,
2926 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
2927 * NULL. Used together with @allow_res_evict.
2928 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
2929 * reservation object.
2930 * @exec: The drm_exec transaction to use for exhaustive eviction.
2931 *
2932 * Make sure the bo is in allowed placement, migrating it if necessary. If
2933 * needed, other bos will be evicted. If bos selected for eviction share
2934 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
2935 * set to true, otherwise they will be bypassed.
2936 *
2937 * Return: 0 on success, negative error code on failure. May return
2938 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
2939 */
xe_bo_validate(struct xe_bo * bo,struct xe_vm * vm,bool allow_res_evict,struct drm_exec * exec)2940 int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
2941 struct drm_exec *exec)
2942 {
2943 struct ttm_operation_ctx ctx = {
2944 .interruptible = true,
2945 .no_wait_gpu = false,
2946 .gfp_retry_mayfail = true,
2947 };
2948 int ret;
2949
2950 if (xe_bo_is_pinned(bo))
2951 return 0;
2952
2953 if (vm) {
2954 lockdep_assert_held(&vm->lock);
2955 xe_vm_assert_held(vm);
2956
2957 ctx.allow_res_evict = allow_res_evict;
2958 ctx.resv = xe_vm_resv(vm);
2959 }
2960
2961 xe_vm_set_validating(vm, allow_res_evict);
2962 trace_xe_bo_validate(bo);
2963 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
2964 ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
2965 xe_vm_clear_validating(vm, allow_res_evict);
2966
2967 return ret;
2968 }
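
/*
 * Usage sketch (illustrative only): validate an external BO inside a
 * validation guard, retrying on eviction contention and OOM like the other
 * callers in this file.
 *
 *	xe_validation_guard(&ctx, &xe->val, &exec,
 *			    (struct xe_val_flags) {.interruptible = true}, err) {
 *		err = drm_exec_lock_obj(&exec, &bo->ttm.base);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *
 *		err = xe_bo_validate(bo, NULL, false, &exec);
 *		drm_exec_retry_on_contention(&exec);
 *		xe_validation_retry_on_oom(&ctx, &err);
 *	}
 */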
2969
xe_bo_is_xe_bo(struct ttm_buffer_object * bo)2970 bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
2971 {
2972 if (bo->destroy == &xe_ttm_bo_destroy)
2973 return true;
2974
2975 return false;
2976 }
2977
2978 /*
2979 * Resolve a BO address. There is no assert to check if the proper lock is held
2980 * so it should only be used in cases where it is not fatal to get the wrong
2981 * address, such as printing debug information, but not in cases where memory is
2982 * written based on this result.
2983 */
__xe_bo_addr(struct xe_bo * bo,u64 offset,size_t page_size)2984 dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
2985 {
2986 struct xe_device *xe = xe_bo_device(bo);
2987 struct xe_res_cursor cur;
2988 u64 page;
2989
2990 xe_assert(xe, page_size <= PAGE_SIZE);
2991 page = offset >> PAGE_SHIFT;
2992 offset &= (PAGE_SIZE - 1);
2993
2994 if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
2995 xe_assert(xe, bo->ttm.ttm);
2996
2997 xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
2998 page_size, &cur);
2999 return xe_res_dma(&cur) + offset;
3000 } else {
3001 struct xe_res_cursor cur;
3002
3003 xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
3004 page_size, &cur);
3005 return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
3006 }
3007 }
3008
xe_bo_addr(struct xe_bo * bo,u64 offset,size_t page_size)3009 dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
3010 {
3011 if (!READ_ONCE(bo->ttm.pin_count))
3012 xe_bo_assert_held(bo);
3013 return __xe_bo_addr(bo, offset, page_size);
3014 }
3015
xe_bo_vmap(struct xe_bo * bo)3016 int xe_bo_vmap(struct xe_bo *bo)
3017 {
3018 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3019 void *virtual;
3020 bool is_iomem;
3021 int ret;
3022
3023 xe_bo_assert_held(bo);
3024
3025 if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
3026 !force_contiguous(bo->flags)))
3027 return -EINVAL;
3028
3029 if (!iosys_map_is_null(&bo->vmap))
3030 return 0;
3031
3032 /*
3033 * We use this more or less deprecated interface for now since
3034 * ttm_bo_vmap() doesn't offer the optimization of kmapping
3035 * single page bos, which is done here.
3036 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
3037 * to use struct iosys_map.
3038 */
3039 ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
3040 if (ret)
3041 return ret;
3042
3043 virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
3044 if (is_iomem)
3045 iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
3046 else
3047 iosys_map_set_vaddr(&bo->vmap, virtual);
3048
3049 return 0;
3050 }
3051
__xe_bo_vunmap(struct xe_bo * bo)3052 static void __xe_bo_vunmap(struct xe_bo *bo)
3053 {
3054 if (!iosys_map_is_null(&bo->vmap)) {
3055 iosys_map_clear(&bo->vmap);
3056 ttm_bo_kunmap(&bo->kmap);
3057 }
3058 }
3059
xe_bo_vunmap(struct xe_bo * bo)3060 void xe_bo_vunmap(struct xe_bo *bo)
3061 {
3062 xe_bo_assert_held(bo);
3063 __xe_bo_vunmap(bo);
3064 }
3065
gem_create_set_pxp_type(struct xe_device * xe,struct xe_bo * bo,u64 value)3066 static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
3067 {
3068 if (value == DRM_XE_PXP_TYPE_NONE)
3069 return 0;
3070
3071 /* we only support DRM_XE_PXP_TYPE_HWDRM for now */
3072 if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
3073 return -EINVAL;
3074
3075 return xe_pxp_key_assign(xe->pxp, bo);
3076 }
3077
3078 typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
3079 struct xe_bo *bo,
3080 u64 value);
3081
3082 static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
3083 [DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
3084 };
3085
gem_create_user_ext_set_property(struct xe_device * xe,struct xe_bo * bo,u64 extension)3086 static int gem_create_user_ext_set_property(struct xe_device *xe,
3087 struct xe_bo *bo,
3088 u64 extension)
3089 {
3090 u64 __user *address = u64_to_user_ptr(extension);
3091 struct drm_xe_ext_set_property ext;
3092 int err;
3093 u32 idx;
3094
3095 err = copy_from_user(&ext, address, sizeof(ext));
3096 if (XE_IOCTL_DBG(xe, err))
3097 return -EFAULT;
3098
3099 if (XE_IOCTL_DBG(xe, ext.property >=
3100 ARRAY_SIZE(gem_create_set_property_funcs)) ||
3101 XE_IOCTL_DBG(xe, ext.pad) ||
3102 XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
3103 return -EINVAL;
3104
3105 idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
3106 if (!gem_create_set_property_funcs[idx])
3107 return -EINVAL;
3108
3109 return gem_create_set_property_funcs[idx](xe, bo, ext.value);
3110 }
3111
3112 typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
3113 struct xe_bo *bo,
3114 u64 extension);
3115
3116 static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
3117 [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
3118 };
3119
3120 #define MAX_USER_EXTENSIONS 16
gem_create_user_extensions(struct xe_device * xe,struct xe_bo * bo,u64 extensions,int ext_number)3121 static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
3122 u64 extensions, int ext_number)
3123 {
3124 u64 __user *address = u64_to_user_ptr(extensions);
3125 struct drm_xe_user_extension ext;
3126 int err;
3127 u32 idx;
3128
3129 if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
3130 return -E2BIG;
3131
3132 err = copy_from_user(&ext, address, sizeof(ext));
3133 if (XE_IOCTL_DBG(xe, err))
3134 return -EFAULT;
3135
3136 if (XE_IOCTL_DBG(xe, ext.pad) ||
3137 XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
3138 return -EINVAL;
3139
3140 idx = array_index_nospec(ext.name,
3141 ARRAY_SIZE(gem_create_user_extension_funcs));
3142 err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
3143 if (XE_IOCTL_DBG(xe, err))
3144 return err;
3145
3146 if (ext.next_extension)
3147 return gem_create_user_extensions(xe, bo, ext.next_extension,
3148 ++ext_number);
3149
3150 return 0;
3151 }
3152
xe_gem_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)3153 int xe_gem_create_ioctl(struct drm_device *dev, void *data,
3154 struct drm_file *file)
3155 {
3156 struct xe_device *xe = to_xe_device(dev);
3157 struct xe_file *xef = to_xe_file(file);
3158 struct drm_xe_gem_create *args = data;
3159 struct xe_validation_ctx ctx;
3160 struct drm_exec exec;
3161 struct xe_vm *vm = NULL;
3162 struct xe_bo *bo;
3163 unsigned int bo_flags;
3164 u32 handle;
3165 int err;
3166
3167 if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
3168 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3169 return -EINVAL;
3170
3171 /* at least one valid memory placement must be specified */
3172 if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
3173 !args->placement))
3174 return -EINVAL;
3175
3176 if (XE_IOCTL_DBG(xe, args->flags &
3177 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
3178 DRM_XE_GEM_CREATE_FLAG_SCANOUT |
3179 DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
3180 return -EINVAL;
3181
3182 if (XE_IOCTL_DBG(xe, args->handle))
3183 return -EINVAL;
3184
3185 if (XE_IOCTL_DBG(xe, !args->size))
3186 return -EINVAL;
3187
3188 if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
3189 return -EINVAL;
3190
3191 if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
3192 return -EINVAL;
3193
3194 bo_flags = 0;
3195 if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
3196 bo_flags |= XE_BO_FLAG_DEFER_BACKING;
3197
3198 if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
3199 bo_flags |= XE_BO_FLAG_SCANOUT;
3200
3201 bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);
3202
3203 /* CCS formats need physical placement at a 64K alignment in VRAM. */
3204 if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
3205 (bo_flags & XE_BO_FLAG_SCANOUT) &&
3206 !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
3207 IS_ALIGNED(args->size, SZ_64K))
3208 bo_flags |= XE_BO_FLAG_NEEDS_64K;
3209
3210 if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
3211 if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
3212 return -EINVAL;
3213
3214 bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
3215 }
3216
3217 if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
3218 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
3219 return -EINVAL;
3220
3221 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
3222 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
3223 return -EINVAL;
3224
3225 if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
3226 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
3227 return -EINVAL;
3228
3229 if (args->vm_id) {
3230 vm = xe_vm_lookup(xef, args->vm_id);
3231 if (XE_IOCTL_DBG(xe, !vm))
3232 return -ENOENT;
3233 }
3234
3235 err = 0;
3236 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
3237 err) {
3238 if (vm) {
3239 err = xe_vm_drm_exec_lock(vm, &exec);
3240 drm_exec_retry_on_contention(&exec);
3241 if (err)
3242 break;
3243 }
3244 bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
3245 bo_flags, &exec);
3246 drm_exec_retry_on_contention(&exec);
3247 if (IS_ERR(bo)) {
3248 err = PTR_ERR(bo);
3249 xe_validation_retry_on_oom(&ctx, &err);
3250 break;
3251 }
3252 }
3253 if (err)
3254 goto out_vm;
3255
3256 if (args->extensions) {
3257 err = gem_create_user_extensions(xe, bo, args->extensions, 0);
3258 if (err)
3259 goto out_bulk;
3260 }
3261
3262 err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
3263 if (err)
3264 goto out_bulk;
3265
3266 args->handle = handle;
3267 goto out_put;
3268
3269 out_bulk:
3270 if (vm && !xe_vm_in_fault_mode(vm)) {
3271 xe_vm_lock(vm, false);
3272 __xe_bo_unset_bulk_move(bo);
3273 xe_vm_unlock(vm);
3274 }
3275 out_put:
3276 xe_bo_put(bo);
3277 out_vm:
3278 if (vm)
3279 xe_vm_put(vm);
3280
3281 return err;
3282 }
3283
xe_gem_mmap_offset_ioctl(struct drm_device * dev,void * data,struct drm_file * file)3284 int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
3285 struct drm_file *file)
3286 {
3287 struct xe_device *xe = to_xe_device(dev);
3288 struct drm_xe_gem_mmap_offset *args = data;
3289 struct drm_gem_object *gem_obj;
3290
3291 if (XE_IOCTL_DBG(xe, args->extensions) ||
3292 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
3293 return -EINVAL;
3294
3295 if (XE_IOCTL_DBG(xe, args->flags &
3296 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
3297 return -EINVAL;
3298
3299 if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
3300 if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
3301 return -EINVAL;
3302
3303 if (XE_IOCTL_DBG(xe, args->handle))
3304 return -EINVAL;
3305
3306 if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
3307 return -EINVAL;
3308
3309 BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
3310 SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
3311 args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
3312 return 0;
3313 }
3314
3315 gem_obj = drm_gem_object_lookup(file, args->handle);
3316 if (XE_IOCTL_DBG(xe, !gem_obj))
3317 return -ENOENT;
3318
3319 /* The mmap offset was set up at BO allocation time. */
3320 args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
3321
3322 xe_bo_put(gem_to_xe_bo(gem_obj));
3323 return 0;
3324 }
3325
3326 /**
3327 * xe_bo_lock() - Lock the buffer object's dma_resv object
3328 * @bo: The struct xe_bo whose lock is to be taken
3329 * @intr: Whether to perform any wait interruptible
3330 *
3331 * Locks the buffer object's dma_resv object. If the buffer object is
3332 * pointing to a shared dma_resv object, that shared lock is locked.
3333 *
3334 * Return: 0 on success, -EINTR if @intr is true and the wait for a
3335 * contended lock was interrupted. If @intr is set to false, the
3336 * function always returns 0.
3337 */
xe_bo_lock(struct xe_bo * bo,bool intr)3338 int xe_bo_lock(struct xe_bo *bo, bool intr)
3339 {
3340 if (intr)
3341 return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);
3342
3343 dma_resv_lock(bo->ttm.base.resv, NULL);
3344
3345 return 0;
3346 }
3347
3348 /**
3349 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
3350 * @bo: The struct xe_bo whose lock is to be released.
3351 *
3352 * Unlock a buffer object lock that was locked by xe_bo_lock().
3353 */
xe_bo_unlock(struct xe_bo * bo)3354 void xe_bo_unlock(struct xe_bo *bo)
3355 {
3356 dma_resv_unlock(bo->ttm.base.resv);
3357 }
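
/*
 * Usage sketch (illustrative only): take the BO lock interruptibly, do some
 * work protected by the object's dma_resv and drop the lock again. A return
 * value of -EINTR means the wait for a contended lock was interrupted.
 *
 *	err = xe_bo_lock(bo, true);
 *	if (err)
 *		return err;
 *
 *	... inspect or update state protected by bo->ttm.base.resv ...
 *	xe_bo_unlock(bo);
 */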
3358
3359 /**
3360 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
3361 * @bo: The buffer object to migrate
3362 * @mem_type: The TTM memory type intended to migrate to
3363 *
3364 * Check whether the buffer object supports migration to the
3365 * given memory type. Note that pinning may affect the ability to migrate as
3366 * returned by this function.
3367 *
3368 * This function is primarily intended as a helper for checking the
3369 * possibility to migrate buffer objects and can be called without
3370 * the object lock held.
3371 *
3372 * Return: true if migration is possible, false otherwise.
3373 */
xe_bo_can_migrate(struct xe_bo * bo,u32 mem_type)3374 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
3375 {
3376 unsigned int cur_place;
3377
3378 if (bo->ttm.type == ttm_bo_type_kernel)
3379 return true;
3380
3381 if (bo->ttm.type == ttm_bo_type_sg)
3382 return false;
3383
3384 for (cur_place = 0; cur_place < bo->placement.num_placement;
3385 cur_place++) {
3386 if (bo->placements[cur_place].mem_type == mem_type)
3387 return true;
3388 }
3389
3390 return false;
3391 }
3392
xe_place_from_ttm_type(u32 mem_type,struct ttm_place * place)3393 static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
3394 {
3395 memset(place, 0, sizeof(*place));
3396 place->mem_type = mem_type;
3397 }
3398
3399 /**
3400 * xe_bo_migrate - Migrate an object to the desired region id
3401 * @bo: The buffer object to migrate.
3402 * @mem_type: The TTM region type to migrate to.
3403 * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
3404 * a default interruptible ctx is to be used.
3405 * @exec: The drm_exec transaction to use for exhaustive eviction.
3406 *
3407 * Attempt to migrate the buffer object to the desired memory region. The
3408 * buffer object may not be pinned, and must be locked.
3409 * On successful completion, the object memory type will be updated,
3410 * but an async migration task may not have completed yet, and to
3411 * accomplish that, the object's kernel fences must be signaled with
3412 * the object lock held.
3413 *
3414 * Return: 0 on success. Negative error code on failure. In particular may
3415 * return -EINTR or -ERESTARTSYS if signal pending.
3416 */
xe_bo_migrate(struct xe_bo * bo,u32 mem_type,struct ttm_operation_ctx * tctx,struct drm_exec * exec)3417 int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
3418 struct drm_exec *exec)
3419 {
3420 struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
3421 struct ttm_operation_ctx ctx = {
3422 .interruptible = true,
3423 .no_wait_gpu = false,
3424 .gfp_retry_mayfail = true,
3425 };
3426 struct ttm_placement placement;
3427 struct ttm_place requested;
3428
3429 xe_bo_assert_held(bo);
3430 tctx = tctx ? tctx : &ctx;
3431
3432 if (bo->ttm.resource->mem_type == mem_type)
3433 return 0;
3434
3435 if (xe_bo_is_pinned(bo))
3436 return -EBUSY;
3437
3438 if (!xe_bo_can_migrate(bo, mem_type))
3439 return -EINVAL;
3440
3441 xe_place_from_ttm_type(mem_type, &requested);
3442 placement.num_placement = 1;
3443 placement.placement = &requested;
3444
3445 /*
3446 * Stolen would need to be handled like the VRAM handling below if we
3447 * ever need to support it.
3448 */
3449 drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);
3450
3451 if (mem_type_is_vram(mem_type)) {
3452 u32 c = 0;
3453
3454 add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
3455 }
3456
3457 if (!tctx->no_wait_gpu)
3458 xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
3459 return ttm_bo_validate(&bo->ttm, &placement, tctx);
3460 }
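
/*
 * Usage sketch (illustrative only): migrate a BO to GTT while it is reserved,
 * e.g. from within a drm_exec loop as in xe_bo_fault_migrate() above; passing
 * a NULL ttm_operation_ctx selects the default interruptible context.
 *
 *	err = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
 *	if (err)
 *		return err;
 */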
3461
3462 /**
3463 * xe_bo_evict - Evict an object to evict placement
3464 * @bo: The buffer object to migrate.
3465 * @exec: The drm_exec transaction to use for exhaustive eviction.
3466 *
3467 * On successful completion, the object memory will be moved to evict
3468 * placement. This function blocks until the object has been fully moved.
3469 *
3470 * Return: 0 on success. Negative error code on failure.
3471 */
xe_bo_evict(struct xe_bo * bo,struct drm_exec * exec)3472 int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
3473 {
3474 struct ttm_operation_ctx ctx = {
3475 .interruptible = false,
3476 .no_wait_gpu = false,
3477 .gfp_retry_mayfail = true,
3478 };
3479 struct ttm_placement placement;
3480 int ret;
3481
3482 xe_evict_flags(&bo->ttm, &placement);
3483 ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
3484 if (ret)
3485 return ret;
3486
3487 dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
3488 false, MAX_SCHEDULE_TIMEOUT);
3489
3490 return 0;
3491 }
3492
3493 /**
3494 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
3495 * placed in system memory.
3496 * @bo: The xe_bo
3497 *
3498 * Return: true if extra pages need to be allocated, false otherwise.
3499 */
xe_bo_needs_ccs_pages(struct xe_bo * bo)3500 bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
3501 {
3502 struct xe_device *xe = xe_bo_device(bo);
3503
3504 if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
3505 return false;
3506
3507 if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
3508 return false;
3509
3510 /* On discrete GPUs, if the GPU can access this buffer from
3511 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
3512 * can't be used since there's no CCS storage associated with
3513 * non-VRAM addresses.
3514 */
3515 if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
3516 return false;
3517
3518 /*
3519 * Compression implies coh_none, therefore we know for sure that WB
3520 * memory can't currently use compression, which is likely one of the
3521 * common cases.
3522 */
3523 if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
3524 return false;
3525
3526 return true;
3527 }

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off (not intended to
 * be called directly).
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}
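
/*
 * Illustrative usage sketch (not part of the driver): xe_bo_put_deferred()
 * and xe_bo_put_commit() pair up so that the final, possibly sleeping, frees
 * can happen outside of a spinlock. The lock, list and link member below
 * (my_lock, my_list, my_link) are hypothetical.
 *
 *	LLIST_HEAD(deferred);
 *	struct xe_bo *bo, *next;
 *
 *	spin_lock(&my_lock);
 *	list_for_each_entry_safe(bo, next, &my_list, my_link)
 *		xe_bo_put_deferred(bo, &deferred);
 *	spin_unlock(&my_lock);
 *
 *	xe_bo_put_commit(&deferred);
 */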

static void xe_bo_dev_work_func(struct work_struct *work)
{
	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);

	xe_bo_put_commit(&bo_dev->async_list);
}

/**
 * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
 * @bo_dev: The BO dev structure
 */
void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
{
	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
}

/**
 * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
 * @bo_dev: The BO dev structure
 */
void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
{
	flush_work(&bo_dev->async_free);
}

void xe_bo_put(struct xe_bo *bo)
{
	struct xe_tile *tile;
	u8 id;

	might_sleep();
	if (bo) {
#ifdef CONFIG_PROC_FS
		if (bo->client)
			might_lock(&bo->client->bos_lock);
#endif
		for_each_tile(tile, xe_bo_device(bo), id)
			if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
				xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
		drm_gem_object_put(&bo->ttm.base);
	}
}

/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: ...
 * @dev: ...
 * @args: ...
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: ...
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
			      xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);

	err = drm_mode_size_dumb(dev, args, SZ_64, page_size);
	if (err)
		return err;

	bo = xe_bo_create_user(xe, NULL, args->size,
			       DRM_XE_GEM_CPU_CACHING_WC,
			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			       XE_BO_FLAG_SCANOUT |
			       XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}
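
/*
 * Illustrative userspace sketch (not part of the driver): this entry point
 * backs DRM_IOCTL_MODE_CREATE_DUMB. A KMS client fills in width, height and
 * bpp, and the kernel returns handle, pitch and size. The fd and
 * map_and_use() below are hypothetical (an already-open DRM device fd and a
 * caller-defined helper).
 *
 *	struct drm_mode_create_dumb args = {
 *		.width = 1920,
 *		.height = 1080,
 *		.bpp = 32,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args) == 0)
 *		map_and_use(args.handle, args.pitch, args.size);
 */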

void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
{
	struct ttm_buffer_object *tbo = &bo->ttm;
	struct ttm_device *bdev = tbo->bdev;

	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);

	list_del_init(&bo->vram_userfault_link);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif