// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_bo.h"

#include <linux/dma-buf.h>
#include <linux/nospec.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>

#include <kunit/static_stub.h>

#include <trace/events/gpu_mem.h>

#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
#include "xe_shrinker.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"

const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
	[XE_PL_SYSTEM] = "system",
	[XE_PL_TT] = "gtt",
	[XE_PL_VRAM0] = "vram0",
	[XE_PL_VRAM1] = "vram1",
	[XE_PL_STOLEN] = "stolen"
};

static const struct ttm_place sys_placement_flags = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = XE_PL_SYSTEM,
	.flags = 0,
};

static struct ttm_placement sys_placement = {
	.num_placement = 1,
	.placement = &sys_placement_flags,
};

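/*
 * An empty placement (num_placement == 0): validating a bo against it drops
 * the backing store entirely, which is used when purging, e.g. on device
 * unplug.
 */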
static struct ttm_placement purge_placement;

static const struct ttm_place tt_placement_flags[] = {
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = XE_PL_TT,
		.flags = TTM_PL_FLAG_DESIRED,
	},
	{
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = XE_PL_SYSTEM,
		.flags = TTM_PL_FLAG_FALLBACK,
	}
};

static struct ttm_placement tt_placement = {
	.num_placement = 2,
	.placement = tt_placement_flags,
};

bool mem_type_is_vram(u32 mem_type)
{
	return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
}

static bool resource_is_stolen_vram(struct xe_device *xe, struct ttm_resource *res)
{
	return res->mem_type == XE_PL_STOLEN && IS_DGFX(xe);
}

static bool resource_is_vram(struct ttm_resource *res)
{
	return mem_type_is_vram(res->mem_type);
}

bool xe_bo_is_vram(struct xe_bo *bo)
{
	return resource_is_vram(bo->ttm.resource) ||
		resource_is_stolen_vram(xe_bo_device(bo), bo->ttm.resource);
}

bool xe_bo_is_stolen(struct xe_bo *bo)
{
	return bo->ttm.resource->mem_type == XE_PL_STOLEN;
}

/**
 * xe_bo_has_single_placement - check if BO is placed only in one memory location
 * @bo: The BO
 *
 * This function checks whether a given BO is placed in only one memory location.
 *
 * Returns: true if the BO is placed in a single memory location, false otherwise.
 *
 */
bool xe_bo_has_single_placement(struct xe_bo *bo)
{
	return bo->placement.num_placement == 1;
}

/**
 * xe_bo_is_stolen_devmem - check if BO is of stolen type accessed via PCI BAR
 * @bo: The BO
 *
 * The stolen memory is accessed through the PCI BAR for both DGFX and some
 * integrated platforms that have a dedicated bit in the PTE for devmem (DM).
 *
 * Returns: true if it's stolen memory accessed via PCI BAR, false otherwise.
 */
bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
{
	return xe_bo_is_stolen(bo) &&
		GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}

/**
 * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
 * @bo: The BO
 *
 * Check if a given bo is bound through VM_BIND. This requires the
 * reservation lock for the BO to be held.
 *
 * Returns: boolean
 */
bool xe_bo_is_vm_bound(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);

	return !list_empty(&bo->ttm.base.gpuva.list);
}

static bool xe_bo_is_user(struct xe_bo *bo)
{
	return bo->flags & XE_BO_FLAG_USER;
}

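/*
 * Return the migrate context of the tile that backs the given VRAM or
 * stolen memory type.
 */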
static struct xe_migrate *
mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
{
	struct xe_tile *tile;

	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
	return tile->migrate;
}

static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
	struct ttm_resource_manager *mgr;
	struct xe_ttm_vram_mgr *vram_mgr;

	xe_assert(xe, resource_is_vram(res));
	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
	vram_mgr = to_xe_ttm_vram_mgr(mgr);

	return container_of(vram_mgr, struct xe_vram_region, ttm);
}

static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_SYSTEM) {
		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

		bo->placements[*c] = (struct ttm_place) {
			.mem_type = XE_PL_TT,
		};
		*c += 1;
	}
}

static bool force_contiguous(u32 bo_flags)
{
	if (bo_flags & XE_BO_FLAG_STOLEN)
		return true; /* users expect this */
	else if (bo_flags & XE_BO_FLAG_PINNED &&
		 !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
		return true; /* needs vmap */

	/*
	 * For eviction / restore on suspend / resume objects pinned in VRAM
	 * must be contiguous, also only contiguous BOs support xe_bo_vmap.
	 */
	return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
	       bo_flags & XE_BO_FLAG_PINNED;
}

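/*
 * Add a VRAM placement. If only part of VRAM is CPU-visible (small BAR),
 * bos that need CPU access are restricted to the visible portion, while
 * all others are allocated top-down to keep the visible range free.
 */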
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
	struct ttm_place place = { .mem_type = mem_type };
	struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
	struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);

	struct xe_vram_region *vram;
	u64 io_size;

	xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

	vram = container_of(vram_mgr, struct xe_vram_region, ttm);
	xe_assert(xe, vram && vram->usable_size);
	io_size = vram->io_size;

	if (force_contiguous(bo_flags))
		place.flags |= TTM_PL_FLAG_CONTIGUOUS;

	if (io_size < vram->usable_size) {
		if (bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) {
			place.fpfn = 0;
			place.lpfn = io_size >> PAGE_SHIFT;
		} else {
			place.flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
	places[*c] = place;
	*c += 1;
}

static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
			 u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_VRAM0)
		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
	if (bo_flags & XE_BO_FLAG_VRAM1)
		add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
}

static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
			   u32 bo_flags, u32 *c)
{
	if (bo_flags & XE_BO_FLAG_STOLEN) {
		xe_assert(xe, *c < ARRAY_SIZE(bo->placements));

		bo->placements[*c] = (struct ttm_place) {
			.mem_type = XE_PL_STOLEN,
			.flags = force_contiguous(bo_flags) ?
				TTM_PL_FLAG_CONTIGUOUS : 0,
		};
		*c += 1;
	}
}

static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
				       u32 bo_flags)
{
	u32 c = 0;

	try_add_vram(xe, bo, bo_flags, &c);
	try_add_system(xe, bo, bo_flags, &c);
	try_add_stolen(xe, bo, bo_flags, &c);

	if (!c)
		return -EINVAL;

	bo->placement = (struct ttm_placement) {
		.num_placement = c,
		.placement = bo->placements,
	};

	return 0;
}

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags)
{
	xe_bo_assert_held(bo);
	return __xe_bo_placement_for_flags(xe, bo, bo_flags);
}

static void xe_evict_flags(struct ttm_buffer_object *tbo,
			   struct ttm_placement *placement)
{
	struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(tbo)) {
		/* Don't handle scatter gather BOs */
		if (tbo->type == ttm_bo_type_sg) {
			placement->num_placement = 0;
			return;
		}

		*placement = device_unplugged ? purge_placement : sys_placement;
		return;
	}

	bo = ttm_to_xe_bo(tbo);
	if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
		*placement = sys_placement;
		return;
	}

	if (device_unplugged && !tbo->base.dma_buf) {
		*placement = purge_placement;
		return;
	}

	/*
	 * For xe, sg bos that are evicted to system just trigger a
	 * rebind of the sg list upon subsequent validation to XE_PL_TT.
	 */
	switch (tbo->resource->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
	case XE_PL_STOLEN:
		*placement = tt_placement;
		break;
	case XE_PL_TT:
	default:
		*placement = sys_placement;
		break;
	}
}

/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
	struct ttm_tt ttm;
	struct sg_table sgt;
	struct sg_table *sg;
	/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
	bool purgeable;
};

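/*
 * Create and dma-map an sg-table for the pages of @tt. A no-op if the
 * sg-table already exists.
 */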
static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	unsigned long num_pages = tt->num_pages;
	int ret;

	XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
		   !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));

	if (xe_tt->sg)
		return 0;

	ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
						num_pages, 0,
						(u64)num_pages << PAGE_SHIFT,
						xe_sg_segment_size(xe->drm.dev),
						GFP_KERNEL);
	if (ret)
		return ret;

	xe_tt->sg = &xe_tt->sgt;
	ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
		return ret;
	}

	return 0;
}

static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (xe_tt->sg) {
		dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(xe_tt->sg);
		xe_tt->sg = NULL;
	}
}

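/**
 * xe_bo_sg() - Return the dma-mapped sg-table of a buffer object, if any
 * @bo: The buffer object.
 *
 * Return: The sg-table set up by xe_tt_map_sg(), or NULL if the pages are
 * not currently dma-mapped.
 */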
struct sg_table *xe_bo_sg(struct xe_bo *bo)
{
	struct ttm_tt *tt = bo->ttm.ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	return xe_tt->sg;
}

/*
 * Account ttm pages against the device shrinker's shrinkable and
 * purgeable counts.
 */
static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (xe_tt->purgeable)
		xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
	else
		xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
}

static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);

	if (xe_tt->purgeable)
		xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
	else
		xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
}

static void update_global_total_pages(struct ttm_device *ttm_dev,
				      long num_pages)
{
#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	struct xe_device *xe = ttm_to_xe_device(ttm_dev);
	u64 global_total_pages =
		atomic64_add_return(num_pages, &xe->global_total_pages);

	trace_gpu_mem_total(xe->drm.primary->index, 0,
			    global_total_pages << PAGE_SHIFT);
#endif
}

static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
				       u32 page_flags)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_ttm_tt *xe_tt;
	struct ttm_tt *tt;
	unsigned long extra_pages;
	enum ttm_caching caching = ttm_cached;
	int err;

	xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
	if (!xe_tt)
		return NULL;

	tt = &xe_tt->ttm;

	extra_pages = 0;
	if (xe_bo_needs_ccs_pages(bo))
		extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)),
					   PAGE_SIZE);

	/*
	 * DGFX system memory is always WB / ttm_cached, since
	 * other caching modes are only supported on x86. DGFX
	 * GPU system memory accesses are always coherent with the
	 * CPU.
	 */
	if (!IS_DGFX(xe)) {
		switch (bo->cpu_caching) {
		case DRM_XE_GEM_CPU_CACHING_WC:
			caching = ttm_write_combined;
			break;
		default:
			caching = ttm_cached;
			break;
		}

		WARN_ON((bo->flags & XE_BO_FLAG_USER) && !bo->cpu_caching);

		/*
		 * Display scanout is always non-coherent with the CPU cache.
		 *
		 * For Xe_LPG and beyond, PPGTT PTE lookups are also
		 * non-coherent and require a CPU:WC mapping.
		 */
		if ((!bo->cpu_caching && bo->flags & XE_BO_FLAG_SCANOUT) ||
		    (xe->info.graphics_verx100 >= 1270 &&
		     bo->flags & XE_BO_FLAG_PAGETABLE))
			caching = ttm_write_combined;
	}

	if (bo->flags & XE_BO_FLAG_NEEDS_UC) {
		/*
		 * Valid only for internally-created buffers, for
		 * which cpu_caching is never initialized.
		 */
		xe_assert(xe, bo->cpu_caching == 0);
		caching = ttm_uncached;
	}

	if (ttm_bo->type != ttm_bo_type_sg)
		page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;

	err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
	if (err) {
		kfree(xe_tt);
		return NULL;
	}

	if (ttm_bo->type != ttm_bo_type_sg) {
		err = ttm_tt_setup_backup(tt);
		if (err) {
			ttm_tt_fini(tt);
			kfree(xe_tt);
			return NULL;
		}
	}

	return tt;
}

static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
			      struct ttm_operation_ctx *ctx)
{
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	int err;

	/*
	 * dma-bufs are not populated with pages, and the dma-
	 * addresses are set up when moved to XE_PL_TT.
	 */
	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
		return 0;

	if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
		err = ttm_tt_restore(ttm_dev, tt, ctx);
	} else {
		ttm_tt_clear_backed_up(tt);
		err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
	}
	if (err)
		return err;

	xe_tt->purgeable = false;
	xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
	update_global_total_pages(ttm_dev, tt->num_pages);

	return 0;
}

static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_dev);

	if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
	    !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
		return;

	xe_tt_unmap_sg(xe, tt);

	ttm_pool_free(&ttm_dev->pool, tt);
	xe_ttm_tt_account_subtract(xe, tt);
	update_global_total_pages(ttm_dev, -(long)tt->num_pages);
}

static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

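/*
 * A VRAM resource is CPU-visible if it lies entirely within the
 * CPU-mappable (io) portion of the region.
 */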
static bool xe_ttm_resource_visible(struct ttm_resource *mem)
{
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(mem);

	return vres->used_visible_size == mem->size;
}

static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
				 struct ttm_resource *mem)
{
	struct xe_device *xe = ttm_to_xe_device(bdev);

	switch (mem->mem_type) {
	case XE_PL_SYSTEM:
	case XE_PL_TT:
		return 0;
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct xe_vram_region *vram = res_to_mem_region(mem);

		if (!xe_ttm_resource_visible(mem))
			return -EINVAL;

		mem->bus.offset = mem->start << PAGE_SHIFT;

		if (vram->mapping &&
		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
			mem->bus.addr = (u8 __force *)vram->mapping +
				mem->bus.offset;

		mem->bus.offset += vram->io_start;
		mem->bus.is_iomem = true;

#if !IS_ENABLED(CONFIG_X86)
		mem->bus.caching = ttm_write_combined;
#endif
		return 0;
	} case XE_PL_STOLEN:
		return xe_ttm_stolen_io_mem_reserve(xe, mem);
	default:
		return -EINVAL;
	}
}

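/*
 * Notify all VMs that have this bo mapped of an upcoming move: VMs not in
 * fault mode have the bo marked as evicted for a later rebind, whereas for
 * fault-mode VMs we wait for idle and invalidate the VMAs.
 */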
static int xe_bo_trigger_rebind(struct xe_device *xe, struct xe_bo *bo,
				const struct ttm_operation_ctx *ctx)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct drm_gem_object *obj = &bo->ttm.base;
	struct drm_gpuvm_bo *vm_bo;
	bool idle = false;
	int ret = 0;

	dma_resv_assert_held(bo->ttm.base.resv);

	if (!list_empty(&bo->ttm.base.gpuva.list)) {
		dma_resv_iter_begin(&cursor, bo->ttm.base.resv,
				    DMA_RESV_USAGE_BOOKKEEP);
		dma_resv_for_each_fence_unlocked(&cursor, fence)
			dma_fence_enable_sw_signaling(fence);
		dma_resv_iter_end(&cursor);
	}

	drm_gem_for_each_gpuvm_bo(vm_bo, obj) {
		struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
		struct drm_gpuva *gpuva;

		if (!xe_vm_in_fault_mode(vm)) {
			drm_gpuvm_bo_evict(vm_bo, true);
			continue;
		}

		if (!idle) {
			long timeout;

			if (ctx->no_wait_gpu &&
			    !dma_resv_test_signaled(bo->ttm.base.resv,
						    DMA_RESV_USAGE_BOOKKEEP))
				return -EBUSY;

			timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
							DMA_RESV_USAGE_BOOKKEEP,
							ctx->interruptible,
							MAX_SCHEDULE_TIMEOUT);
			if (!timeout)
				return -ETIME;
			if (timeout < 0)
				return timeout;

			idle = true;
		}

		drm_gpuvm_bo_for_each_va(gpuva, vm_bo) {
			struct xe_vma *vma = gpuva_to_vma(gpuva);

			trace_xe_vma_evict(vma);
			ret = xe_vm_invalidate_vma(vma);
			if (XE_WARN_ON(ret))
				return ret;
		}
	}

	return ret;
}

/*
 * The dma-buf map_attachment() / unmap_attachment() is hooked up here.
 * Note that unmapping the attachment is deferred to the next
 * map_attachment time, or to bo destroy (after idling), whichever comes first.
 * This is to avoid syncing before unmap_attachment(), assuming that the
 * caller relies on idling the reservation object before moving the
 * backing store out. Should that assumption not hold, then we will be able
 * to unconditionally call unmap_attachment() when moving out to system.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
			     struct ttm_resource *new_res)
{
	struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
	struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
					       ttm);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
	struct sg_table *sg;

	xe_assert(xe, attach);
	xe_assert(xe, ttm_bo->ttm);

	if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
	    ttm_bo->sg) {
		dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
	}

	if (new_res->mem_type == XE_PL_SYSTEM)
		goto out;

	if (ttm_bo->sg) {
		dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
	}

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	ttm_bo->sg = sg;
	xe_tt->sg = sg;

out:
	ttm_bo_move_null(ttm_bo, new_res);

	return 0;
}

/**
 * xe_bo_move_notify - Notify subsystems of a pending move
 * @bo: The buffer object
 * @ctx: The struct ttm_operation_ctx controlling locking and waits.
 *
 * This function notifies subsystems of an upcoming buffer move.
 * Upon receiving such a notification, subsystems should schedule
 * halting access to the underlying pages and optionally add a fence
 * to the buffer object's dma_resv object, that signals when access is
 * stopped. The caller will wait on all dma_resv fences before
 * starting the move.
 *
 * A subsystem may commence access to the object after obtaining
 * bindings to the new backing memory under the object lock.
 *
 * Return: 0 on success, -EINTR or -ERESTARTSYS if interrupted in fault mode,
 * negative error code on error.
 */
static int xe_bo_move_notify(struct xe_bo *bo,
			     const struct ttm_operation_ctx *ctx)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	int ret;

	/*
	 * If this starts to call into many components, consider
	 * using a notification chain here.
	 */

	if (xe_bo_is_pinned(bo))
		return -EINVAL;

	xe_bo_vunmap(bo);
	ret = xe_bo_trigger_rebind(xe, bo, ctx);
	if (ret)
		return ret;

	/* Don't call move_notify() for imported dma-bufs. */
	if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
		dma_buf_move_notify(ttm_bo->base.dma_buf);

	/*
	 * TTM has already nuked the mmap for us (see ttm_bo_unmap_virtual),
	 * so if we moved from VRAM make sure to unlink this from the userfault
	 * tracking.
	 */
	if (mem_type_is_vram(old_mem_type)) {
		mutex_lock(&xe->mem_access.vram_userfault.lock);
		if (!list_empty(&bo->vram_userfault_link))
			list_del_init(&bo->vram_userfault_link);
		mutex_unlock(&xe->mem_access.vram_userfault.lock);
	}

	return 0;
}

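/*
 * The main TTM move callback: depending on source and destination this is
 * either a no-op, a dma-buf attach / detach, an SVM eviction, a multi-hop
 * request via XE_PL_TT, or a GPU clear / copy using the migrate engine.
 */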
static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct ttm_resource *old_mem = ttm_bo->resource;
	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
	struct ttm_tt *ttm = ttm_bo->ttm;
	struct xe_migrate *migrate = NULL;
	struct dma_fence *fence;
	bool move_lacks_source;
	bool tt_has_data;
	bool needs_clear;
	bool handle_system_ccs = (!IS_DGFX(xe) && xe_bo_needs_ccs_pages(bo) &&
				  ttm && ttm_tt_is_populated(ttm)) ? true : false;
	int ret = 0;

	/* Bo creation path, moving to system or TT. */
	if ((!old_mem && ttm) && !handle_system_ccs) {
		if (new_mem->mem_type == XE_PL_TT)
			ret = xe_tt_map_sg(xe, ttm);
		if (!ret)
			ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (ttm_bo->type == ttm_bo_type_sg) {
		ret = xe_bo_move_notify(bo, ctx);
		if (!ret)
			ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
		return ret;
	}

	tt_has_data = ttm && (ttm_tt_is_populated(ttm) ||
			      (ttm->page_flags & TTM_TT_FLAG_SWAPPED));

	move_lacks_source = !old_mem || (handle_system_ccs ? (!bo->ccs_cleared) :
					 (!mem_type_is_vram(old_mem_type) && !tt_has_data));

	needs_clear = (ttm && ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC) ||
		(!ttm && ttm_bo->type == ttm_bo_type_device);

	if (new_mem->mem_type == XE_PL_TT) {
		ret = xe_tt_map_sg(xe, ttm);
		if (ret)
			goto out;
	}

	if (move_lacks_source && !needs_clear) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		ret = xe_svm_bo_evict(bo);
		if (!ret) {
			drm_dbg(&xe->drm, "Evict system allocator BO success\n");
			ttm_bo_move_null(ttm_bo, new_mem);
		} else {
			drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
				ERR_PTR(ret));
		}

		goto out;
	}

	if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	/*
	 * Failed multi-hop where the old_mem is still marked as
	 * TTM_PL_FLAG_TEMPORARY, should just be a dummy move.
	 */
	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_TT) {
		ttm_bo_move_null(ttm_bo, new_mem);
		goto out;
	}

	if (!move_lacks_source && !xe_bo_is_pinned(bo)) {
		ret = xe_bo_move_notify(bo, ctx);
		if (ret)
			goto out;
	}

	if (old_mem_type == XE_PL_TT &&
	    new_mem->mem_type == XE_PL_SYSTEM) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_BOOKKEEP,
						     false,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0) {
			ret = timeout;
			goto out;
		}

		if (!handle_system_ccs) {
			ttm_bo_move_null(ttm_bo, new_mem);
			goto out;
		}
	}

	if (!move_lacks_source &&
	    ((old_mem_type == XE_PL_SYSTEM && resource_is_vram(new_mem)) ||
	     (mem_type_is_vram(old_mem_type) &&
	      new_mem->mem_type == XE_PL_SYSTEM))) {
		hop->fpfn = 0;
		hop->lpfn = 0;
		hop->mem_type = XE_PL_TT;
		hop->flags = TTM_PL_FLAG_TEMPORARY;
		ret = -EMULTIHOP;
		goto out;
	}

	if (bo->tile)
		migrate = bo->tile->migrate;
	else if (resource_is_vram(new_mem))
		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
	else if (mem_type_is_vram(old_mem_type))
		migrate = mem_type_to_migrate(xe, old_mem_type);
	else
		migrate = xe->tiles[0].migrate;

	xe_assert(xe, migrate);
	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
	if (xe_rpm_reclaim_safe(xe)) {
		/*
		 * We might be called through swapout in the validation path of
		 * another TTM device, so acquire rpm here.
		 */
		xe_pm_runtime_get(xe);
	} else {
		drm_WARN_ON(&xe->drm, handle_system_ccs);
		xe_pm_runtime_get_noresume(xe);
	}

	if (move_lacks_source) {
		u32 flags = 0;

		if (mem_type_is_vram(new_mem->mem_type))
			flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
		else if (handle_system_ccs)
			flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;

		fence = xe_migrate_clear(migrate, bo, new_mem, flags);
	} else {
		fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
					handle_system_ccs);
	}
	if (IS_ERR(fence)) {
		ret = PTR_ERR(fence);
		xe_pm_runtime_put(xe);
		goto out;
	}
	if (!move_lacks_source) {
		ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
						new_mem);
		if (ret) {
			dma_fence_wait(fence, false);
			ttm_bo_move_null(ttm_bo, new_mem);
			ret = 0;
		}
	} else {
		/*
		 * ttm_bo_move_accel_cleanup() may blow up if
		 * bo->resource == NULL, so just attach the
		 * fence and set the new resource.
		 */
		dma_resv_add_fence(ttm_bo->base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		ttm_bo_move_null(ttm_bo, new_mem);
	}

	dma_fence_put(fence);
	xe_pm_runtime_put(xe);

out:
	if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
	    ttm_bo->ttm) {
		long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     false,
						     MAX_SCHEDULE_TIMEOUT);
		if (timeout < 0)
			ret = timeout;

		xe_tt_unmap_sg(xe, ttm_bo->ttm);
	}

	return ret;
}

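/*
 * Purge a bo: fake-move it to system without copying any data, then drop
 * the backing pages via ttm_bo_shrink().
 */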
static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
			       struct ttm_buffer_object *bo,
			       unsigned long *scanned)
{
	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
	long lret;

	/* Fake move to system, without copying data. */
	if (bo->resource->mem_type != XE_PL_SYSTEM) {
		struct ttm_resource *new_resource;

		lret = ttm_bo_wait_ctx(bo, ctx);
		if (lret)
			return lret;

		lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
		if (lret)
			return lret;

		xe_tt_unmap_sg(xe, bo->ttm);
		ttm_bo_move_null(bo, new_resource);
	}

	*scanned += bo->ttm->num_pages;
	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
			     {.purge = true,
			      .writeback = false,
			      .allow_move = false});

	if (lret > 0)
		xe_ttm_tt_account_subtract(xe, bo->ttm);

	return lret;
}

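/*
 * In addition to the generic TTM checks, don't consider a bo valuable for
 * eviction while one of the VMs it is mapped in is validating.
 */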
static bool
xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
{
	struct drm_gpuvm_bo *vm_bo;

	if (!ttm_bo_eviction_valuable(bo, place))
		return false;

	if (!xe_bo_is_xe_bo(bo))
		return true;

	drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
		if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
			return false;
	}

	return true;
}

/**
 * xe_bo_shrink() - Try to shrink an xe bo.
 * @ctx: The struct ttm_operation_ctx used for shrinking.
 * @bo: The TTM buffer object whose pages to shrink.
 * @flags: Flags governing the shrink behaviour.
 * @scanned: Pointer to a counter of the number of pages
 * attempted to shrink.
 *
 * Try to shrink or purge a bo, and if it succeeds, unmap dma.
 * Note that we also need to be able to handle non-xe bos
 * (ghost bos), but only if the struct ttm_tt is embedded in
 * a struct xe_ttm_tt. When the function attempts to shrink
 * the pages of a buffer object, the value pointed to by @scanned
 * is updated.
 *
 * Return: The number of pages shrunken or purged, or negative error
 * code on failure.
 */
long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		  const struct xe_bo_shrink_flags flags,
		  unsigned long *scanned)
{
	struct ttm_tt *tt = bo->ttm;
	struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
	struct xe_device *xe = ttm_to_xe_device(bo->bdev);
	bool needs_rpm;
	long lret = 0L;

	if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
	    (flags.purge && !xe_tt->purgeable))
		return -EBUSY;

	if (!xe_bo_eviction_valuable(bo, &place))
		return -EBUSY;

	if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
		return xe_bo_shrink_purge(ctx, bo, scanned);

	if (xe_tt->purgeable) {
		if (bo->resource->mem_type != XE_PL_SYSTEM)
			lret = xe_bo_move_notify(xe_bo, ctx);
		if (!lret)
			lret = xe_bo_shrink_purge(ctx, bo, scanned);
		goto out_unref;
	}

	/* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
	needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
		     xe_bo_needs_ccs_pages(xe_bo));
	if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
		goto out_unref;

	*scanned += tt->num_pages;
	lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
			     {.purge = false,
			      .writeback = flags.writeback,
			      .allow_move = true});
	if (needs_rpm)
		xe_pm_runtime_put(xe);

	if (lret > 0)
		xe_ttm_tt_account_subtract(xe, tt);

out_unref:
	xe_bo_put(xe_bo);

	return lret;
}

/**
 * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
 * up in system memory.
 * @bo: The buffer object to prepare.
 *
 * On successful completion, the object backup pages are allocated. Expectation
 * is that this is called from the PM notifier, prior to suspend/hibernation.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct xe_bo *backup;
	int ret = 0;

	xe_bo_lock(bo, false);

	xe_assert(xe, !bo->backup_obj);

	/*
	 * Since this is called from the PM notifier we might have raced with
	 * someone unpinning this after we dropped the pinned list lock and
	 * before grabbing the above bo lock.
	 */
	if (!xe_bo_is_pinned(bo))
		goto out_unlock_bo;

	if (!xe_bo_is_vram(bo))
		goto out_unlock_bo;

	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
		goto out_unlock_bo;

	backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
					DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
					XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
					XE_BO_FLAG_PINNED);
	if (IS_ERR(backup)) {
		ret = PTR_ERR(backup);
		goto out_unlock_bo;
	}

	backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
	ttm_bo_pin(&backup->ttm);
	bo->backup_obj = backup;

out_unlock_bo:
	xe_bo_unlock(bo);
	return ret;
}

/**
 * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
 * @bo: The buffer object to undo the prepare for.
 *
 * Always returns 0. The backup object is removed, if still present. Expectation
 * is that this is called from the PM notifier when undoing the prepare step.
 *
 * Return: Always returns 0.
 */
int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
{
	xe_bo_lock(bo, false);
	if (bo->backup_obj) {
		ttm_bo_unpin(&bo->backup_obj->ttm);
		xe_bo_put(bo->backup_obj);
		bo->backup_obj = NULL;
	}
	xe_bo_unlock(bo);

	return 0;
}

/**
 * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved to system memory.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict_pinned(struct xe_bo *bo)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct xe_bo *backup = bo->backup_obj;
	bool backup_created = false;
	bool unmap = false;
	int ret = 0;

	xe_bo_lock(bo, false);

	if (WARN_ON(!bo->ttm.resource)) {
		ret = -EINVAL;
		goto out_unlock_bo;
	}

	if (WARN_ON(!xe_bo_is_pinned(bo))) {
		ret = -EINVAL;
		goto out_unlock_bo;
	}

	if (!xe_bo_is_vram(bo))
		goto out_unlock_bo;

	if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
		goto out_unlock_bo;

	if (!backup) {
		backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
						NULL, xe_bo_size(bo),
						DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
						XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
						XE_BO_FLAG_PINNED);
		if (IS_ERR(backup)) {
			ret = PTR_ERR(backup);
			goto out_unlock_bo;
		}
		backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
		backup_created = true;
	}

	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
		struct xe_migrate *migrate;
		struct dma_fence *fence;

		if (bo->tile)
			migrate = bo->tile->migrate;
		else
			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);

		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
		if (ret)
			goto out_backup;

		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
		if (ret)
			goto out_backup;

		fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
					backup->ttm.resource, false);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			goto out_backup;
		}

		dma_resv_add_fence(bo->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_resv_add_fence(backup->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	} else {
		ret = xe_bo_vmap(backup);
		if (ret)
			goto out_backup;

		if (iosys_map_is_null(&bo->vmap)) {
			ret = xe_bo_vmap(bo);
			if (ret)
				goto out_backup;
			unmap = true;
		}

		xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
				   xe_bo_size(bo));
	}

	if (!bo->backup_obj)
		bo->backup_obj = backup;

out_backup:
	xe_bo_vunmap(backup);
	if (ret && backup_created)
		xe_bo_put(backup);
out_unlock_bo:
	if (unmap)
		xe_bo_vunmap(bo);
	xe_bo_unlock(bo);
	return ret;
}

/**
 * xe_bo_restore_pinned() - Restore a pinned VRAM object
 * @bo: The buffer object to move.
 *
 * On successful completion, the object memory will be moved back to VRAM.
 *
 * This is needed for special handling of pinned VRAM objects during
 * suspend-resume.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_restore_pinned(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.gfp_retry_mayfail = false,
	};
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct xe_bo *backup = bo->backup_obj;
	bool unmap = false;
	int ret;

	if (!backup)
		return 0;

	xe_bo_lock(bo, false);

	if (!xe_bo_is_pinned(backup)) {
		ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
		if (ret)
			goto out_unlock_bo;
	}

	if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
		struct xe_migrate *migrate;
		struct dma_fence *fence;

		if (bo->tile)
			migrate = bo->tile->migrate;
		else
			migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);

		ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
		if (ret)
			goto out_unlock_bo;

		ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
		if (ret)
			goto out_unlock_bo;

		fence = xe_migrate_copy(migrate, backup, bo,
					backup->ttm.resource, bo->ttm.resource,
					false);
		if (IS_ERR(fence)) {
			ret = PTR_ERR(fence);
			goto out_unlock_bo;
		}

		dma_resv_add_fence(bo->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_resv_add_fence(backup->ttm.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	} else {
		ret = xe_bo_vmap(backup);
		if (ret)
			goto out_unlock_bo;

		if (iosys_map_is_null(&bo->vmap)) {
			ret = xe_bo_vmap(bo);
			if (ret)
				goto out_backup;
			unmap = true;
		}

		xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
				 xe_bo_size(bo));
	}

	bo->backup_obj = NULL;

out_backup:
	xe_bo_vunmap(backup);
	if (!bo->backup_obj) {
		if (xe_bo_is_pinned(backup))
			ttm_bo_unpin(&backup->ttm);
		xe_bo_put(backup);
	}
out_unlock_bo:
	if (unmap)
		xe_bo_vunmap(bo);
	xe_bo_unlock(bo);
	return ret;
}

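/**
 * xe_bo_dma_unmap_pinned() - Tear down the dma mapping of a pinned bo
 * @bo: The buffer object.
 *
 * Unmap the dma-buf attachment or the cached sg-table of a pinned bo,
 * if present.
 *
 * Return: 0 on success (always).
 */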
int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
{
	struct ttm_buffer_object *ttm_bo = &bo->ttm;
	struct ttm_tt *tt = ttm_bo->ttm;

	if (tt) {
		struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);

		if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
			dma_buf_unmap_attachment(ttm_bo->base.import_attach,
						 ttm_bo->sg,
						 DMA_BIDIRECTIONAL);
			ttm_bo->sg = NULL;
			xe_tt->sg = NULL;
		} else if (xe_tt->sg) {
			dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
					  xe_tt->sg,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(xe_tt->sg);
			xe_tt->sg = NULL;
		}
	}

	return 0;
}

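/* Translate a bo page offset to an io-space pfn, handling stolen memory. */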
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
				       unsigned long page_offset)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_res_cursor cursor;
	struct xe_vram_region *vram;

	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;

	vram = res_to_mem_region(ttm_bo->resource);
	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
}

static void __xe_bo_vunmap(struct xe_bo *bo);

/*
 * TODO: Move this function to TTM so we don't rely on how TTM does its
 * locking, thereby abusing TTM internals.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	bool locked;

	xe_assert(xe, !kref_read(&ttm_bo->kref));

	/*
	 * We can typically only race with TTM trylocking under the
	 * lru_lock, which will immediately be unlocked again since
	 * the ttm_bo refcount is zero at this point. So trylocking *should*
	 * always succeed here, as long as we hold the lru lock.
	 */
	spin_lock(&ttm_bo->bdev->lru_lock);
	locked = dma_resv_trylock(ttm_bo->base.resv);
	spin_unlock(&ttm_bo->bdev->lru_lock);
	xe_assert(xe, locked);

	return locked;
}

static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	struct dma_fence *replacement = NULL;
	struct xe_bo *bo;

	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	bo = ttm_to_xe_bo(ttm_bo);
	xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));

	/*
	 * Corner case where TTM fails to allocate memory and this BO's resv
	 * still points to the VM's resv
	 */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
		return;

	/*
	 * Scrub the preempt fences if any. The unbind fence is already
	 * attached to the resv.
	 * TODO: Don't do this for external bos once we scrub them after
	 * unbind.
	 */
	dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, fence) {
		if (xe_fence_is_xe_preempt(fence) &&
		    !dma_fence_is_signaled(fence)) {
			if (!replacement)
				replacement = dma_fence_get_stub();

			dma_resv_replace_fences(ttm_bo->base.resv,
						fence->context,
						replacement,
						DMA_RESV_USAGE_BOOKKEEP);
		}
	}
	dma_fence_put(replacement);

	dma_resv_unlock(ttm_bo->base.resv);
}

static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
	if (!xe_bo_is_xe_bo(ttm_bo))
		return;

	/*
	 * Object is idle and about to be destroyed. Release the
	 * dma-buf attachment.
	 */
	if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
		struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
						       struct xe_ttm_tt, ttm);

		dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
					 DMA_BIDIRECTIONAL);
		ttm_bo->sg = NULL;
		xe_tt->sg = NULL;
	}
}

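/*
 * Drop the backing store of an object by validating it against an empty
 * placement; see also purge_placement above.
 */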
static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
{
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);

	if (ttm_bo->ttm) {
		struct ttm_placement place = {};
		int ret = ttm_bo_validate(ttm_bo, &place, ctx);

		drm_WARN_ON(&xe->drm, ret);
	}
}

static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.gfp_retry_mayfail = false,
	};

	if (ttm_bo->ttm) {
		struct xe_ttm_tt *xe_tt =
			container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);

		if (xe_tt->purgeable)
			xe_ttm_bo_purge(ttm_bo, &ctx);
	}
}

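/*
 * CPU access (e.g. ptrace) to VRAM-backed bos: use the migrate engine for
 * non-visible resources or large (>= 16K) accesses, otherwise memcpy
 * through the CPU mapping of VRAM.
 */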
static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
				unsigned long offset, void *buf, int len,
				int write)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct iosys_map vmap;
	struct xe_res_cursor cursor;
	struct xe_vram_region *vram;
	int bytes_left = len;
	int err = 0;

	xe_bo_assert_held(bo);
	xe_device_assert_mem_access(xe);

	if (!mem_type_is_vram(ttm_bo->resource->mem_type))
		return -EIO;

	if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
		struct xe_migrate *migrate =
			mem_type_to_migrate(xe, ttm_bo->resource->mem_type);

		err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
					       write);
		goto out;
	}

	vram = res_to_mem_region(ttm_bo->resource);
	xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
		     xe_bo_size(bo) - (offset & PAGE_MASK), &cursor);

	do {
		unsigned long page_offset = (offset & ~PAGE_MASK);
		int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);

		iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
					  cursor.start);
		if (write)
			xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
		else
			xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);

		buf += byte_count;
		offset += byte_count;
		bytes_left -= byte_count;
		if (bytes_left)
			xe_res_next(&cursor, PAGE_SIZE);
	} while (bytes_left);

out:
	return err ?: len;
}

const struct ttm_device_funcs xe_ttm_funcs = {
	.ttm_tt_create = xe_ttm_tt_create,
	.ttm_tt_populate = xe_ttm_tt_populate,
	.ttm_tt_unpopulate = xe_ttm_tt_unpopulate,
	.ttm_tt_destroy = xe_ttm_tt_destroy,
	.evict_flags = xe_evict_flags,
	.move = xe_bo_move,
	.io_mem_reserve = xe_ttm_io_mem_reserve,
	.io_mem_pfn = xe_ttm_io_mem_pfn,
	.access_memory = xe_ttm_access_memory,
	.release_notify = xe_ttm_bo_release_notify,
	.eviction_valuable = xe_bo_eviction_valuable,
	.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
	.swap_notify = xe_ttm_bo_swap_notify,
};

static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
	struct xe_tile *tile;
	u8 id;

	if (bo->ttm.base.import_attach)
		drm_prime_gem_destroy(&bo->ttm.base, NULL);
	drm_gem_object_release(&bo->ttm.base);

	xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));

	for_each_tile(tile, xe, id)
		if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
			xe_ggtt_remove_bo(tile->mem.ggtt, bo);

#ifdef CONFIG_PROC_FS
	if (bo->client)
		xe_drm_client_remove_bo(bo);
#endif

	if (bo->vm && xe_bo_is_user(bo))
		xe_vm_put(bo->vm);

	if (bo->parent_obj)
		xe_bo_put(bo->parent_obj);

	mutex_lock(&xe->mem_access.vram_userfault.lock);
	if (!list_empty(&bo->vram_userfault_link))
		list_del(&bo->vram_userfault_link);
	mutex_unlock(&xe->mem_access.vram_userfault.lock);

	kfree(bo);
}

static void xe_gem_object_free(struct drm_gem_object *obj)
{
	/* Our BO reference counting scheme works as follows:
	 *
	 * The gem object kref is typically used throughout the driver,
	 * and the gem object holds a ttm_buffer_object refcount, so
	 * that when the last gem object reference is put, which is when
	 * we end up in this function, we also put that ttm_buffer_object
	 * refcount. Anything using gem interfaces is then no longer
	 * allowed to access the object in a way that requires a gem
	 * refcount, including locking the object.
	 *
	 * Driver TTM callbacks are allowed to use the ttm_buffer_object
	 * refcount directly if needed.
	 */
	__xe_bo_vunmap(gem_to_xe_bo(obj));
	ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
}

static void xe_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	if (bo->vm && !xe_vm_in_fault_mode(bo->vm)) {
		xe_assert(xe_bo_device(bo), xe_bo_is_user(bo));

		xe_bo_lock(bo, false);
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
		xe_bo_unlock(bo);
	}
}

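/*
 * CPU page fault handler for bo mmaps. Takes a runtime pm reference for
 * possibly VRAM-backed bos, inserts a dummy page if the device has been
 * unplugged, and tracks VRAM faults for later userfault invalidation.
 */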
xe_gem_fault(struct vm_fault * vmf)1688 static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
1689 {
1690 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
1691 struct drm_device *ddev = tbo->base.dev;
1692 struct xe_device *xe = to_xe_device(ddev);
1693 struct xe_bo *bo = ttm_to_xe_bo(tbo);
1694 bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
1695 vm_fault_t ret;
1696 int idx;
1697
1698 if (needs_rpm)
1699 xe_pm_runtime_get(xe);
1700
1701 ret = ttm_bo_vm_reserve(tbo, vmf);
1702 if (ret)
1703 goto out;
1704
1705 if (drm_dev_enter(ddev, &idx)) {
1706 trace_xe_bo_cpu_fault(bo);
1707
1708 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
1709 TTM_BO_VM_NUM_PREFAULT);
1710 drm_dev_exit(idx);
1711 } else {
1712 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
1713 }
1714
1715 if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
1716 goto out;
1717 /*
1718 * ttm_bo_vm_reserve() already has dma_resv_lock.
1719 */
1720 if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
1721 mutex_lock(&xe->mem_access.vram_userfault.lock);
1722 if (list_empty(&bo->vram_userfault_link))
1723 list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
1724 mutex_unlock(&xe->mem_access.vram_userfault.lock);
1725 }
1726
1727 dma_resv_unlock(tbo->base.resv);
1728 out:
1729 if (needs_rpm)
1730 xe_pm_runtime_put(xe);
1731
1732 return ret;
1733 }
1734
xe_bo_vm_access(struct vm_area_struct * vma,unsigned long addr,void * buf,int len,int write)1735 static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
1736 void *buf, int len, int write)
1737 {
1738 struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
1739 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1740 struct xe_device *xe = xe_bo_device(bo);
1741 int ret;
1742
1743 xe_pm_runtime_get(xe);
1744 ret = ttm_bo_vm_access(vma, addr, buf, len, write);
1745 xe_pm_runtime_put(xe);
1746
1747 return ret;
1748 }
1749
1750 /**
1751 * xe_bo_read() - Read from an xe_bo
1752 * @bo: The buffer object to read from.
1753 * @offset: The byte offset to start reading from.
1754 * @dst: Location to store the read.
1755 * @size: Size in bytes for the read.
1756 *
1757 * Read @size bytes from the @bo, starting from @offset, storing into @dst.
1758 *
1759 * Return: Zero on success, or negative error.
1760 */
xe_bo_read(struct xe_bo * bo,u64 offset,void * dst,int size)1761 int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
1762 {
1763 int ret;
1764
1765 ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
1766 if (ret >= 0 && ret != size)
1767 ret = -EIO;
1768 else if (ret == size)
1769 ret = 0;
1770
1771 return ret;
1772 }
1773
1774 static const struct vm_operations_struct xe_gem_vm_ops = {
1775 .fault = xe_gem_fault,
1776 .open = ttm_bo_vm_open,
1777 .close = ttm_bo_vm_close,
1778 .access = xe_bo_vm_access,
1779 };
1780
1781 static const struct drm_gem_object_funcs xe_gem_object_funcs = {
1782 .free = xe_gem_object_free,
1783 .close = xe_gem_object_close,
1784 .mmap = drm_gem_ttm_mmap,
1785 .export = xe_gem_prime_export,
1786 .vm_ops = &xe_gem_vm_ops,
1787 };
1788
1789 /**
1790 * xe_bo_alloc - Allocate storage for a struct xe_bo
1791 *
1792 * This function is intended to allocate storage to be used for input
1793 * to __xe_bo_create_locked(), in the case a pointer to the bo to be
1794 * created is needed before the call to __xe_bo_create_locked().
1795 * If __xe_bo_create_locked ends up never to be called, then the
1796 * storage allocated with this function needs to be freed using
1797 * xe_bo_free().
1798 *
1799 * Return: A pointer to an uninitialized struct xe_bo on success,
1800 * ERR_PTR(-ENOMEM) on error.
1801 */
xe_bo_alloc(void)1802 struct xe_bo *xe_bo_alloc(void)
1803 {
1804 struct xe_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1805
1806 if (!bo)
1807 return ERR_PTR(-ENOMEM);
1808
1809 return bo;
1810 }
1811
1812 /**
1813 * xe_bo_free - Free storage allocated using xe_bo_alloc()
1814 * @bo: The buffer object storage.
1815 *
1816 * Refer to xe_bo_alloc() documentation for valid use-cases.
1817 */
xe_bo_free(struct xe_bo * bo)1818 void xe_bo_free(struct xe_bo *bo)
1819 {
1820 kfree(bo);
1821 }

struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				     struct xe_tile *tile, struct dma_resv *resv,
				     struct ttm_lru_bulk_move *bulk, size_t size,
				     u16 cpu_caching, enum ttm_bo_type type,
				     u32 flags)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.gfp_retry_mayfail = true,
	};
	struct ttm_placement *placement;
	uint32_t alignment;
	size_t aligned_size;
	int err;

	/* Only kernel objects should set GT */
	xe_assert(xe, !tile || type == ttm_bo_type_kernel);

	if (XE_WARN_ON(!size)) {
		xe_bo_free(bo);
		return ERR_PTR(-EINVAL);
	}

	/* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */
	if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
		return ERR_PTR(-EINVAL);

	if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
	    !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
	    ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
	     (flags & (XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_NEEDS_2M)))) {
		size_t align = flags & XE_BO_FLAG_NEEDS_2M ? SZ_2M : SZ_64K;

		aligned_size = ALIGN(size, align);
		if (type != ttm_bo_type_device)
			size = ALIGN(size, align);
		flags |= XE_BO_FLAG_INTERNAL_64K;
		alignment = align >> PAGE_SHIFT;
	} else {
		aligned_size = ALIGN(size, SZ_4K);
		flags &= ~XE_BO_FLAG_INTERNAL_64K;
		alignment = SZ_4K >> PAGE_SHIFT;
	}

	if (type == ttm_bo_type_device && aligned_size != size)
		return ERR_PTR(-EINVAL);

	if (!bo) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;
	}

	bo->ccs_cleared = false;
	bo->tile = tile;
	bo->flags = flags;
	bo->cpu_caching = cpu_caching;
	bo->ttm.base.funcs = &xe_gem_object_funcs;
	bo->ttm.priority = XE_BO_PRIORITY_NORMAL;
	INIT_LIST_HEAD(&bo->pinned_link);
#ifdef CONFIG_PROC_FS
	INIT_LIST_HEAD(&bo->client_link);
#endif
	INIT_LIST_HEAD(&bo->vram_userfault_link);

	drm_gem_private_object_init(&xe->drm, &bo->ttm.base, size);

	if (resv) {
		ctx.allow_res_evict = !(flags & XE_BO_FLAG_NO_RESV_EVICT);
		ctx.resv = resv;
	}

	if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
		err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
		if (WARN_ON(err)) {
			xe_ttm_bo_destroy(&bo->ttm);
			return ERR_PTR(err);
		}
	}

	/* Defer populating type_sg bos */
	placement = (type == ttm_bo_type_sg ||
		     bo->flags & XE_BO_FLAG_DEFER_BACKING) ? &sys_placement :
		&bo->placement;
	err = ttm_bo_init_reserved(&xe->ttm, &bo->ttm, type,
				   placement, alignment,
				   &ctx, NULL, resv, xe_ttm_bo_destroy);
	if (err)
		return ERR_PTR(err);

	/*
	 * The VRAM pages underneath are potentially still being accessed by the
	 * GPU, as per async GPU clearing and async evictions. However TTM makes
	 * sure to add any corresponding move/clear fences into the object's
	 * dma-resv using the DMA_RESV_USAGE_KERNEL slot.
	 *
	 * For KMD internal buffers we don't care about GPU clearing, however we
	 * still need to handle async evictions, where the VRAM is still being
	 * accessed by the GPU. Most internal callers are not expecting this,
	 * since they are missing the required synchronisation before accessing
	 * the memory. To keep things simple just sync wait any kernel fences
	 * here, if the buffer is designated KMD internal.
	 *
	 * For normal userspace objects we should already have the required
	 * pipelining or sync waiting elsewhere, since we already have to deal
	 * with things like async GPU clearing.
	 */
	if (type == ttm_bo_type_kernel) {
		long timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
						     DMA_RESV_USAGE_KERNEL,
						     ctx.interruptible,
						     MAX_SCHEDULE_TIMEOUT);

		if (timeout < 0) {
			if (!resv)
				dma_resv_unlock(bo->ttm.base.resv);
			xe_bo_put(bo);
			return ERR_PTR(timeout);
		}
	}

	bo->created = true;
	if (bulk)
		ttm_bo_set_bulk_move(&bo->ttm, bulk);
	else
		ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return bo;
}

static int __xe_bo_fixed_placement(struct xe_device *xe,
				   struct xe_bo *bo,
				   u32 flags,
				   u64 start, u64 end, u64 size)
{
	struct ttm_place *place = bo->placements;

	if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
		return -EINVAL;

	place->flags = TTM_PL_FLAG_CONTIGUOUS;
	place->fpfn = start >> PAGE_SHIFT;
	place->lpfn = end >> PAGE_SHIFT;

	switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
	case XE_BO_FLAG_VRAM0:
		place->mem_type = XE_PL_VRAM0;
		break;
	case XE_BO_FLAG_VRAM1:
		place->mem_type = XE_PL_VRAM1;
		break;
	case XE_BO_FLAG_STOLEN:
		place->mem_type = XE_PL_STOLEN;
		break;
	default:
		/* 0 or multiple of the above set */
		return -EINVAL;
	}

	bo->placement = (struct ttm_placement) {
		.num_placement = 1,
		.placement = place,
	};

	return 0;
}

static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
		      struct xe_tile *tile, struct xe_vm *vm,
		      size_t size, u64 start, u64 end,
		      u16 cpu_caching, enum ttm_bo_type type, u32 flags,
		      u64 alignment)
{
	struct xe_bo *bo = NULL;
	int err;

	if (vm)
		xe_vm_assert_held(vm);

	if (start || end != ~0ULL) {
		bo = xe_bo_alloc();
		if (IS_ERR(bo))
			return bo;

		flags |= XE_BO_FLAG_FIXED_PLACEMENT;
		err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
		if (err) {
			xe_bo_free(bo);
			return ERR_PTR(err);
		}
	}

	bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
				    vm && !xe_vm_in_fault_mode(vm) &&
				    flags & XE_BO_FLAG_USER ?
				    &vm->lru_bulk_move : NULL, size,
				    cpu_caching, type, flags);
	if (IS_ERR(bo))
		return bo;

	bo->min_align = alignment;

	/*
	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
	 * to ensure the shared resv doesn't disappear under the bo, the bo
	 * will keep a reference to the vm, and avoid circular references
	 * by having all the vm's bo references released at vm close
	 * time.
	 */
	if (vm && xe_bo_is_user(bo))
		xe_vm_get(vm);
	bo->vm = vm;

	if (bo->flags & XE_BO_FLAG_GGTT) {
		struct xe_tile *t;
		u8 id;

		if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
			if (!tile && flags & XE_BO_FLAG_STOLEN)
				tile = xe_device_get_root_tile(xe);

			xe_assert(xe, tile);
		}

		for_each_tile(t, xe, id) {
			if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
				continue;

			if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
				err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
							   start + xe_bo_size(bo), U64_MAX);
			} else {
				err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
			}
			if (err)
				goto err_unlock_put_bo;
		}
	}

	trace_xe_bo_create(bo);
	return bo;

err_unlock_put_bo:
	__xe_bo_unset_bulk_move(bo);
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags, u64 alignment)
{
	return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
				     flags, alignment);
}

struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags)
{
	return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
				     flags, 0);
}

struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm, size_t size,
				u16 cpu_caching,
				u32 flags)
{
	struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
						 cpu_caching, ttm_bo_type_device,
						 flags | XE_BO_FLAG_USER, 0);
	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}

struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags)
{
	struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);

	if (!IS_ERR(bo))
		xe_bo_unlock_vm_held(bo);

	return bo;
}
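
/*
 * Example (illustrative sketch only): creating an unlocked, user-visible BO
 * in system memory with write-combined CPU caching. The size and flag
 * choices are assumptions for the sketch, not recommendations.
 *
 *	struct xe_bo *bo = xe_bo_create_user(xe, NULL, NULL, SZ_64K,
 *					     DRM_XE_GEM_CPU_CACHING_WC,
 *					     XE_BO_FLAG_SYSTEM);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */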

struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm,
				      size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
					       type, flags, 0);
}

struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
					      struct xe_tile *tile,
					      struct xe_vm *vm,
					      size_t size, u64 offset,
					      enum ttm_bo_type type, u32 flags,
					      u64 alignment)
{
	struct xe_bo *bo;
	int err;
	u64 start = offset == ~0ull ? 0 : offset;
	u64 end = offset == ~0ull ? offset : start + size;

	if (flags & XE_BO_FLAG_STOLEN &&
	    xe_ttm_stolen_cpu_access_needs_ggtt(xe))
		flags |= XE_BO_FLAG_GGTT;

	bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
				       flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
				       alignment);
	if (IS_ERR(bo))
		return bo;

	err = xe_bo_pin(bo);
	if (err)
		goto err_put;

	err = xe_bo_vmap(bo);
	if (err)
		goto err_unpin;

	xe_bo_unlock_vm_held(bo);

	return bo;

err_unpin:
	xe_bo_unpin(bo);
err_put:
	xe_bo_unlock_vm_held(bo);
	xe_bo_put(bo);
	return ERR_PTR(err);
}

struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags)
{
	return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
}
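
/*
 * Example (illustrative sketch only): a kernel-internal BO that is created,
 * pinned and CPU-mapped in one call, then written through its iosys_map.
 * The placement flags and the written value are assumptions for the sketch.
 *
 *	struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
 *						ttm_bo_type_kernel,
 *						XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *						XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	xe_map_wr(xe, &bo->vmap, 0, u32, 0xc0ffee);
 */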

static void __xe_bo_unpin_map_no_vm(void *arg)
{
	xe_bo_unpin_map_no_vm(arg);
}

struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags)
{
	struct xe_bo *bo;
	int ret;

	KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);

	bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
	if (IS_ERR(bo))
		return bo;

	ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
	if (ret)
		return ERR_PTR(ret);

	return bo;
}

struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags)
{
	struct xe_bo *bo = xe_managed_bo_create_pin_map(xe, tile, ALIGN(size, PAGE_SIZE), flags);

	if (IS_ERR(bo))
		return bo;

	xe_map_memcpy_to(xe, &bo->vmap, 0, data, size);

	return bo;
}
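
/*
 * Example (illustrative sketch only): uploading a firmware-style blob into
 * a managed, pinned and mapped kernel BO; teardown happens automatically
 * via the devm action registered in xe_managed_bo_create_pin_map().
 * "blob" and "blob_size" are hypothetical.
 *
 *	struct xe_bo *bo =
 *		xe_managed_bo_create_from_data(xe, tile, blob, blob_size,
 *					       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
 *					       XE_BO_FLAG_GGTT);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 */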

/**
 * xe_managed_bo_reinit_in_vram - Replace a managed system-memory BO with a
 * VRAM copy
 * @xe: xe device
 * @tile: Tile where the new buffer will be created
 * @src: Managed buffer object allocated in system memory
 *
 * Replace a managed src buffer object allocated in system memory with a new
 * one allocated in vram, copying the data between them.
 * The buffer object in VRAM is not going to have the same GGTT address; the
 * caller is responsible for making sure that any old references to it are
 * updated.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src)
{
	struct xe_bo *bo;
	u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;

	dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
				      XE_BO_FLAG_PINNED_NORESTORE);

	xe_assert(xe, IS_DGFX(xe));
	xe_assert(xe, !(*src)->vmap.is_iomem);

	bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr,
					    xe_bo_size(*src), dst_flags);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
	*src = bo;

	return 0;
}

/*
 * XXX: This is in the VM bind data path, likely should calculate this once and
 * store, with a recalculation if the BO is moved.
 */
uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);

	switch (res->mem_type) {
	case XE_PL_STOLEN:
		return xe_ttm_stolen_gpu_offset(xe);
	case XE_PL_TT:
	case XE_PL_SYSTEM:
		return 0;
	default:
		return res_to_mem_region(res)->dpa_base;
	}
}

/**
 * xe_bo_pin_external - pin an external BO
 * @bo: buffer object to be pinned
 *
 * Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_pin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int xe_bo_pin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_user(bo));

	if (!xe_bo_is_pinned(bo)) {
		err = xe_bo_validate(bo, NULL, false);
		if (err)
			return err;

		spin_lock(&xe->pinned.lock);
		list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
		spin_unlock(&xe->pinned.lock);
	}

	ttm_bo_pin(&bo->ttm);
	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

int xe_bo_pin(struct xe_bo *bo)
{
	struct ttm_place *place = &bo->placements[0];
	struct xe_device *xe = xe_bo_device(bo);
	int err;

	/* We currently don't expect user BO to be pinned */
	xe_assert(xe, !xe_bo_is_user(bo));

	/* Pinned object must be in GGTT or have pinned flag */
	xe_assert(xe, bo->flags & (XE_BO_FLAG_PINNED |
				   XE_BO_FLAG_GGTT));

	/*
	 * No reason we can't support pinning imported dma-bufs; we just don't
	 * expect to pin an imported dma-buf.
	 */
	xe_assert(xe, !bo->ttm.base.import_attach);

	/* We only expect at most 1 pin */
	xe_assert(xe, !xe_bo_is_pinned(bo));

	err = xe_bo_validate(bo, NULL, false);
	if (err)
		return err;

	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
		spin_lock(&xe->pinned.lock);
		if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
			list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
		else
			list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
		spin_unlock(&xe->pinned.lock);
	}

	ttm_bo_pin(&bo->ttm);
	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
		xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);

	return 0;
}

/**
 * xe_bo_unpin_external - unpin an external BO
 * @bo: buffer object to be unpinned
 *
 * Unpin an external (not tied to a VM, can be exported via dma-buf / prime FD)
 * BO. Unique call compared to xe_bo_unpin as this function has its own set of
 * asserts and code to ensure evict / restore on suspend / resume.
 */
void xe_bo_unpin_external(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->vm);
	xe_assert(xe, xe_bo_is_pinned(bo));
	xe_assert(xe, xe_bo_is_user(bo));

	spin_lock(&xe->pinned.lock);
	if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link))
		list_del_init(&bo->pinned_link);
	spin_unlock(&xe->pinned.lock);

	ttm_bo_unpin(&bo->ttm);
	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
		xe_ttm_tt_account_add(xe, bo->ttm.ttm);

	/*
	 * FIXME: If we always use the reserve / unreserve functions for locking
	 * we do not need this.
	 */
	ttm_bo_move_to_lru_tail_unlocked(&bo->ttm);
}
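
/*
 * Example (illustrative sketch only): lifecycle of an exported BO pin as a
 * dma-buf exporter might use it. Both calls expect the bo's dma_resv to be
 * held by the caller; locking is elided here.
 *
 *	int err = xe_bo_pin_external(bo);
 *
 *	if (err)
 *		return err;
 *	... BO stays resident, with evict/restore handled across suspend ...
 *	xe_bo_unpin_external(bo);
 */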

void xe_bo_unpin(struct xe_bo *bo)
{
	struct ttm_place *place = &bo->placements[0];
	struct xe_device *xe = xe_bo_device(bo);

	xe_assert(xe, !bo->ttm.base.import_attach);
	xe_assert(xe, xe_bo_is_pinned(bo));

	if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
		spin_lock(&xe->pinned.lock);
		xe_assert(xe, !list_empty(&bo->pinned_link));
		list_del_init(&bo->pinned_link);
		spin_unlock(&xe->pinned.lock);

		if (bo->backup_obj) {
			if (xe_bo_is_pinned(bo->backup_obj))
				ttm_bo_unpin(&bo->backup_obj->ttm);
			xe_bo_put(bo->backup_obj);
			bo->backup_obj = NULL;
		}
	}
	ttm_bo_unpin(&bo->ttm);
	if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
		xe_ttm_tt_account_add(xe, bo->ttm.ttm);
}

/**
 * xe_bo_validate() - Make sure the bo is in an allowed placement
 * @bo: The bo.
 * @vm: Pointer to the vm the bo shares a locked dma_resv object with, or
 * NULL. Used together with @allow_res_evict.
 * @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
 * reservation object.
 *
 * Make sure the bo is in allowed placement, migrating it if necessary. If
 * needed, other bos will be evicted. If bos selected for eviction share
 * the @vm's reservation object, they can be evicted iff @allow_res_evict is
 * set to true, otherwise they will be bypassed.
 *
 * Return: 0 on success, negative error code on failure. May return
 * -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
 */
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.gfp_retry_mayfail = true,
	};
	struct pin_cookie cookie;
	int ret;

	if (vm) {
		lockdep_assert_held(&vm->lock);
		xe_vm_assert_held(vm);

		ctx.allow_res_evict = allow_res_evict;
		ctx.resv = xe_vm_resv(vm);
	}

	cookie = xe_vm_set_validating(vm, allow_res_evict);
	trace_xe_bo_validate(bo);
	ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
	xe_vm_clear_validating(vm, allow_res_evict, cookie);

	return ret;
}
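
/*
 * Example (illustrative sketch only): revalidating a VM-private BO before
 * use, allowing eviction of bos sharing the same reservation object.
 * Assumes vm->lock and the shared dma_resv are already held, per the
 * kernel-doc above.
 *
 *	int err = xe_bo_validate(bo, vm, true);
 *
 *	if (err == -EINTR || err == -ERESTARTSYS)
 *		return err;	(interrupted; the caller may retry)
 */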

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &xe_ttm_bo_destroy;
}

/*
 * Resolve a BO address. There is no assert to check if the proper lock is held
 * so it should only be used in cases where it is not fatal to get the wrong
 * address, such as printing debug information, but not in cases where memory is
 * written based on this result.
 */
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	struct xe_device *xe = xe_bo_device(bo);
	struct xe_res_cursor cur;
	u64 page;

	xe_assert(xe, page_size <= PAGE_SIZE);
	page = offset >> PAGE_SHIFT;
	offset &= (PAGE_SIZE - 1);

	if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
		xe_assert(xe, bo->ttm.ttm);

		xe_res_first_sg(xe_bo_sg(bo), page << PAGE_SHIFT,
				page_size, &cur);
		return xe_res_dma(&cur) + offset;
	} else {
		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
			     page_size, &cur);
		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
	}
}

dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
{
	if (!READ_ONCE(bo->ttm.pin_count))
		xe_bo_assert_held(bo);
	return __xe_bo_addr(bo, offset, page_size);
}

int xe_bo_vmap(struct xe_bo *bo)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	void *virtual;
	bool is_iomem;
	int ret;

	xe_bo_assert_held(bo);

	if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
			!force_contiguous(bo->flags)))
		return -EINVAL;

	if (!iosys_map_is_null(&bo->vmap))
		return 0;

	/*
	 * We use this more or less deprecated interface for now since
	 * ttm_bo_vmap() doesn't offer the optimization of kmapping
	 * single page bos, which is done here.
	 * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap
	 * to use struct iosys_map.
	 */
	ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (is_iomem)
		iosys_map_set_vaddr_iomem(&bo->vmap, (void __iomem *)virtual);
	else
		iosys_map_set_vaddr(&bo->vmap, virtual);

	return 0;
}

static void __xe_bo_vunmap(struct xe_bo *bo)
{
	if (!iosys_map_is_null(&bo->vmap)) {
		iosys_map_clear(&bo->vmap);
		ttm_bo_kunmap(&bo->kmap);
	}
}

void xe_bo_vunmap(struct xe_bo *bo)
{
	xe_bo_assert_held(bo);
	__xe_bo_vunmap(bo);
}
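
/*
 * Example (illustrative sketch only): mapping a CPU-accessible BO, copying
 * data in and tearing the mapping down again. Requires
 * XE_BO_FLAG_NEEDS_CPU_ACCESS and a contiguous placement, as enforced in
 * xe_bo_vmap(). "data" and "len" are hypothetical.
 *
 *	int err = xe_bo_vmap(bo);
 *
 *	if (err)
 *		return err;
 *	xe_map_memcpy_to(xe, &bo->vmap, 0, data, len);
 *	xe_bo_vunmap(bo);
 */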

static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support DRM_XE_PXP_TYPE_HWDRM for now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	return xe_pxp_key_assign(xe->pxp, bo);
}

typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
					     struct xe_bo *bo,
					     u64 value);

static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
	[DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
};

static int gem_create_user_ext_set_property(struct xe_device *xe,
					    struct xe_bo *bo,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(gem_create_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
	if (!gem_create_set_property_funcs[idx])
		return -EINVAL;

	return gem_create_set_property_funcs[idx](xe, bo, ext.value);
}

typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
					       struct xe_bo *bo,
					       u64 extension);

static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
	[DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(gem_create_user_extension_funcs));
	err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return gem_create_user_extensions(xe, bo, ext.next_extension,
						  ++ext_number);

	return 0;
}
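
/*
 * Example (illustrative sketch only): how userspace might chain a single
 * PXP set-property extension into drm_xe_gem_create, which the walker above
 * then visits. Field values are assumptions for the sketch.
 *
 *	struct drm_xe_ext_set_property ext = {
 *		.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
 *		.value = DRM_XE_PXP_TYPE_HWDRM,
 *	};
 *	struct drm_xe_gem_create create = {
 *		.extensions = (uintptr_t)&ext,
 *		... remaining fields as usual ...
 *	};
 */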

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_gem_create *args = data;
	struct xe_vm *vm = NULL;
	ktime_t end = 0;
	struct xe_bo *bo;
	unsigned int bo_flags;
	u32 handle;
	int err;

	if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	/* at least one valid memory placement must be specified */
	if (XE_IOCTL_DBG(xe, (args->placement & ~xe->info.mem_region_mask) ||
			 !args->placement))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~(DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING |
			   DRM_XE_GEM_CREATE_FLAG_SCANOUT |
			   DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM)))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->handle))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !args->size))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size > SIZE_MAX))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->size & ~PAGE_MASK))
		return -EINVAL;

	bo_flags = 0;
	if (args->flags & DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING)
		bo_flags |= XE_BO_FLAG_DEFER_BACKING;

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_SCANOUT)
		bo_flags |= XE_BO_FLAG_SCANOUT;

	bo_flags |= args->placement << (ffs(XE_BO_FLAG_SYSTEM) - 1);

	/* CCS formats need physical placement at a 64K alignment in VRAM. */
	if ((bo_flags & XE_BO_FLAG_VRAM_MASK) &&
	    (bo_flags & XE_BO_FLAG_SCANOUT) &&
	    !(xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) &&
	    IS_ALIGNED(args->size, SZ_64K))
		bo_flags |= XE_BO_FLAG_NEEDS_64K;

	if (args->flags & DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM) {
		if (XE_IOCTL_DBG(xe, !(bo_flags & XE_BO_FLAG_VRAM_MASK)))
			return -EINVAL;

		bo_flags |= XE_BO_FLAG_NEEDS_CPU_ACCESS;
	}

	if (XE_IOCTL_DBG(xe, !args->cpu_caching ||
			 args->cpu_caching > DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_VRAM_MASK &&
			 args->cpu_caching != DRM_XE_GEM_CPU_CACHING_WC))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, bo_flags & XE_BO_FLAG_SCANOUT &&
			 args->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB))
		return -EINVAL;

	if (args->vm_id) {
		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;
	}

retry:
	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			goto out_vm;
	}

	bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
			       bo_flags);

	if (vm)
		xe_vm_unlock(vm);

	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		if (xe_vm_validate_should_retry(NULL, err, &end))
			goto retry;
		goto out_vm;
	}

	if (args->extensions) {
		err = gem_create_user_extensions(xe, bo, args->extensions, 0);
		if (err)
			goto out_bulk;
	}

	err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
	if (err)
		goto out_bulk;

	args->handle = handle;
	goto out_put;

out_bulk:
	if (vm && !xe_vm_in_fault_mode(vm)) {
		xe_vm_lock(vm, false);
		__xe_bo_unset_bulk_move(bo);
		xe_vm_unlock(vm);
	}
out_put:
	xe_bo_put(bo);
out_vm:
	if (vm)
		xe_vm_put(vm);

	return err;
}

int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct drm_xe_gem_mmap_offset *args = data;
	struct drm_gem_object *gem_obj;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, args->flags &
			 ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
		return -EINVAL;

	if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
		if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
			return -EINVAL;

		if (XE_IOCTL_DBG(xe, args->handle))
			return -EINVAL;

		if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
			return -EINVAL;

		BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
			      SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
		args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
		return 0;
	}

	gem_obj = drm_gem_object_lookup(file, args->handle);
	if (XE_IOCTL_DBG(xe, !gem_obj))
		return -ENOENT;

	/* The mmap offset was set up at BO allocation time. */
	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

	xe_bo_put(gem_to_xe_bo(gem_obj));
	return 0;
}

/**
 * xe_bo_lock() - Lock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be taken
 * @intr: Whether to perform any wait interruptible
 *
 * Locks the buffer object's dma_resv object. If the buffer object is
 * pointing to a shared dma_resv object, that shared lock is locked.
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is set to false, the
 * function always returns 0.
 */
int xe_bo_lock(struct xe_bo *bo, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(bo->ttm.base.resv, NULL);

	dma_resv_lock(bo->ttm.base.resv, NULL);

	return 0;
}

/**
 * xe_bo_unlock() - Unlock the buffer object's dma_resv object
 * @bo: The struct xe_bo whose lock is to be released.
 *
 * Unlock a buffer object lock that was locked by xe_bo_lock().
 */
void xe_bo_unlock(struct xe_bo *bo)
{
	dma_resv_unlock(bo->ttm.base.resv);
}
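
/*
 * Example (illustrative sketch only): a typical interruptible lock/unlock
 * bracket around a short CPU-side operation on the BO.
 *
 *	int err = xe_bo_lock(bo, true);
 *
 *	if (err)
 *		return err;	(-EINTR: the wait was interrupted)
 *	... operate on bo ...
 *	xe_bo_unlock(bo);
 */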

/**
 * xe_bo_can_migrate - Whether a buffer object likely can be migrated
 * @bo: The buffer object to migrate
 * @mem_type: The TTM memory type intended to migrate to
 *
 * Check whether the buffer object supports migration to the
 * given memory type. Note that pinning may affect the ability to migrate as
 * returned by this function.
 *
 * This function is primarily intended as a helper for checking the
 * possibility to migrate buffer objects and can be called without
 * the object lock held.
 *
 * Return: true if migration is possible, false otherwise.
 */
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type)
{
	unsigned int cur_place;

	if (bo->ttm.type == ttm_bo_type_kernel)
		return true;

	if (bo->ttm.type == ttm_bo_type_sg)
		return false;

	for (cur_place = 0; cur_place < bo->placement.num_placement;
	     cur_place++) {
		if (bo->placements[cur_place].mem_type == mem_type)
			return true;
	}

	return false;
}

static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
{
	memset(place, 0, sizeof(*place));
	place->mem_type = mem_type;
}

/**
 * xe_bo_migrate - Migrate an object to the desired region id
 * @bo: The buffer object to migrate.
 * @mem_type: The TTM region type to migrate to.
 *
 * Attempt to migrate the buffer object to the desired memory region. The
 * buffer object may not be pinned, and must be locked.
 * On successful completion, the object memory type will be updated,
 * but an async migration task may not have completed yet; to wait for it,
 * the object's kernel fences must be signaled with the object lock held.
 *
 * Return: 0 on success. Negative error code on failure. In particular may
 * return -EINTR or -ERESTARTSYS if signal pending.
 */
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.gfp_retry_mayfail = true,
	};
	struct ttm_placement placement;
	struct ttm_place requested;

	xe_bo_assert_held(bo);

	if (bo->ttm.resource->mem_type == mem_type)
		return 0;

	if (xe_bo_is_pinned(bo))
		return -EBUSY;

	if (!xe_bo_can_migrate(bo, mem_type))
		return -EINVAL;

	xe_place_from_ttm_type(mem_type, &requested);
	placement.num_placement = 1;
	placement.placement = &requested;

	/*
	 * Stolen needs to be handled like below VRAM handling if we ever need
	 * to support it.
	 */
	drm_WARN_ON(&xe->drm, mem_type == XE_PL_STOLEN);

	if (mem_type_is_vram(mem_type)) {
		u32 c = 0;

		add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
	}

	return ttm_bo_validate(&bo->ttm, &placement, &ctx);
}
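
/*
 * Example (illustrative sketch only): migrating a BO to VRAM region 0 and
 * then waiting for the async move to finish, per the kernel-doc above.
 *
 *	long timeout;
 *	int err = xe_bo_migrate(bo, XE_PL_VRAM0);
 *
 *	if (err)
 *		return err;
 *	timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
 *					DMA_RESV_USAGE_KERNEL, true,
 *					MAX_SCHEDULE_TIMEOUT);
 *	if (timeout < 0)
 *		return timeout;
 */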

/**
 * xe_bo_evict - Evict an object to evict placement
 * @bo: The buffer object to migrate.
 *
 * On successful completion, the object memory will be moved to evict
 * placement. This function blocks until the object has been fully moved.
 *
 * Return: 0 on success. Negative error code on failure.
 */
int xe_bo_evict(struct xe_bo *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.gfp_retry_mayfail = true,
	};
	struct ttm_placement placement;
	int ret;

	xe_evict_flags(&bo->ttm, &placement);
	ret = ttm_bo_validate(&bo->ttm, &placement, &ctx);
	if (ret)
		return ret;

	dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
			      false, MAX_SCHEDULE_TIMEOUT);

	return 0;
}

/**
 * xe_bo_needs_ccs_pages - Whether a bo needs to back up CCS pages when
 * placed in system memory.
 * @bo: The xe_bo
 *
 * Return: true if extra pages need to be allocated, false otherwise.
 */
bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);

	if (GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe))
		return false;

	if (!xe_device_has_flat_ccs(xe) || bo->ttm.type != ttm_bo_type_device)
		return false;

	/*
	 * On discrete GPUs, if the GPU can access this buffer from
	 * system memory (i.e., it allows XE_PL_TT placement), FlatCCS
	 * can't be used since there's no CCS storage associated with
	 * non-VRAM addresses.
	 */
	if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
		return false;

	/*
	 * Compression implies coh_none, therefore we know for sure that WB
	 * memory can't currently use compression, which is likely one of the
	 * common cases.
	 */
	if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
		return false;

	return true;
}

/**
 * __xe_bo_release_dummy() - Dummy kref release function
 * @kref: The embedded struct kref.
 *
 * Dummy release function for xe_bo_put_deferred(). Keep off.
 */
void __xe_bo_release_dummy(struct kref *kref)
{
}

/**
 * xe_bo_put_commit() - Put bos whose put was deferred by xe_bo_put_deferred().
 * @deferred: The lockless list used for the call to xe_bo_put_deferred().
 *
 * Puts all bos whose put was deferred by xe_bo_put_deferred().
 * The @deferred list can be either an onstack local list or a global
 * shared list used by a workqueue.
 */
void xe_bo_put_commit(struct llist_head *deferred)
{
	struct llist_node *freed;
	struct xe_bo *bo, *next;

	if (!deferred)
		return;

	freed = llist_del_all(deferred);
	if (!freed)
		return;

	llist_for_each_entry_safe(bo, next, freed, freed)
		drm_gem_object_free(&bo->ttm.base.refcount);
}
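
/*
 * Example (illustrative sketch only): batching puts from a context where
 * dropping the last reference directly would be unsafe, then committing
 * them in one go. xe_bo_put_deferred() lives in xe_bo.h; the "for_each_bo"
 * iterator is hypothetical.
 *
 *	LLIST_HEAD(deferred);
 *
 *	for_each_bo(bo, bos)
 *		xe_bo_put_deferred(bo, &deferred);
 *	xe_bo_put_commit(&deferred);
 */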

static void xe_bo_dev_work_func(struct work_struct *work)
{
	struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);

	xe_bo_put_commit(&bo_dev->async_list);
}

/**
 * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
 * @bo_dev: The BO dev structure
 */
void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
{
	INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
}

/**
 * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
 * @bo_dev: The BO dev structure
 */
void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
{
	flush_work(&bo_dev->async_free);
}

void xe_bo_put(struct xe_bo *bo)
{
	struct xe_tile *tile;
	u8 id;

	might_sleep();
	if (bo) {
#ifdef CONFIG_PROC_FS
		if (bo->client)
			might_lock(&bo->client->bos_lock);
#endif
		for_each_tile(tile, xe_bo_device(bo), id)
			if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
				xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
		drm_gem_object_put(&bo->ttm.base);
	}
}

/**
 * xe_bo_dumb_create - Create a dumb bo as backing for a fb
 * @file_priv: ...
 * @dev: ...
 * @args: ...
 *
 * See dumb_create() hook in include/drm/drm_drv.h
 *
 * Return: ...
 */
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	uint32_t handle;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	int err;
	u32 page_size = max_t(u32, PAGE_SIZE,
			      xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);

	args->pitch = ALIGN(args->width * cpp, 64);
	args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
			   page_size);

	bo = xe_bo_create_user(xe, NULL, NULL, args->size,
			       DRM_XE_GEM_CPU_CACHING_WC,
			       XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
			       XE_BO_FLAG_SCANOUT |
			       XE_BO_FLAG_NEEDS_CPU_ACCESS);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	err = drm_gem_handle_create(file_priv, &bo->ttm.base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&bo->ttm.base);
	if (!err)
		args->handle = handle;
	return err;
}

void xe_bo_runtime_pm_release_mmap_offset(struct xe_bo *bo)
{
	struct ttm_buffer_object *tbo = &bo->ttm;
	struct ttm_device *bdev = tbo->bdev;

	drm_vma_node_unmap(&tbo->base.vma_node, bdev->dev_mapping);

	list_del_init(&bo->vram_userfault_link);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_bo.c"
#endif