// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_sa.h"

#include <linux/kernel.h>

#include <drm/drm_managed.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_map.h"

/* drmm release action: tear down the manager when the DRM device goes away */
static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
{
	struct xe_sa_manager *sa_manager = arg;
	struct xe_bo *bo = sa_manager->bo;

	if (!bo) {
		drm_err(drm, "no bo for sa manager\n");
		return;
	}

	drm_suballoc_manager_fini(&sa_manager->base);

	if (sa_manager->is_iomem)
		kvfree(sa_manager->cpu_ptr);

	sa_manager->bo = NULL;
}

/**
 * __xe_sa_bo_manager_init() - Create and initialize the suballocator
 * @tile: the &xe_tile where the backing BO is allocated
 * @size: number of bytes to allocate
 * @guard: number of bytes to exclude from suballocations
 * @align: alignment for each suballocated chunk
 *
 * Creates a pinned, GGTT-mapped backing BO of @size bytes and prepares the
 * suballocation manager to distribute its first (@size - @guard) bytes.
 *
 * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
 */
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_sa_manager *sa_manager;
	u32 managed_size;
	struct xe_bo *bo;
	int ret;

	xe_tile_assert(tile, size > guard);
	managed_size = size - guard;

	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
	if (!sa_manager)
		return ERR_PTR(-ENOMEM);

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
			size / SZ_1K, bo);
		return ERR_CAST(bo);
	}
	sa_manager->bo = bo;
	sa_manager->is_iomem = bo->vmap.is_iomem;
	sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);

	if (bo->vmap.is_iomem) {
		/*
		 * For iomem (VRAM) BOs, suballocations are staged in a
		 * system-memory shadow buffer and copied into the BO on
		 * xe_sa_bo_flush_write().
		 */
		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
		if (!sa_manager->cpu_ptr)
			return ERR_PTR(-ENOMEM);
	} else {
		sa_manager->cpu_ptr = bo->vmap.vaddr;
		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
	}

	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
				       sa_manager);
	if (ret)
		return ERR_PTR(ret);

	return sa_manager;
}
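
/*
 * Usage sketch (hypothetical caller, not part of this file; the sizes,
 * guard and alignment values below are purely illustrative): a per-tile
 * suballocator is typically created once at init time and lives for the
 * lifetime of the device:
 *
 *	struct xe_sa_manager *mgr;
 *
 *	mgr = __xe_sa_bo_manager_init(tile, SZ_512K, 16, SZ_64);
 *	if (IS_ERR(mgr))
 *		return PTR_ERR(mgr);
 *
 * No explicit teardown is required: xe_sa_bo_manager_fini() was registered
 * above as a drmm action, so it runs automatically on device release.
 */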

/**
 * __xe_sa_bo_new() - Make a suballocation using custom gfp flags.
 * @sa_manager: the &xe_sa_manager
 * @size: number of bytes we want to suballocate
 * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
 *
 * Try to make a suballocation of size @size, waiting interruptibly for
 * older suballocations to be released if the manager is currently full.
 *
 * Return: a &drm_suballoc, or an ERR_PTR.
 */
struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
{
	/*
	 * BB too large, return -ENOBUFS indicating user should split
	 * array of binds into smaller chunks.
	 */
	if (size > sa_manager->base.size)
		return ERR_PTR(-ENOBUFS);

	return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}

/*
 * For iomem-backed managers, CPU writes land in the system-memory shadow
 * buffer; copy the suballocation's range into the real BO. A no-op when
 * the BO is directly CPU-mapped.
 */
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	if (!sa_manager->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
			 xe_sa_bo_cpu_addr(sa_bo),
			 drm_suballoc_size(sa_bo));
}

/* Free the suballocation; its range is only reused once the fence signals. */
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
		   struct dma_fence *fence)
{
	drm_suballoc_free(sa_bo, fence);
}
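
/*
 * End-to-end sketch (hypothetical caller, illustrative sizes, not part of
 * this file): suballocate, fill through the CPU address, flush the write
 * (only needed for iomem-backed managers), then free against the fence of
 * the submission that consumes the data. This assumes the mgr, data and
 * fence variables from the caller's context:
 *
 *	struct drm_suballoc *sa;
 *
 *	sa = __xe_sa_bo_new(mgr, SZ_4K, GFP_KERNEL);
 *	if (IS_ERR(sa))
 *		return PTR_ERR(sa);
 *
 *	memcpy(xe_sa_bo_cpu_addr(sa), data, SZ_4K);
 *	xe_sa_bo_flush_write(sa);
 *	... submit work that reads from xe_sa_bo_gpu_addr(sa) ...
 *	xe_sa_bo_free(sa, fence);
 *
 * The freed range is not handed out again until the fence signals, which
 * is what makes one pinned BO safe to share across in-flight submissions.
 */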