xref: /linux/drivers/gpu/drm/xe/xe_sa.c (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_sa.h"
7 
8 #include <linux/kernel.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "xe_bo.h"
13 #include "xe_device_types.h"
14 #include "xe_map.h"
15 
16 static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
17 {
18 	struct xe_sa_manager *sa_manager = arg;
19 	struct xe_bo *bo = sa_manager->bo;
20 
21 	if (!bo) {
22 		drm_err(drm, "no bo for sa manager\n");
23 		return;
24 	}
25 
26 	drm_suballoc_manager_fini(&sa_manager->base);
27 
28 	if (sa_manager->is_iomem)
29 		kvfree(sa_manager->cpu_ptr);
30 
31 	sa_manager->bo = NULL;
32 	sa_manager->shadow = NULL;
33 }
34 
35 /**
36  * __xe_sa_bo_manager_init() - Create and initialize the suballocator
37  * @tile: the &xe_tile where allocate
38  * @size: number of bytes to allocate
39  * @guard: number of bytes to exclude from suballocations
40  * @align: alignment for each suballocated chunk
41  * @flags: flags for suballocator
42  *
43  * Prepares the suballocation manager for suballocations.
44  *
45  * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
46  */
47 struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size,
48 					      u32 guard, u32 align, u32 flags)
49 {
50 	struct xe_device *xe = tile_to_xe(tile);
51 	struct xe_sa_manager *sa_manager;
52 	u32 managed_size;
53 	struct xe_bo *bo;
54 	int ret;
55 
56 	xe_tile_assert(tile, size > guard);
57 	managed_size = size - guard;
58 
59 	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
60 	if (!sa_manager)
61 		return ERR_PTR(-ENOMEM);
62 
63 	bo = xe_managed_bo_create_pin_map(xe, tile, size,
64 					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
65 					  XE_BO_FLAG_GGTT |
66 					  XE_BO_FLAG_GGTT_INVALIDATE |
67 					  XE_BO_FLAG_PINNED_NORESTORE);
68 	if (IS_ERR(bo)) {
69 		drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
70 			size / SZ_1K, bo);
71 		return ERR_CAST(bo);
72 	}
73 	sa_manager->bo = bo;
74 	sa_manager->is_iomem = bo->vmap.is_iomem;
75 
76 	if (bo->vmap.is_iomem) {
77 		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
78 		if (!sa_manager->cpu_ptr)
79 			return ERR_PTR(-ENOMEM);
80 	} else {
81 		sa_manager->cpu_ptr = bo->vmap.vaddr;
82 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
83 	}
84 
85 	if (flags & XE_SA_BO_MANAGER_FLAG_SHADOW) {
86 		struct xe_bo *shadow;
87 
88 		ret = drmm_mutex_init(&xe->drm, &sa_manager->swap_guard);
89 		if (ret)
90 			return ERR_PTR(ret);
91 
92 		if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
93 			fs_reclaim_acquire(GFP_KERNEL);
94 			might_lock(&sa_manager->swap_guard);
95 			fs_reclaim_release(GFP_KERNEL);
96 		}
97 
98 		shadow = xe_managed_bo_create_pin_map(xe, tile, size,
99 						      XE_BO_FLAG_VRAM_IF_DGFX(tile) |
100 						      XE_BO_FLAG_GGTT |
101 						      XE_BO_FLAG_GGTT_INVALIDATE |
102 						      XE_BO_FLAG_PINNED_NORESTORE);
103 		if (IS_ERR(shadow)) {
104 			drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
105 				size / SZ_1K, shadow);
106 			return ERR_CAST(shadow);
107 		}
108 		sa_manager->shadow = shadow;
109 	}
110 
111 	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
112 	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
113 				       sa_manager);
114 	if (ret)
115 		return ERR_PTR(ret);
116 
117 	return sa_manager;
118 }
119 
/**
 * xe_sa_bo_swap_shadow() - Swap the SA BO with shadow BO.
 * @sa_manager: the XE sub allocator manager
 *
 * Swaps the sub-allocator primary buffer object with shadow buffer object.
 * The manager must have been created with XE_SA_BO_MANAGER_FLAG_SHADOW
 * (asserted) and the caller must hold @sa_manager->swap_guard (checked via
 * lockdep).
 *
 * Return: None.
 */
void xe_sa_bo_swap_shadow(struct xe_sa_manager *sa_manager)
{
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	xe_assert(xe, sa_manager->shadow);
	lockdep_assert_held(&sa_manager->swap_guard);

	swap(sa_manager->bo, sa_manager->shadow);
	/*
	 * For non-iomem BOs cpu_ptr aliases the BO's vmap, so it must be
	 * repointed at the new primary; in the iomem case cpu_ptr is a
	 * separate kernel buffer and remains valid across the swap.
	 */
	if (!sa_manager->bo->vmap.is_iomem)
		sa_manager->cpu_ptr = sa_manager->bo->vmap.vaddr;
}
139 
/**
 * xe_sa_bo_sync_shadow() - Sync the SA Shadow BO with primary BO.
 * @sa_bo: the sub-allocator buffer object.
 *
 * Synchronize sub-allocator shadow buffer object with primary buffer object.
 * Copies the CPU-visible contents of @sa_bo into the shadow BO's mapping at
 * the same suballocation offset. The manager must have a shadow BO
 * (asserted) and the caller must hold swap_guard (checked via lockdep).
 *
 * Return: None.
 */
void xe_sa_bo_sync_shadow(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	xe_assert(xe, sa_manager->shadow);
	lockdep_assert_held(&sa_manager->swap_guard);

	xe_map_memcpy_to(xe, &sa_manager->shadow->vmap,
			 drm_suballoc_soffset(sa_bo),
			 xe_sa_bo_cpu_addr(sa_bo),
			 drm_suballoc_size(sa_bo));
}
161 
162 /**
163  * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
164  * @sa_manager: the &xe_sa_manager
165  * @size: number of bytes we want to suballocate
166  * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
167  *
168  * Try to make a suballocation of size @size.
169  *
170  * Return: a &drm_suballoc, or an ERR_PTR.
171  */
172 struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
173 {
174 	/*
175 	 * BB to large, return -ENOBUFS indicating user should split
176 	 * array of binds into smaller chunks.
177 	 */
178 	if (size > sa_manager->base.size)
179 		return ERR_PTR(-ENOBUFS);
180 
181 	return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
182 }
183 
/**
 * xe_sa_bo_alloc() - Allocate uninitialized suballoc object.
 * @gfp: gfp flags used for memory allocation.
 *
 * Allocate memory for an uninitialized suballoc object. Intended usage is
 * allocate memory for suballoc object outside of a reclaim tainted context
 * and then be initialized at a later time in a reclaim tainted context.
 * The object is later initialized with xe_sa_bo_init().
 *
 * Return: a new uninitialized suballoc object, or an ERR_PTR(-ENOMEM).
 */
struct drm_suballoc *xe_sa_bo_alloc(gfp_t gfp)
{
	/* Pure allocation; no manager state is touched here. */
	return drm_suballoc_alloc(gfp);
}
198 
/**
 * xe_sa_bo_init() - Initialize a suballocation.
 * @sa_manager: pointer to the sa_manager
 * @sa: The struct drm_suballoc.
 * @size: number of bytes we want to suballocate.
 *
 * Try to make a suballocation on a pre-allocated suballoc object of size
 * @size. @sa is expected to come from xe_sa_bo_alloc(). Unlike
 * __xe_sa_bo_new(), no pre-check against the manager size is done here;
 * drm_suballoc_insert() decides whether the request fits.
 *
 * Return: zero on success, errno on failure.
 */
int xe_sa_bo_init(struct xe_sa_manager *sa_manager, struct drm_suballoc *sa, size_t size)
{
	return drm_suballoc_insert(&sa_manager->base, sa, size, true, 0);
}
213 
214 /**
215  * xe_sa_bo_flush_write() - Copy the data from the sub-allocation to the GPU memory.
216  * @sa_bo: the &drm_suballoc to flush
217  */
218 void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
219 {
220 	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
221 	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
222 
223 	if (!sa_manager->bo->vmap.is_iomem)
224 		return;
225 
226 	xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
227 			 xe_sa_bo_cpu_addr(sa_bo),
228 			 drm_suballoc_size(sa_bo));
229 }
230 
231 /**
232  * xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation.
233  * @sa_bo: the &drm_suballoc to sync
234  */
235 void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo)
236 {
237 	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
238 	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
239 
240 	if (!sa_manager->bo->vmap.is_iomem)
241 		return;
242 
243 	xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap,
244 			   drm_suballoc_soffset(sa_bo),
245 			   drm_suballoc_size(sa_bo));
246 }
247 
/**
 * xe_sa_bo_free() - Release a suballocation back to its manager.
 * @sa_bo: the suballocation to free
 * @fence: fence associated with the last use of the suballocation
 *	   (NOTE(review): presumably the range is only recycled once @fence
 *	   signals, per drm_suballoc_free() semantics — confirm)
 */
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
		   struct dma_fence *fence)
{
	drm_suballoc_free(sa_bo, fence);
}
253