xref: /linux/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c (revision 72bea132f3680ee51e7ed2cee62892b6f5121909)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2022 Intel Corporation
 * Copyright (C) 2021-2022 Red Hat
 */

#include <drm/drm_managed.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_res_cursor.h"
#include "xe_ttm_vram_mgr.h"

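/* Return the first buddy block on @list, or NULL if the list is empty. */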
static inline struct drm_buddy_block *
xe_ttm_vram_mgr_first_block(struct list_head *list)
{
	return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

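/*
 * Check whether the blocks on @head, walked in list order, cover one
 * physically contiguous range of buddy offsets. An empty list is not
 * considered contiguous.
 */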
static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm,
						    struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = xe_ttm_vram_mgr_first_block(head);
	if (!block)
		return false;

	while (head != block->link.next) {
		start = drm_buddy_block_offset(block);
		size = drm_buddy_block_size(mm, block);

		block = list_entry(block->link.next, struct drm_buddy_block,
				   link);
		if (start + size != drm_buddy_block_offset(block))
			return false;
	}

	return true;
}

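/*
 * Manager .alloc hook: back a TTM resource with VRAM from the buddy
 * allocator, splitting the request into chunks of at most 2GiB and
 * tracking how much of the allocation lies in CPU-visible VRAM.
 */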
static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres;
	struct drm_buddy *mm = &mgr->mm;
	u64 size, remaining_size, min_page_size;
	unsigned long lpfn;
	int err;

	lpfn = place->lpfn;
	if (!lpfn || lpfn > man->size >> PAGE_SHIFT)
		lpfn = man->size >> PAGE_SHIFT;

	if (tbo->base.size >> PAGE_SHIFT > (lpfn - place->fpfn))
		return -E2BIG; /* don't trigger eviction for the impossible */

	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > man->size) {
		err = -ENOSPC;
		goto error_fini;
	}

	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->fpfn || lpfn != man->size >> PAGE_SHIFT)
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	if (WARN_ON(!vres->base.size)) {
		err = -EINVAL;
		goto error_fini;
	}
	size = vres->base.size;

	min_page_size = mgr->default_page_size;
	if (tbo->page_alignment)
		min_page_size = tbo->page_alignment << PAGE_SHIFT;

	if (WARN_ON(min_page_size < mm->chunk_size)) {
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON((size > SZ_2G &&
		     (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
		err = -EINVAL;
		goto error_fini;
	}

	if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
		err = -EINVAL;
		goto error_fini;
	}

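	/*
	 * Allocations that must land entirely in the CPU-visible part of
	 * VRAM can be rejected up front if not enough visible space is left.
	 */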
	mutex_lock(&mgr->lock);
	if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
		mutex_unlock(&mgr->lock);
		err = -ENOSPC;
		goto error_fini;
	}

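	/*
	 * For contiguous placements that don't already pin an exact range,
	 * round the request up to a power of two so a single buddy block can
	 * back it; the excess is trimmed off again after the allocation.
	 */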
	if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_page_size = size;

		lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
	}

	remaining_size = size;
	do {
		/*
		 * Limit maximum size to 2GiB due to SG table limitations.
		 * FIXME: Should maybe be handled as part of sg construction.
		 */
		u64 alloc_size = min_t(u64, remaining_size, SZ_2G);

		err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
					     (u64)lpfn << PAGE_SHIFT,
					     alloc_size,
					     min_page_size,
					     &vres->blocks,
					     vres->flags);
		if (err)
			goto error_free_blocks;

		remaining_size -= alloc_size;
	} while (remaining_size);

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
			size = vres->base.size;
	}

	if (lpfn <= mgr->visible_size >> PAGE_SHIFT) {
		vres->used_visible_size = size;
	} else {
		struct drm_buddy_block *block;

		list_for_each_entry(block, &vres->blocks, link) {
			u64 start = drm_buddy_block_offset(block);

			if (start < mgr->visible_size) {
				u64 end = start + drm_buddy_block_size(mm, block);

				vres->used_visible_size +=
					min(end, mgr->visible_size) - start;
			}
		}
	}

	mgr->visible_avail -= vres->used_visible_size;
	mutex_unlock(&mgr->lock);

	if (!(vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) &&
	    xe_is_vram_mgr_blocks_contiguous(mm, &vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	/*
	 * For some kernel objects we still rely on the start when io mapping
	 * the object.
	 */
	if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) {
		struct drm_buddy_block *block = list_first_entry(&vres->blocks,
								 typeof(*block),
								 link);

		vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT;
	} else {
		vres->base.start = XE_BO_INVALID_OFFSET;
	}

	*res = &vres->base;
	return 0;

error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return err;
}

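/*
 * Manager .free hook: hand the buddy blocks back to the allocator, return
 * the CPU-visible portion to the visible pool and free the resource.
 */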
static void xe_ttm_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;

	mutex_lock(&mgr->lock);
	drm_buddy_free_list(mm, &vres->blocks);
	mgr->visible_avail += vres->used_visible_size;
	mutex_unlock(&mgr->lock);

	ttm_resource_fini(man, res);

	kfree(vres);
}

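/* Manager .debug hook: dump allocator and CPU-visible VRAM state. */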
static void xe_ttm_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;

	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)mgr->visible_avail >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)mgr->visible_size >> 20);

	drm_buddy_print(mm, printer);
	mutex_unlock(&mgr->lock);
	drm_printf(printer, "man size:%llu\n", man->size);
}

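/*
 * Manager .intersects hook: report whether any part of @res overlaps the
 * page range described by @place.
 */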
static bool xe_ttm_vram_mgr_intersects(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	if (!place->fpfn && !place->lpfn)
		return true;

	if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
		return vres->used_visible_size > 0;

	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		if (place->fpfn < lpfn && place->lpfn > fpfn)
			return true;
	}

	return false;
}

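/*
 * Manager .compatible hook: report whether @res already lies entirely
 * within the page range described by @place.
 */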
static bool xe_ttm_vram_mgr_compatible(struct ttm_resource_manager *man,
				       struct ttm_resource *res,
				       const struct ttm_place *place,
				       size_t size)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
	struct xe_ttm_vram_mgr_resource *vres =
		to_xe_ttm_vram_mgr_resource(res);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	if (!place->fpfn && !place->lpfn)
		return true;

	if (!place->fpfn && place->lpfn == mgr->visible_size >> PAGE_SHIFT)
		return vres->used_visible_size == size;

	list_for_each_entry(block, &vres->blocks, link) {
		unsigned long fpfn =
			drm_buddy_block_offset(block) >> PAGE_SHIFT;
		unsigned long lpfn = fpfn +
			(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

		if (fpfn < place->fpfn || lpfn > place->lpfn)
			return false;
	}

	return true;
}

static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = {
	.alloc	= xe_ttm_vram_mgr_new,
	.free	= xe_ttm_vram_mgr_del,
	.intersects = xe_ttm_vram_mgr_intersects,
	.compatible = xe_ttm_vram_mgr_compatible,
	.debug	= xe_ttm_vram_mgr_debug
};

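/*
 * drmm teardown action: disable the manager, evict remaining buffers and
 * tear down the buddy allocator. Bails out early if eviction fails.
 */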
static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_ttm_vram_mgr *mgr = arg;
	struct ttm_resource_manager *man = &mgr->manager;

	ttm_resource_manager_set_used(man, false);

	if (ttm_resource_manager_evict_all(&xe->ttm, man))
		return;

	WARN_ON_ONCE(mgr->visible_avail != mgr->visible_size);

	drm_buddy_fini(&mgr->mm);

	ttm_resource_manager_cleanup(&mgr->manager);

	ttm_set_driver_manager(&xe->ttm, mgr->mem_type, NULL);

	mutex_destroy(&mgr->lock);
}

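/**
 * __xe_ttm_vram_mgr_init - Initialize a VRAM manager and register it with TTM
 * @xe: xe device
 * @mgr: manager to initialize
 * @mem_type: TTM memory type the manager will serve
 * @size: total size of the managed VRAM, in bytes
 * @io_size: size of the CPU-visible (mappable) portion, in bytes
 * @default_page_size: default minimum allocation granularity, in bytes
 *
 * Sets up the buddy allocator, publishes the manager to TTM and registers a
 * managed (drmm) action that tears it down again on driver unload.
 *
 * Return: 0 on success, negative error code on failure.
 */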
int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
			   u32 mem_type, u64 size, u64 io_size,
			   u64 default_page_size)
{
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	man->func = &xe_ttm_vram_mgr_func;
	mgr->mem_type = mem_type;
	mutex_init(&mgr->lock);
	mgr->default_page_size = default_page_size;
	mgr->visible_size = io_size;
	mgr->visible_avail = io_size;

	ttm_resource_manager_init(man, &xe->ttm, size);
	err = drm_buddy_init(&mgr->mm, man->size, default_page_size);
	if (err)
		return err;

	ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager);
	ttm_resource_manager_set_used(&mgr->manager, true);

	return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
}

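/**
 * xe_ttm_vram_mgr_init - Initialize the VRAM manager for a tile
 * @tile: tile whose VRAM region should be managed
 * @mgr: manager to initialize
 *
 * Wrapper around __xe_ttm_vram_mgr_init() that derives the manager
 * parameters from the tile's VRAM region.
 *
 * Return: 0 on success, negative error code on failure.
 */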
int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_mem_region *vram = &tile->mem.vram;

	mgr->vram = vram;
	return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
				      vram->usable_size, vram->io_size,
				      PAGE_SIZE);
}

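/**
 * xe_ttm_vram_mgr_alloc_sgt - Build a DMA-mapped sg_table for a VRAM resource
 * @xe: xe device owning the resource
 * @res: VRAM resource to export
 * @offset: byte offset into the resource
 * @length: number of bytes to map
 * @dev: device the mapping is created for
 * @dir: DMA transfer direction
 * @sgt: returned sg_table, to be released with xe_ttm_vram_mgr_free_sgt()
 *
 * Maps the requested range with dma_map_resource(), using addresses relative
 * to the VRAM region's io_start, with one scatterlist entry per DRM_BUDDY
 * block spanned by the range. Only resources that are entirely CPU-visible
 * can be exported; otherwise -EOPNOTSUPP is returned.
 *
 * Return: 0 on success, negative error code on failure.
 */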
int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
			      struct ttm_resource *res,
			      u64 offset, u64 length,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt)
{
	struct xe_tile *tile = &xe->tiles[res->mem_type - XE_PL_VRAM0];
	struct xe_ttm_vram_mgr_resource *vres = to_xe_ttm_vram_mgr_resource(res);
	struct xe_res_cursor cursor;
	struct scatterlist *sg;
	int num_entries = 0;
	int i, r;

	if (vres->used_visible_size < res->size)
		return -EOPNOTSUPP;

	*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_BUDDY blocks to export */
	xe_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
		xe_res_next(&cursor, cursor.size);
	}

	r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
	if (r)
		goto error_free;

	/* Initialize scatterlist nodes of sg_table */
	for_each_sgtable_sg((*sgt), sg, i)
		sg->length = 0;

	/*
	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
	 * @note: Use iterator api to get the first DRM_BUDDY block
	 * and the number of bytes from it. Access the following
	 * DRM_BUDDY block(s) if more of the buffer needs to be exported
	 */
	xe_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
		phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
		size_t size = cursor.size;
		dma_addr_t addr;

		addr = dma_map_resource(dev, phys, size, dir,
					DMA_ATTR_SKIP_CPU_SYNC);
		r = dma_mapping_error(dev, addr);
		if (r)
			goto error_unmap;

		sg_set_page(sg, NULL, size, 0);
		sg_dma_address(sg) = addr;
		sg_dma_len(sg) = size;

		xe_res_next(&cursor, cursor.size);
	}

	return 0;

error_unmap:
	for_each_sgtable_sg((*sgt), sg, i) {
		if (!sg->length)
			continue;

		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	}
	sg_free_table(*sgt);

error_free:
	kfree(*sgt);
	return r;
}

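/**
 * xe_ttm_vram_mgr_free_sgt - Undo xe_ttm_vram_mgr_alloc_sgt()
 * @dev: device the mapping was created for
 * @dir: DMA transfer direction used for the mapping
 * @sgt: sg_table to unmap and free
 *
 * Unmaps every scatterlist entry and frees the table allocated by
 * xe_ttm_vram_mgr_alloc_sgt().
 */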
void xe_ttm_vram_mgr_free_sgt(struct device *dev, enum dma_data_direction dir,
			      struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_sg(sgt, sg, i)
		dma_unmap_resource(dev, sg->dma_address,
				   sg->length, dir,
				   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}

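/* Return the size of the CPU-visible part of the VRAM managed by @man. */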
u64 xe_ttm_vram_get_cpu_visible_size(struct ttm_resource_manager *man)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);

	return mgr->visible_size;
}

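/*
 * Report the current total and CPU-visible VRAM usage of @man, sampled
 * under the manager lock.
 */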
void xe_ttm_vram_get_used(struct ttm_resource_manager *man,
			  u64 *used, u64 *used_visible)
{
	struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);

	mutex_lock(&mgr->lock);
	*used = mgr->mm.size - mgr->mm.avail;
	*used_visible = mgr->visible_size - mgr->visible_avail;
	mutex_unlock(&mgr->lock);
}
481