xref: /linux/drivers/gpu/drm/xe/xe_sa.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_sa.h"

#include <linux/kernel.h>

#include <drm/drm_managed.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_map.h"

static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
{
	struct xe_sa_manager *sa_manager = arg;
	struct xe_bo *bo = sa_manager->bo;

	if (!bo) {
		drm_err(drm, "no bo for sa manager\n");
		return;
	}

	drm_suballoc_manager_fini(&sa_manager->base);

	if (sa_manager->is_iomem)
		kvfree(sa_manager->cpu_ptr);

	sa_manager->bo = NULL;
}

/**
 * __xe_sa_bo_manager_init() - Create and initialize the suballocator
 * @tile: the &xe_tile where the suballocator is created
 * @size: number of bytes to allocate
 * @guard: number of bytes to exclude from suballocations
 * @align: alignment for each suballocated chunk
 *
 * Prepares the suballocation manager for suballocations.
 *
 * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
 */
struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_sa_manager *sa_manager;
	u32 managed_size;
	struct xe_bo *bo;
	int ret;

	xe_tile_assert(tile, size > guard);
	managed_size = size - guard;

	sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
	if (!sa_manager)
		return ERR_PTR(-ENOMEM);

	bo = xe_managed_bo_create_pin_map(xe, tile, size,
					  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
					  XE_BO_FLAG_GGTT |
					  XE_BO_FLAG_GGTT_INVALIDATE |
					  XE_BO_FLAG_PINNED_NORESTORE);
	if (IS_ERR(bo)) {
		drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
			size / SZ_1K, bo);
		return ERR_CAST(bo);
	}
	sa_manager->bo = bo;
	sa_manager->is_iomem = bo->vmap.is_iomem;

	if (bo->vmap.is_iomem) {
		sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
		if (!sa_manager->cpu_ptr)
			return ERR_PTR(-ENOMEM);
	} else {
		sa_manager->cpu_ptr = bo->vmap.vaddr;
		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
	}

	drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
				       sa_manager);
	if (ret)
		return ERR_PTR(ret);

	return sa_manager;
}
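
/*
 * Example (editorial sketch, not part of the driver): a caller could set up
 * a suballocator with an illustrative 1 MiB pool, a 4 KiB guard and 64-byte
 * alignment; all three values here are assumptions, not taken from this file:
 *
 *	struct xe_sa_manager *mgr;
 *
 *	mgr = __xe_sa_bo_manager_init(tile, SZ_1M, SZ_4K, 64);
 *	if (IS_ERR(mgr))
 *		return PTR_ERR(mgr);
 *
 * No explicit teardown is needed: xe_sa_bo_manager_fini() runs automatically
 * as a drmm action when the drm_device is released.
 */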

/**
 * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
 * @sa_manager: the &xe_sa_manager
 * @size: number of bytes we want to suballocate
 * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
 *
 * Try to make a suballocation of size @size.
 *
 * Return: a &drm_suballoc, or an ERR_PTR.
 */
struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
{
	/*
	 * BB too large, return -ENOBUFS indicating the user should split
	 * the array of binds into smaller chunks.
	 */
	if (size > sa_manager->base.size)
		return ERR_PTR(-ENOBUFS);

	return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}
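
/*
 * Example (illustrative sketch; 'sa_manager' and 'bb_size' are assumed to
 * come from the caller): make a suballocation and handle the -ENOBUFS case
 * described in the comment above by splitting the work:
 *
 *	struct drm_suballoc *sa;
 *
 *	sa = __xe_sa_bo_new(sa_manager, bb_size, GFP_KERNEL);
 *	if (IS_ERR(sa)) {
 *		if (PTR_ERR(sa) == -ENOBUFS)
 *			... split the binds into smaller chunks and retry ...
 *		return PTR_ERR(sa);
 *	}
 */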

/**
 * xe_sa_bo_flush_write() - Copy the data from the sub-allocation to the GPU memory.
 * @sa_bo: the &drm_suballoc to flush
 */
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	if (!sa_manager->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_to(xe, &sa_manager->bo->vmap, drm_suballoc_soffset(sa_bo),
			 xe_sa_bo_cpu_addr(sa_bo),
			 drm_suballoc_size(sa_bo));
}

/**
 * xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation.
 * @sa_bo: the &drm_suballoc to sync
 */
void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo)
{
	struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
	struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);

	if (!sa_manager->bo->vmap.is_iomem)
		return;

	xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap,
			   drm_suballoc_soffset(sa_bo),
			   drm_suballoc_size(sa_bo));
}
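
/*
 * Example (sketch, assuming an iomem-backed BO, e.g. VRAM on dGFX): CPU
 * writes go through the shadow copy behind xe_sa_bo_cpu_addr() and only
 * reach the BO after an explicit flush; for system-memory BOs the CPU
 * pointer maps the BO directly, so both helpers above return early:
 *
 *	u32 *cmds = xe_sa_bo_cpu_addr(sa);
 *
 *	cmds[0] = ...;			... fill the chunk via the CPU pointer ...
 *	xe_sa_bo_flush_write(sa);	... copy the shadow buffer into the BO ...
 */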

/**
 * xe_sa_bo_free() - Free the suballocation.
 * @sa_bo: the &drm_suballoc to free
 * @fence: the &dma_fence to wait for before reusing the range, or NULL
 *
 * Returns the suballocated range to the pool once @fence signals, or
 * immediately if @fence is NULL.
 */
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
		   struct dma_fence *fence)
{
	drm_suballoc_free(sa_bo, fence);
}
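
/*
 * Example (sketch; 'sa' and 'fence' are assumed caller state): release the
 * suballocation once the GPU job using it has been queued, with 'fence'
 * being that job's completion fence so the range is only recycled after the
 * hardware is done with it:
 *
 *	xe_sa_bo_free(sa, fence);
 */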