xref: /linux/drivers/gpu/drm/xe/xe_guc_buf.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2024 Intel Corporation
4  */
5 
6 #include <linux/cleanup.h>
7 #include <drm/drm_managed.h>
8 
9 #include "xe_bo.h"
10 #include "xe_gt_printk.h"
11 #include "xe_guc.h"
12 #include "xe_guc_buf.h"
13 #include "xe_sa.h"
14 
15 #define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K
16 
17 static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
18 {
19 	return container_of(cache, struct xe_guc, buf);
20 }
21 
/* Resolve the GT that owns the GuC this cache belongs to. */
static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
{
	struct xe_guc *guc = cache_to_guc(cache);

	return guc_to_gt(guc);
}
26 
27 static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
28 {
29 	struct xe_gt *gt = cache_to_gt(cache);
30 	struct xe_sa_manager *sam;
31 
32 	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32), 0);
33 	if (IS_ERR(sam))
34 		return PTR_ERR(sam);
35 	cache->sam = sam;
36 
37 	xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
38 		  xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
39 		  __builtin_return_address(0));
40 	return 0;
41 }
42 
43 /**
44  * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
45  * @cache: the &xe_guc_buf_cache to initialize
46  *
47  * The Buffer Cache allows to obtain a reusable buffer that can be used to pass
48  * data to GuC or read data from GuC without a need to create a ad-hoc allocation.
49  *
50  * Return: 0 on success or a negative error code on failure.
51  */
52 int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
53 {
54 	return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE);
55 }
56 
57 /**
58  * xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache.
59  * @cache: the &xe_guc_buf_cache to initialize
60  * @size: size in bytes
61  *
62  * Like xe_guc_buf_cache_init(), except it allows the caller to make the cache
63  * buffer larger, allowing to accommodate larger objects.
64  *
65  * Return: 0 on success or a negative error code on failure.
66  */
67 int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size)
68 {
69 	return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size));
70 }
71 
72 /**
73  * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
74  * @cache: the &xe_guc_buf_cache to query
75  *
76  * Return: a size of the largest reusable buffer (in dwords)
77  */
78 u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
79 {
80 	return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
81 }
82 
83 /**
84  * xe_guc_buf_reserve() - Reserve a new sub-allocation.
85  * @cache: the &xe_guc_buf_cache where reserve sub-allocation
86  * @dwords: the requested size of the buffer in dwords
87  *
88  * Use xe_guc_buf_is_valid() to check if returned buffer reference is valid.
89  * Must use xe_guc_buf_release() to release a sub-allocation.
90  *
91  * Return: a &xe_guc_buf of new sub-allocation.
92  */
93 struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
94 {
95 	struct drm_suballoc *sa;
96 
97 	if (cache->sam)
98 		sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
99 	else
100 		sa = ERR_PTR(-EOPNOTSUPP);
101 
102 	return (struct xe_guc_buf){ .sa = sa };
103 }
104 
105 /**
106  * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
107  * @cache: the &xe_guc_buf_cache where reserve sub-allocation
108  * @data: the data to flush the sub-allocation
109  * @size: the size of the data
110  *
111  * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
112  *
113  * Return: a &xe_guc_buf of new sub-allocation.
114  */
115 struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
116 				       const void *data, size_t size)
117 {
118 	struct drm_suballoc *sa;
119 
120 	sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
121 	if (!IS_ERR(sa))
122 		memcpy(xe_sa_bo_cpu_addr(sa), data, size);
123 
124 	return (struct xe_guc_buf){ .sa = sa };
125 }
126 
127 /**
128  * xe_guc_buf_release() - Release a sub-allocation.
129  * @buf: the &xe_guc_buf to release
130  *
131  * Releases a sub-allocation reserved by the xe_guc_buf_reserve().
132  */
133 void xe_guc_buf_release(const struct xe_guc_buf buf)
134 {
135 	if (xe_guc_buf_is_valid(buf))
136 		xe_sa_bo_free(buf.sa, NULL);
137 }
138 
139 /**
140  * xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation.
141  * @buf: the &xe_guc_buf to sync
142  *
143  * Return: a CPU pointer of the sub-allocation.
144  */
145 void *xe_guc_buf_sync_read(const struct xe_guc_buf buf)
146 {
147 	xe_sa_bo_sync_read(buf.sa);
148 
149 	return xe_sa_bo_cpu_addr(buf.sa);
150 }
151 
152 /**
153  * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
154  * @buf: the &xe_guc_buf to flush
155  *
156  * Return: a GPU address of the sub-allocation.
157  */
158 u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
159 {
160 	xe_sa_bo_flush_write(buf.sa);
161 	return xe_sa_bo_gpu_addr(buf.sa);
162 }
163 
164 /**
165  * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
166  * @buf: the &xe_guc_buf to query
167  *
168  * Return: a CPU pointer of the sub-allocation.
169  */
170 void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
171 {
172 	return xe_sa_bo_cpu_addr(buf.sa);
173 }
174 
175 /**
176  * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
177  * @buf: the &xe_guc_buf to query
178  *
179  * Return: a GPU address of the sub-allocation.
180  */
181 u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
182 {
183 	return xe_sa_bo_gpu_addr(buf.sa);
184 }
185 
186 /**
187  * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer.
188  * @cache: the &xe_guc_buf_cache with sub-allocations
189  * @ptr: the CPU pointer of the sub-allocation
190  * @size: the size of the data
191  *
192  * Return: a GPU address on success or 0 if the pointer was unrelated.
193  */
194 u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
195 {
196 	ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
197 
198 	if (offset < 0 || offset + size > cache->sam->base.size)
199 		return 0;
200 
201 	return xe_sa_manager_gpu_addr(cache->sam) + offset;
202 }
203 
204 #if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
205 #include "tests/xe_guc_buf_kunit.c"
206 #endif
207