// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/cleanup.h>
#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_sa.h"

#define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K

static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
{
	return container_of(cache, struct xe_guc, buf);
}

static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
{
	return guc_to_gt(cache_to_guc(cache));
}

static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
{
	struct xe_gt *gt = cache_to_gt(cache);
	struct xe_sa_manager *sam;

	sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32));
	if (IS_ERR(sam))
		return PTR_ERR(sam);
	cache->sam = sam;

	xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
		  xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
		  __builtin_return_address(0));
	return 0;
}

/**
 * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
 * @cache: the &xe_guc_buf_cache to initialize
 *
 * The Buffer Cache allows obtaining a reusable buffer that can be used to pass
 * data to the GuC or read data from the GuC without the need to create an
 * ad-hoc allocation.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
{
	return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE);
}

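/*
 * Illustrative sketch, not driver code: the cache is expected to be set up
 * once during GuC initialization. The &xe_guc_buf_cache is embedded in
 * &struct xe_guc as the @buf member, so a hypothetical init path could do:
 *
 *	int err = xe_guc_buf_cache_init(&guc->buf);
 *
 *	if (err)
 *		return err;
 */
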
/**
 * xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache.
 * @cache: the &xe_guc_buf_cache to initialize
 * @size: the requested buffer size in bytes
 *
 * Like xe_guc_buf_cache_init(), except it allows the caller to make the cache
 * buffer larger, allowing it to accommodate larger objects.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size)
{
	return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size));
}

/**
 * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
 * @cache: the &xe_guc_buf_cache to query
 *
 * Return: the size of the largest reusable buffer (in dwords)
 */
u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
{
	return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
}

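/*
 * Illustrative sketch, not driver code: a caller can use the reported
 * capacity to reject requests that would never fit into the cache
 * (num_dwords and the error code are placeholders):
 *
 *	if (num_dwords > xe_guc_buf_cache_dwords(&guc->buf))
 *		return -E2BIG;
 */
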
/**
 * xe_guc_buf_reserve() - Reserve a new sub-allocation.
 * @cache: the &xe_guc_buf_cache from which to reserve the sub-allocation
 * @dwords: the requested size of the buffer in dwords
 *
 * Use xe_guc_buf_is_valid() to check if the returned buffer reference is valid.
 * Must use xe_guc_buf_release() to release a sub-allocation.
 *
 * Return: a &xe_guc_buf of the new sub-allocation.
 */
struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
{
	struct drm_suballoc *sa;

	if (cache->sam)
		sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
	else
		sa = ERR_PTR(-EOPNOTSUPP);

	return (struct xe_guc_buf){ .sa = sa };
}

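/*
 * Illustrative sketch, not driver code, of the expected reserve/use/release
 * sequence; num_dwords, FIRST_DWORD and send_to_guc() are placeholders for
 * whatever the caller prepares and however it consumes the GPU address:
 *
 *	struct xe_guc_buf buf = xe_guc_buf_reserve(&guc->buf, num_dwords);
 *	u32 *cmd;
 *	int err;
 *
 *	if (!xe_guc_buf_is_valid(buf))
 *		return -ENOBUFS;
 *
 *	cmd = xe_guc_buf_cpu_ptr(buf);
 *	cmd[0] = FIRST_DWORD;
 *	err = send_to_guc(guc, xe_guc_buf_flush(buf));
 *	xe_guc_buf_release(buf);
 */
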
/**
 * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
 * @cache: the &xe_guc_buf_cache from which to reserve the sub-allocation
 * @data: the data to flush into the sub-allocation
 * @size: the size of the data
 *
 * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
 *
 * Return: a &xe_guc_buf of the new sub-allocation.
 */
struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
				       const void *data, size_t size)
{
	struct drm_suballoc *sa;

	sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
	if (!IS_ERR(sa))
		memcpy(xe_sa_bo_cpu_addr(sa), data, size);

	return (struct xe_guc_buf){ .sa = sa };
}

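/*
 * Illustrative sketch, not driver code: a prepared CPU-side blob can be
 * handed over in one step; blob, blob_size and send_to_guc() are
 * placeholders:
 *
 *	struct xe_guc_buf buf = xe_guc_buf_from_data(&guc->buf, blob, blob_size);
 *	int err;
 *
 *	if (!xe_guc_buf_is_valid(buf))
 *		return -ENOBUFS;
 *
 *	err = send_to_guc(guc, xe_guc_buf_flush(buf));
 *	xe_guc_buf_release(buf);
 */
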
/**
 * xe_guc_buf_release() - Release a sub-allocation.
 * @buf: the &xe_guc_buf to release
 *
 * Releases a sub-allocation reserved by xe_guc_buf_reserve() or
 * xe_guc_buf_from_data().
 */
void xe_guc_buf_release(const struct xe_guc_buf buf)
{
	if (xe_guc_buf_is_valid(buf))
		xe_sa_bo_free(buf.sa, NULL);
}

/**
 * xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation.
 * @buf: the &xe_guc_buf to sync
 *
 * Return: a CPU pointer of the sub-allocation.
 */
void *xe_guc_buf_sync_read(const struct xe_guc_buf buf)
{
	xe_sa_bo_sync_read(buf.sa);

	return xe_sa_bo_cpu_addr(buf.sa);
}

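/*
 * Illustrative sketch, not driver code: once the GuC has filled the buffer,
 * e.g. in reply to an action that was given its GPU address, the CPU copy
 * is refreshed and read back; query_guc() and the response layout are
 * placeholders:
 *
 *	err = query_guc(guc, xe_guc_buf_gpu_addr(buf));
 *	if (!err)
 *		response = xe_guc_buf_sync_read(buf);
 */
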
/**
 * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
 * @buf: the &xe_guc_buf to flush
 *
 * Return: a GPU address of the sub-allocation.
 */
u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
{
	xe_sa_bo_flush_write(buf.sa);
	return xe_sa_bo_gpu_addr(buf.sa);
}

/**
 * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
 * @buf: the &xe_guc_buf to query
 *
 * Return: a CPU pointer of the sub-allocation.
 */
void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
{
	return xe_sa_bo_cpu_addr(buf.sa);
}

/**
 * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
 * @buf: the &xe_guc_buf to query
 *
 * Return: a GPU address of the sub-allocation.
 */
u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
{
	return xe_sa_bo_gpu_addr(buf.sa);
}

/**
 * xe_guc_cache_gpu_addr_from_ptr() - Look up a GPU address using the pointer.
 * @cache: the &xe_guc_buf_cache with sub-allocations
 * @ptr: the CPU pointer of the sub-allocation
 * @size: the size of the data
 *
 * Return: a GPU address on success or 0 if the pointer was unrelated.
 */
u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
{
	ptrdiff_t offset = ptr - cache->sam->cpu_ptr;

	if (offset < 0 || offset + size > cache->sam->base.size)
		return 0;

	return xe_sa_manager_gpu_addr(cache->sam) + offset;
}

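/*
 * Illustrative sketch, not driver code: a caller that only kept the CPU
 * pointer of a sub-allocation can still recover the GPU address to report
 * to the GuC; payload and payload_size are placeholders:
 *
 *	u64 addr = xe_guc_cache_gpu_addr_from_ptr(&guc->buf, payload, payload_size);
 *
 *	if (!addr)
 *		return -EINVAL;
 */
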
#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_guc_buf_kunit.c"
#endif