/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _XE_RES_CURSOR_H_
#define _XE_RES_CURSOR_H_

#include <linux/scatterlist.h>

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
#include "xe_ttm_vram_mgr.h"

/**
 * struct xe_res_cursor - state for walking over vram_mgr, stolen_mgr and gtt_mgr allocations
 * @start: current position: a device offset for VRAM/stolen, an offset into
 *         the current segment for sg-backed cursors
 * @size: contiguous bytes available at the current position
 * @remaining: bytes left to walk in the whole range
 * @node: current &struct drm_buddy_block for VRAM/stolen walks, NULL otherwise
 * @mem_type: TTM memory type of the underlying resource
 * @sgl: current scatterlist entry for sg walks, NULL otherwise
 * @mm: buddy allocator that the blocks belong to
 */
struct xe_res_cursor {
	u64 start;
	u64 size;
	u64 remaining;
	void *node;
	u32 mem_type;
	struct scatterlist *sgl;
	struct drm_buddy *mm;
};

/**
 * xe_res_get_buddy - get the buddy allocator backing a resource
 *
 * @res: resource managed by a VRAM or stolen memory manager
 *
 * Return: the &struct drm_buddy used by the resource's memory manager.
 */
static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
{
	struct ttm_resource_manager *mgr;

	mgr = ttm_manager_type(res->bo->bdev, res->mem_type);
	return &to_xe_ttm_vram_mgr(mgr)->mm;
}

/**
 * xe_res_first - initialize a xe_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and
 * @start + @size.
 */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	cur->sgl = NULL;
	if (!res)
		goto fallback;

	XE_WARN_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct drm_buddy_block *block;
		struct list_head *head, *next;
		struct drm_buddy *mm = xe_res_get_buddy(res);

		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip whole blocks until @start falls inside one */
		while (start >= drm_buddy_block_size(mm, block)) {
			start -= drm_buddy_block_size(mm, block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		cur->mm = mm;
		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(mm, block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	}
	default:
		goto fallback;
	}

	return;

fallback:
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
}
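
/*
 * Illustrative sketch, not part of the driver API: xe_res_first() leaves
 * the cursor on the first chunk of the range, so the backing offset of a
 * byte inside a (non-NULL) resource can be read straight from the cursor
 * without walking further. The helper name below is hypothetical.
 */
static inline u64 xe_res_example_offset_of(struct ttm_resource *res,
					   u64 offset, u64 *contig)
{
	struct xe_res_cursor cur;

	xe_res_first(res, offset, res->size - offset, &cur);
	if (contig)
		*contig = cur.size; /* contiguous bytes from @offset */
	return cur.start; /* device offset for VRAM/stolen resources */
}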

/*
 * Skip forward to the scatterlist segment that contains cur->start and
 * rebase cur->start and cur->size to be relative to that segment.
 */
static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
{
	struct scatterlist *sgl = cur->sgl;
	u64 start = cur->start;

	while (start >= sg_dma_len(sgl)) {
		start -= sg_dma_len(sgl);
		sgl = sg_next(sgl);
		XE_WARN_ON(!sgl);
	}

	cur->start = start;
	cur->size = sg_dma_len(sgl) - start;
	cur->sgl = sgl;
}

/**
 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
 *
 * @sg: scatter gather table to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and
 * @start + @size.
 */
static inline void xe_res_first_sg(const struct sg_table *sg,
				   u64 start, u64 size,
				   struct xe_res_cursor *cur)
{
	XE_WARN_ON(!sg);
	XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
		   !IS_ALIGNED(size, PAGE_SIZE));
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->size = 0;
	cur->sgl = sg->sgl;
	cur->mem_type = XE_PL_TT;
	__xe_res_sg_next(cur);
}
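
/*
 * Illustrative sketch, not part of the driver API: right after
 * xe_res_first_sg(), cur.size holds the contiguous bytes available at the
 * current position while cur.remaining holds the whole range, so a single
 * comparison tells whether the DMA mapping of the (page-aligned) range is
 * fully contiguous. The helper name below is hypothetical.
 */
static inline bool xe_res_example_is_contiguous(const struct sg_table *sg,
						u64 start, u64 size)
{
	struct xe_res_cursor cur;

	xe_res_first_sg(sg, start, size, &cur);
	return cur.size >= cur.remaining;
}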

/**
 * xe_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_WARN_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/* Advance within the current node/segment if it is big enough */
	if (cur->size > size) {
		cur->size -= size;
		cur->start += size;
		return;
	}

	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		/* Consume the rest of this block and advance into the next */
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		while (start >= drm_buddy_block_size(cur->mm, block)) {
			start -= drm_buddy_block_size(cur->mm, block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}
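
/*
 * Illustrative sketch, not part of the driver API: the canonical walk
 * pairs xe_res_first() with xe_res_next(), consuming cur.size bytes per
 * step until cur.remaining reaches zero. Counting the steps yields the
 * number of contiguous chunks backing a (non-NULL) resource. The helper
 * name below is hypothetical.
 */
static inline u64 xe_res_example_count_chunks(struct ttm_resource *res)
{
	struct xe_res_cursor cur;
	u64 chunks = 0;

	for (xe_res_first(res, 0, res->size, &cur); cur.remaining;
	     xe_res_next(&cur, cur.size))
		chunks++;

	return chunks;
}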

/**
 * xe_res_dma - return DMA address of cursor at current position
 *
 * @cur: the cursor to return the DMA address from
 *
 * Return: the DMA address of the current scatterlist segment plus offset
 * for sg-backed cursors, otherwise the cursor's current device offset.
 */
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
}
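
/*
 * Illustrative sketch, not part of the driver API: combining
 * xe_res_first_sg(), xe_res_next() and xe_res_dma() yields one DMA address
 * per page of a mapped sg table, the pattern used when filling page-table
 * entries. @start and @size must be page aligned, matching
 * xe_res_first_sg(). The helper name and callback are hypothetical.
 */
static inline void xe_res_example_for_each_page(const struct sg_table *sg,
						u64 start, u64 size,
						void (*fn)(u64 dma, void *arg),
						void *arg)
{
	struct xe_res_cursor cur;

	for (xe_res_first_sg(sg, start, size, &cur); cur.remaining;
	     xe_res_next(&cur, PAGE_SIZE))
		fn(xe_res_dma(&cur), arg);
}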

#endif