/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _XE_RES_CURSOR_H_
#define _XE_RES_CURSOR_H_

#include <linux/scatterlist.h>

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
#include "xe_ttm_vram_mgr.h"

/**
 * struct xe_res_cursor - state for walking over vram_mgr, stolen_mgr and
 * gtt_mgr allocations
 * @start: current position, as a byte offset into the underlying memory
 * @size: bytes remaining in the current contiguous node
 * @remaining: bytes remaining in the whole range being walked
 * @node: current &struct drm_buddy_block for VRAM/stolen walks, NULL otherwise
 * @mem_type: TTM memory type of the resource being walked
 * @sgl: current scatterlist entry for system-memory walks, NULL otherwise
 * @mm: &struct drm_buddy the blocks were allocated from
 */
struct xe_res_cursor {
	u64 start;
	u64 size;
	u64 remaining;
	void *node;
	u32 mem_type;
	struct scatterlist *sgl;
	struct drm_buddy *mm;
};

static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
{
	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);

	if (res->mem_type != XE_PL_STOLEN) {
		return &xe->tiles[res->mem_type - XE_PL_VRAM0].mem.vram_mgr->mm;
	} else {
		struct ttm_resource_manager *mgr =
			ttm_manager_type(&xe->ttm, XE_PL_STOLEN);

		return &to_xe_ttm_vram_mgr(mgr)->mm;
	}
}

/**
 * xe_res_first - initialize a xe_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and
 * @start + @size.
 */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	cur->sgl = NULL;
	if (!res)
		goto fallback;

	XE_BUG_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct drm_buddy_block *block;
		struct list_head *head, *next;
		struct drm_buddy *mm = xe_res_get_buddy(res);

		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip whole blocks until @start falls inside one */
		while (start >= drm_buddy_block_size(mm, block)) {
			start -= drm_buddy_block_size(mm, block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		cur->mm = mm;
		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(mm, block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	}
	default:
		goto fallback;
	}

	return;

fallback:
	/* Treat the range as linear, e.g. a system-memory or NULL resource */
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
	return;
}

static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
{
	struct scatterlist *sgl = cur->sgl;
	u64 start = cur->start;

	/* Walk forward until @start falls inside an sg entry */
	while (start >= sg_dma_len(sgl)) {
		start -= sg_dma_len(sgl);
		sgl = sg_next(sgl);
		XE_BUG_ON(!sgl);
	}

	cur->start = start;
	cur->size = sg_dma_len(sgl) - start;
	cur->sgl = sgl;
}

/**
 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
 *
 * @sg: scatter gather table to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and
 * @start + @size.
 */
static inline void xe_res_first_sg(const struct sg_table *sg,
				   u64 start, u64 size,
				   struct xe_res_cursor *cur)
{
	XE_BUG_ON(!sg);
	XE_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
		  !IS_ALIGNED(size, PAGE_SIZE));
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->size = 0;
	cur->sgl = sg->sgl;
	cur->mem_type = XE_PL_TT;
	__xe_res_sg_next(cur);
}

/**
 * xe_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_BUG_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/* Advance within the current node if it still has bytes left */
	if (cur->size > size) {
		cur->size -= size;
		cur->start += size;
		return;
	}

	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		/* Bytes to skip past the end of the current block */
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		while (start >= drm_buddy_block_size(cur->mm, block)) {
			start -= drm_buddy_block_size(cur->mm, block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}
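
/*
 * Example: walk a resource in bounded, physically contiguous steps, roughly
 * as a migration loop would. Illustrative sketch only: the function name and
 * the SZ_2M step bound are made up for this example and are not part of the
 * xe API. cur.size never exceeds cur.remaining, so stepping by at most
 * cur.size is always legal; stepping by exactly cur.size moves the cursor to
 * the start of the next node.
 */
static inline void xe_res_example_walk(struct ttm_resource *res)
{
	struct xe_res_cursor cur;

	xe_res_first(res, 0, res->size, &cur);
	while (cur.remaining) {
		/* One physically contiguous span: @step bytes at cur.start */
		u64 step = min_t(u64, cur.size, SZ_2M);

		/* ...emit a copy/clear/bind for [cur.start, cur.start + step)... */
		xe_res_next(&cur, step);
	}
}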

/**
 * xe_res_dma - return dma address of cursor at current position
 *
 * @cur: the cursor to return the dma address from
 *
 * Return: the DMA address for an sg-backed cursor, otherwise the current
 * memory offset held in cur->start.
 */
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
}
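
/*
 * Example: visit the DMA address of each page of an sg-backed range, e.g.
 * when building PTEs for a system-memory BO. Illustrative sketch only: the
 * function name is made up, and @sgt is assumed to be an already dma-mapped
 * sg_table whose range is page aligned, as xe_res_first_sg() requires.
 */
static inline void xe_res_example_for_each_page(const struct sg_table *sgt,
						u64 start, u64 size)
{
	struct xe_res_cursor cur;

	for (xe_res_first_sg(sgt, start, size, &cur); cur.remaining;
	     xe_res_next(&cur, PAGE_SIZE)) {
		u64 dma_addr = xe_res_dma(&cur);

		(void)dma_addr;	/* e.g. encode into a PTE */
	}
}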
#endif