/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _XE_RES_CURSOR_H_
#define _XE_RES_CURSOR_H_

#include <linux/scatterlist.h>

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
#include "xe_ttm_vram_mgr.h"

/**
 * struct xe_res_cursor - state for walking over vram_mgr, stolen_mgr and
 * gtt_mgr allocations
 * @start: start of the current chunk: a device offset for VRAM/stolen, an
 *	   offset into the current scatterlist entry for sg-backed cursors
 * @size: bytes left in the current chunk
 * @remaining: bytes left in the whole range being walked
 * @node: current &struct drm_buddy_block for VRAM/stolen, NULL otherwise
 * @mem_type: TTM memory type being walked (XE_PL_*)
 * @sgl: current scatterlist entry, NULL unless walking an sg table
 * @mm: buddy allocator owning the blocks (VRAM/stolen only)
 */
struct xe_res_cursor {
	u64 start;
	u64 size;
	u64 remaining;
	void *node;
	u32 mem_type;
	struct scatterlist *sgl;
	struct drm_buddy *mm;
};

/**
 * xe_res_get_buddy - return the buddy allocator backing a resource
 * @res: TTM resource whose manager is an xe_ttm_vram_mgr (VRAM or stolen)
 */
static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
{
	struct ttm_resource_manager *mgr;

	mgr = ttm_manager_type(res->bo->bdev, res->mem_type);
	return &to_xe_ttm_vram_mgr(mgr)->mm;
}

/**
 * xe_res_first - initialize a xe_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations covering @size bytes,
 * beginning at offset @start into @res.
 */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	cur->sgl = NULL;
	if (!res)
		goto fallback;

	XE_WARN_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct drm_buddy_block *block;
		struct list_head *head, *next;
		struct drm_buddy *mm = xe_res_get_buddy(res);

		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		while (start >= drm_buddy_block_size(mm, block)) {
			start -= drm_buddy_block_size(mm, block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		cur->mm = mm;
		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(mm, block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	}
	default:
		goto fallback;
	}

	return;

fallback:
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
}
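
/*
 * Illustrative sketch (not part of the original header): after
 * xe_res_first() the cursor points at the first chunk of the range. For
 * VRAM/stolen, cur.start is an offset in the buddy allocator's device
 * address space; for the XE_PL_TT fallback it is simply @start. @res below
 * is an assumed VRAM-backed TTM resource:
 *
 *	struct xe_res_cursor cur;
 *
 *	xe_res_first(res, 0, res->size, &cur);
 *	... cur.start/cur.size now describe the first contiguous chunk,
 *	    and cur.remaining == res->size ...
 */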

/*
 * Advance @cur to the scatterlist entry that contains cur->start and set
 * cur->size to the bytes remaining in that entry.
 */
static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
{
	struct scatterlist *sgl = cur->sgl;
	u64 start = cur->start;

	while (start >= sg_dma_len(sgl)) {
		start -= sg_dma_len(sgl);
		sgl = sg_next(sgl);
		XE_WARN_ON(!sgl);
	}

	cur->start = start;
	cur->size = sg_dma_len(sgl) - start;
	cur->sgl = sgl;
}

/**
 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
 *
 * @sg: scatter gather table to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations covering @size bytes,
 * beginning at offset @start into the table.
 */
static inline void xe_res_first_sg(const struct sg_table *sg,
				   u64 start, u64 size,
				   struct xe_res_cursor *cur)
{
	XE_WARN_ON(!sg);
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->size = 0;
	cur->sgl = sg->sgl;
	cur->mem_type = XE_PL_TT;
	__xe_res_sg_next(cur);
}
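
/*
 * Usage sketch (assumptions flagged): walking a dma-mapped sg table, e.g.
 * for a system-memory BO. @sgt and @size are assumed inputs and
 * use_dma_segment() is a hypothetical consumer:
 *
 *	struct xe_res_cursor cur;
 *
 *	for (xe_res_first_sg(sgt, 0, size, &cur); cur.remaining;
 *	     xe_res_next(&cur, cur.size))
 *		use_dma_segment(xe_res_dma(&cur), cur.size);
 */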

/**
 * xe_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_WARN_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	if (cur->size > size) {
		cur->size -= size;
		cur->start += size;
		return;
	}

	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		while (start >= drm_buddy_block_size(cur->mm, block)) {
			start -= drm_buddy_block_size(cur->mm, block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}
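
/*
 * Typical walk combining the helpers above (illustrative sketch, not from
 * the original header): consume a resource one contiguous chunk at a time.
 * program_chunk() is a made-up consumer:
 *
 *	struct xe_res_cursor cur;
 *
 *	for (xe_res_first(res, 0, res->size, &cur); cur.remaining;
 *	     xe_res_next(&cur, cur.size))
 *		program_chunk(cur.start, cur.size);
 */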

/**
 * xe_res_dma - return dma address of cursor at current position
 *
 * @cur: the cursor to return the dma address from
 *
 * Return: the DMA address of the current scatterlist position for sg-backed
 * cursors, otherwise the device-local offset held in cur->start.
 */
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
}
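
/*
 * Sketch (hypothetical consumer, assumptions flagged): xe_res_dma() is what
 * a page-table writer would feed into each entry while walking, regardless
 * of whether the cursor is sg- or buddy-backed. write_pte() is made up:
 *
 *	u64 addr = xe_res_dma(&cur);
 *
 *	write_pte(ptes++, addr, cur.size);
 */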
#endif