xref: /linux/drivers/gpu/drm/xe/xe_res_cursor.h (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /*
3  * Copyright 2020 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 #ifndef __XE_RES_CURSOR_H__
25 #define __XE_RES_CURSOR_H__
26 
27 #include <linux/scatterlist.h>
28 
29 #include <drm/drm_mm.h>
30 #include <drm/ttm/ttm_placement.h>
31 #include <drm/ttm/ttm_range_manager.h>
32 #include <drm/ttm/ttm_resource.h>
33 #include <drm/ttm/ttm_tt.h>
34 
35 #include "xe_bo.h"
36 #include "xe_macros.h"
37 #include "xe_ttm_vram_mgr.h"
38 
/* Cursor state for walking over vram_mgr and gtt_mgr allocations */
struct xe_res_cursor {
	/* Start of the current chunk: device offset for VRAM walks, byte
	 * offset into the current sg entry for sg-backed walks (see
	 * xe_res_dma()).
	 */
	u64 start;
	/* Bytes remaining in the current chunk (block or sg entry). */
	u64 size;
	/* Bytes remaining in the whole range being walked. */
	u64 remaining;
	/* Current struct drm_buddy_block for VRAM walks, NULL otherwise. */
	void *node;
	/* TTM placement of the walked resource (XE_PL_*). */
	u32 mem_type;
	/* Current scatterlist entry for sg-backed walks, NULL otherwise. */
	struct scatterlist *sgl;
};
48 
49 /**
50  * xe_res_first - initialize a xe_res_cursor
51  *
52  * @res: TTM resource object to walk
53  * @start: Start of the range
54  * @size: Size of the range
55  * @cur: cursor object to initialize
56  *
57  * Start walking over the range of allocations between @start and @size.
58  */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	struct drm_buddy_block *block;
	struct list_head *head, *next;

	/* Not an sg-backed walk; xe_res_first_sg() sets this instead. */
	cur->sgl = NULL;
	if (!res)
		goto fallback;

	XE_BUG_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip whole blocks that lie entirely before @start. */
		while (start >= xe_ttm_vram_mgr_block_size(block)) {
			start -= xe_ttm_vram_mgr_block_size(block);

			/* Only advance while more blocks exist on the list. */
			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		/* First chunk: from @start to the end of this block, clamped
		 * to the requested @size.
		 */
		cur->start = xe_ttm_vram_mgr_block_start(block) + start;
		cur->size = min(xe_ttm_vram_mgr_block_size(block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	default:
		goto fallback;
	}

	return;

fallback:
	/* No resource, empty block list, or a placement without a block
	 * list: treat the range as one contiguous span at @start.
	 */
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
	return;
}
115 
116 static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
117 {
118 	struct scatterlist *sgl = cur->sgl;
119 	u64 start = cur->start;
120 
121 	while (start >= sg_dma_len(sgl)) {
122 		start -= sg_dma_len(sgl);
123 		sgl = sg_next(sgl);
124 		XE_BUG_ON(!sgl);
125 	}
126 
127 	cur->start = start;
128 	cur->size = sg_dma_len(sgl) - start;
129 	cur->sgl = sgl;
130 }
131 
132 /**
133  * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
134  *
135  * @sg: scatter gather table to walk
136  * @start: Start of the range
137  * @size: Size of the range
138  * @cur: cursor object to initialize
139  *
140  * Start walking over the range of allocations between @start and @size.
141  */
142 static inline void xe_res_first_sg(const struct sg_table *sg,
143 				   u64 start, u64 size,
144 				   struct xe_res_cursor *cur)
145 {
146 	XE_BUG_ON(!sg);
147 	XE_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
148 		  !IS_ALIGNED(size, PAGE_SIZE));
149 	cur->node = NULL;
150 	cur->start = start;
151 	cur->remaining = size;
152 	cur->size = 0;
153 	cur->sgl = sg->sgl;
154 	cur->mem_type = XE_PL_TT;
155 	__xe_res_sg_next(cur);
156 }
157 
158 /**
159  * xe_res_next - advance the cursor
160  *
161  * @cur: the cursor to advance
162  * @size: number of bytes to move forward
163  *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
165  */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_BUG_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/* Advance stays inside the current chunk: just bump the offsets. */
	if (cur->size > size) {
		cur->size -= size;
		cur->start += size;
		return;
	}

	/* sg-backed walk: let the helper skip to the right entry. */
	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		/* Bytes still to skip past the end of the current block. */
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);


		/* Skip whole blocks until @start lands inside one. */
		while (start >= xe_ttm_vram_mgr_block_size(block)) {
			start -= xe_ttm_vram_mgr_block_size(block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		/* New chunk: rest of this block, clamped to @remaining. */
		cur->start = xe_ttm_vram_mgr_block_start(block) + start;
		cur->size = min(xe_ttm_vram_mgr_block_size(block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}
216 
217 /**
218  * xe_res_dma - return dma address of cursor at current position
219  *
220  * @cur: the cursor to return the dma address from
221  */
222 static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
223 {
224 	return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
225 }
226 #endif
227