// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#ifndef __AMDGPU_RES_CURSOR_H__
#define __AMDGPU_RES_CURSOR_H__

#include <drm/drm_mm.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu_vram_mgr.h"
/**
 * struct amdgpu_res_cursor - state for walking over vram_mgr and gtt_mgr
 * allocations
 *
 * @start: physical start of the current chunk, in bytes
 * @size: size of the current contiguous chunk, in bytes
 * @remaining: bytes still to be walked in the whole range, including @size
 * @node: backing drm_buddy_block (VRAM) or drm_mm_node (GTT/doorbell)
 * @mem_type: TTM memory type of the resource being walked
 */
struct amdgpu_res_cursor {
	uint64_t		start;
	uint64_t		size;
	uint64_t		remaining;
	void			*node;
	uint32_t		mem_type;
};
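
/*
 * Note: while a walk is in progress, @size covers only the contiguous bytes
 * left in the current node while @remaining covers the whole rest of the
 * range, so cur->size <= cur->remaining holds until the walk completes.
 */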

/**
 * amdgpu_res_first - initialize an amdgpu_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations starting at @start and
 * spanning @size bytes.
 */
static inline void amdgpu_res_first(struct ttm_resource *res,
				    uint64_t start, uint64_t size,
				    struct amdgpu_res_cursor *cur)
{
	struct drm_buddy_block *block;
	struct list_head *head, *next;
	struct drm_mm_node *node;

	if (!res)
		goto fallback;

	BUG_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		head = &to_amdgpu_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		while (start >= amdgpu_vram_mgr_block_size(block)) {
			start -= amdgpu_vram_mgr_block_size(block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = amdgpu_vram_mgr_block_start(block) + start;
		cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
		cur->remaining = size;
		cur->node = block;
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_DOORBELL:
		node = to_ttm_range_mgr_node(res)->mm_nodes;
		while (start >= node->size << PAGE_SHIFT)
			start -= node++->size << PAGE_SHIFT;

		cur->start = (node->start << PAGE_SHIFT) + start;
		cur->size = min((node->size << PAGE_SHIFT) - start, size);
		cur->remaining = size;
		cur->node = node;
		break;
	default:
		goto fallback;
	}

	return;

fallback:
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	WARN_ON(res && start + size > res->size);
}
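
/*
 * Example usage (a minimal sketch; @bo is assumed to be a struct amdgpu_bo
 * provided by the caller, and handle_chunk() is a hypothetical stand-in for
 * the caller's per-chunk work):
 *
 *	struct amdgpu_res_cursor cursor;
 *
 *	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 *	while (cursor.remaining) {
 *		handle_chunk(cursor.start, cursor.size);
 *		amdgpu_res_next(&cursor, cursor.size);
 *	}
 *
 * Each iteration sees one contiguous chunk: cursor.start is its physical
 * offset and cursor.size its length in bytes.
 */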

/**
 * amdgpu_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
	struct drm_buddy_block *block;
	struct drm_mm_node *node;
	struct list_head *next;

	BUG_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	cur->size -= size;
	if (cur->size) {
		cur->start += size;
		return;
	}

	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		cur->node = block;
		cur->start = amdgpu_vram_mgr_block_start(block);
		cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
		break;
	case TTM_PL_TT:
	case AMDGPU_PL_DOORBELL:
		node = cur->node;

		cur->node = ++node;
		cur->start = node->start << PAGE_SHIFT;
		cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
		break;
	default:
		return;
	}
}
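
/*
 * Advances do not have to consume a whole chunk at once. A sketch of walking
 * in fixed steps (CHUNK_SZ is a caller-chosen constant and process() a
 * hypothetical helper, neither part of this API):
 *
 *	while (cursor.remaining) {
 *		uint64_t len = min(cursor.size, (uint64_t)CHUNK_SZ);
 *
 *		process(cursor.start, len);
 *		amdgpu_res_next(&cursor, len);
 *	}
 *
 * Because len never exceeds cursor.size, each step stays within the current
 * node; the cursor only moves to the next node once the chunk is used up.
 */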

/**
 * amdgpu_res_cleared - check if the current block is cleared
 *
 * @cur: the cursor pointing at the block to check
 *
 * Check whether the block @cur currently points at has already been cleared.
 * Only VRAM blocks carry this information; for every other memory type the
 * function returns false.
 *
 * Return: true if the block is known to be cleared, false otherwise.
 */
static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
{
	struct drm_buddy_block *block;

	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		block = cur->node;

		if (!amdgpu_vram_mgr_is_cleared(block))
			return false;
		break;
	default:
		return false;
	}

	return true;
}
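
/*
 * A sketch of the intended use: skip the clear for chunks whose backing
 * blocks are already known to be zeroed (clear_chunk() is a hypothetical
 * stand-in for the caller's fill/clear submission):
 *
 *	while (cursor.remaining) {
 *		if (!amdgpu_res_cleared(&cursor))
 *			clear_chunk(cursor.start, cursor.size);
 *		amdgpu_res_next(&cursor, cursor.size);
 *	}
 *
 * This is safe per chunk because cursor.size never spans more than one
 * backing block.
 */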

#endif