xref: /linux/drivers/gpu/drm/xe/xe_bo_evict.c (revision 2de3f38fbf89d3cb96d1237aa7a10c0f6480f450)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_bo_evict.h"

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_tile.h"

typedef int (*xe_pinned_fn)(struct xe_bo *bo);

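/*
 * Walk @pinned_list, applying @pinned_fn to each BO under its dma-resv lock.
 * Processed BOs end up on @new_list; on error the failing BO is moved back to
 * @pinned_list (unless both lists are the same). A reference is held on each
 * BO while xe->pinned.lock is dropped around the callback.
 */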
static int xe_bo_apply_to_pinned(struct xe_device *xe,
				 struct list_head *pinned_list,
				 struct list_head *new_list,
				 const xe_pinned_fn pinned_fn)
{
	LIST_HEAD(still_in_list);
	struct xe_bo *bo;
	int ret = 0;

	spin_lock(&xe->pinned.lock);
	while (!ret) {
		bo = list_first_entry_or_null(pinned_list, typeof(*bo),
					      pinned_link);
		if (!bo)
			break;
		xe_bo_get(bo);
		list_move_tail(&bo->pinned_link, &still_in_list);
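		/*
		 * Drop the spinlock while calling pinned_fn(): taking the BO's
		 * dma-resv lock may sleep. The reference taken above keeps the
		 * BO alive in the meantime.
		 */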
		spin_unlock(&xe->pinned.lock);

		xe_bo_lock(bo, false);
		ret = pinned_fn(bo);
		if (ret && pinned_list != new_list) {
			spin_lock(&xe->pinned.lock);
			list_move(&bo->pinned_link, pinned_list);
			spin_unlock(&xe->pinned.lock);
		}
		xe_bo_unlock(bo);
		xe_bo_put(bo);
		spin_lock(&xe->pinned.lock);
	}
	list_splice_tail(&still_in_list, new_list);
	spin_unlock(&xe->pinned.lock);

	return ret;
}

/**
 * xe_bo_evict_all - evict all BOs from VRAM
 *
 * @xe: xe device
 *
 * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
 * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
 * All eviction magic is done via TTM calls.
 *
 * Evict == move VRAM BOs to temporary (typically system) memory.
 *
 * This function should be called before the device goes into a suspend state
 * where the VRAM loses power.
 */
int xe_bo_evict_all(struct xe_device *xe)
{
	struct ttm_device *bdev = &xe->ttm;
	struct xe_tile *tile;
	u32 mem_type;
	u8 id;
	int ret;

	/* User memory */
	for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
		struct ttm_resource_manager *man =
			ttm_manager_type(bdev, mem_type);

		/*
		 * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
		 * state, since this state lives inside graphics stolen memory, which doesn't
		 * survive hibernation.
		 *
		 * This can be further improved by only evicting objects that we know have
		 * actually used a compression-enabled PAT index.
		 */
		if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
			continue;

		if (man) {
			ret = ttm_resource_manager_evict_all(bdev, man);
			if (ret)
				return ret;
		}
	}

	ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external_vram,
				    &xe->pinned.external_vram,
				    xe_bo_evict_pinned);

	/*
	 * Wait for all user BOs to be evicted, as those evictions depend on
	 * the memory moved below.
	 */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	if (ret)
		return ret;

	return xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present,
				     &xe->pinned.evicted,
				     xe_bo_evict_pinned);
}

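/*
 * Restore a pinned BO's contents and, if the BO has a GGTT mapping, re-map it
 * in the GGTT of every tile it is bound to.
 */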
static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
{
	struct xe_device *xe = xe_bo_device(bo);
	int ret;

	ret = xe_bo_restore_pinned(bo);
	if (ret)
		return ret;

	if (bo->flags & XE_BO_FLAG_GGTT) {
		struct xe_tile *tile;
		u8 id;

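		/*
		 * Only the BO's own tile and any tile explicitly requested via
		 * a XE_BO_FLAG_GGTTx() flag carry a GGTT mapping for this BO.
		 */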
		for_each_tile(tile, xe_bo_device(bo), id) {
			if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
				continue;

			mutex_lock(&tile->mem.ggtt->lock);
			xe_ggtt_map_bo(tile->mem.ggtt, bo);
			mutex_unlock(&tile->mem.ggtt->lock);
		}
	}

	/*
	 * We expect validate to trigger a move to VRAM and our move code
	 * should set up the iosys map.
	 */
	xe_assert(xe, !iosys_map_is_null(&bo->vmap));

	return 0;
}

/**
 * xe_bo_restore_kernel - restore kernel BOs to VRAM
 *
 * @xe: xe device
 *
 * Move kernel BOs from temporary (typically system) memory to VRAM via CPU. All
 * moves are done via TTM calls.
 *
 * This function should be called early, before trying to init the GT, on device
 * resume.
 */
int xe_bo_restore_kernel(struct xe_device *xe)
{
	return xe_bo_apply_to_pinned(xe, &xe->pinned.evicted,
				     &xe->pinned.kernel_bo_present,
				     xe_bo_restore_and_map_ggtt);
}

/**
 * xe_bo_restore_user - restore pinned user BOs to VRAM
 *
 * @xe: xe device
 *
 * Move pinned user BOs from temporary (typically system) memory to VRAM via
 * CPU. All moves are done via TTM calls.
 *
 * This function should be called late, after GT init, on device resume.
 */
int xe_bo_restore_user(struct xe_device *xe)
{
	struct xe_tile *tile;
	int ret, id;

	if (!IS_DGFX(xe))
		return 0;

	/* Pinned user memory in VRAM should be validated on resume */
	ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external_vram,
				    &xe->pinned.external_vram,
				    xe_bo_restore_pinned);

	/* Wait for restore to complete */
	for_each_tile(tile, xe, id)
		xe_tile_migrate_wait(tile);

	return ret;
}
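
/*
 * Expected call ordering across suspend/resume, as described in the kernel-doc
 * above. This is an illustrative sketch only; the actual call sites are
 * assumed to live in the xe PM code, not in this file:
 *
 *	suspend:
 *		xe_bo_evict_all(xe);		-- before VRAM loses power
 *	resume:
 *		xe_bo_restore_kernel(xe);	-- early, before GT init
 *		... GT init ...
 *		xe_bo_restore_user(xe);		-- late, after GT init
 */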