/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"

static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
{
	int i;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (!adev->ip_blocks[i].status.hw)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* XXX handle errors */
		amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
		adev->ip_blocks[i].status.hw = false;
	}

	/* The VCN FW shared region is in the framebuffer, and some flags in
	 * that region are initialized during sw_init. Make sure the region
	 * is backed up.
	 */
	amdgpu_vcn_save_vcpu_bo(adev);

	return 0;
}

static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev;
	int r;

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		amdgpu_unregister_gpu_instance(tmp_adev);
		r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
		if (r) {
			dev_err(tmp_adev->dev,
				"xgmi reset on init: prepare for reset failed");
			return r;
		}
	}

	return 0;
}

static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev = NULL;
	int r;

	r = amdgpu_device_reinit_after_reset(reset_context);
	if (r)
		return r;

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		if (!tmp_adev->kfd.init_complete) {
			kgd2kfd_init_zone_device(tmp_adev);
			amdgpu_amdkfd_device_init(tmp_adev);
			amdgpu_amdkfd_drm_client_create(tmp_adev);
		}
	}

	return r;
}

static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev = NULL;
	int r = 0;

	dev_dbg(adev->dev, "xgmi roi - hw reset\n");

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		mutex_lock(&tmp_adev->reset_cntl->reset_lock);
		tmp_adev->reset_cntl->active_reset =
			amdgpu_asic_reset_method(adev);
	}

	/* Mode1 reset needs to be triggered on all devices together */
	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		/* For XGMI, run all resets in parallel to speed up the process */
		if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
			r = -EALREADY;
		if (r) {
			dev_err(tmp_adev->dev,
				"xgmi reset on init: reset failed with error %d",
				r);
			break;
		}
	}

	/* For XGMI, wait for all resets to complete before proceeding */
	if (!r) {
		list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
			flush_work(&tmp_adev->xgmi_reset_work);
			r = tmp_adev->asic_reset_res;
			if (r)
				break;
		}
	}

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		/* Clear the active reset state before dropping the lock */
		tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
		mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
	}

	return r;
}

int amdgpu_reset_do_xgmi_reset_on_init(
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *adev;
	int r;

	if (!reset_device_list || list_empty(reset_device_list) ||
	    list_is_singular(reset_device_list))
		return -EINVAL;

	adev = list_first_entry(reset_device_list, struct amdgpu_device,
				reset_list);
	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
	if (r)
		return r;

	r = amdgpu_reset_perform_reset(adev, reset_context);

	return r;
}
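
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * one way a caller might drive the reset-on-init flow above. The hive
 * iteration and the amdgpu_reset_context fields used here are assumptions
 * drawn from the rest of the driver, not the canonical call site.
 */
#if 0
static int example_xgmi_reset_on_init(struct amdgpu_hive_info *hive)
{
	struct amdgpu_reset_context reset_context = { 0 };
	struct amdgpu_device *tmp_adev;
	LIST_HEAD(device_list);

	/* Collect every device in the hive; the handler rejects lists
	 * with fewer than two entries (-EINVAL above).
	 */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		list_add_tail(&tmp_adev->reset_list, &device_list);

	reset_context.method = AMD_RESET_METHOD_ON_INIT;
	reset_context.reset_device_list = &device_list;

	return amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
}
#endif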
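
/*
 * Handler table for AMD_RESET_METHOD_ON_INIT. Only the hwcontext hooks are
 * populated; the environment hooks and the single-step do_reset hook are
 * left NULL, since the reset-on-init flow only needs the
 * prepare/perform/restore sequence driven above.
 */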
struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
	.reset_method = AMD_RESET_METHOD_ON_INIT,
	.prepare_env = NULL,
	.prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
	.perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
	.restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
	.restore_env = NULL,
	.do_reset = NULL,
};

int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}

void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);
	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}
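
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * typical lifetime of a reset domain. amdgpu_reset_put_reset_domain() is the
 * kref helper from amdgpu_reset.h whose release path is
 * amdgpu_reset_destroy_reset_domain() above; the XGMI_HIVE type and the
 * workqueue name are example values.
 */
#if 0
static int example_reset_domain_lifetime(void)
{
	struct amdgpu_reset_domain *domain;

	domain = amdgpu_reset_create_reset_domain(XGMI_HIVE,
						  "amdgpu-reset-hive");
	if (!domain)
		return -ENOMEM;

	/* ... schedule reset work on domain->wq, use domain->sem ... */

	/* Drop the initial reference; the release destroys the workqueue. */
	amdgpu_reset_put_reset_domain(domain);
	return 0;
}
#endif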

void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}
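
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the pair above is meant to bracket the actual recovery work, with readers
 * elsewhere in the driver taking reset_domain->sem for read to stay clear of
 * an in-progress reset. A minimal writer-side shape, assuming the device
 * carries its domain in adev->reset_domain:
 */
#if 0
static void example_recover(struct amdgpu_device *adev)
{
	amdgpu_device_lock_reset_domain(adev->reset_domain);

	/* ... perform the reset while readers are excluded ... */

	amdgpu_device_unlock_reset_domain(adev->reset_domain);
}
#endif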

void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
			   size_t len)
{
	if (!buf || !len)
		return;

	switch (rst_ctxt->src) {
	case AMDGPU_RESET_SRC_JOB:
		if (rst_ctxt->job) {
			snprintf(buf, len, "job hang on ring:%s",
				 rst_ctxt->job->base.sched->name);
		} else {
			strscpy(buf, "job hang", len);
		}
		break;
	case AMDGPU_RESET_SRC_RAS:
		strscpy(buf, "RAS error", len);
		break;
	case AMDGPU_RESET_SRC_MES:
		strscpy(buf, "MES hang", len);
		break;
	case AMDGPU_RESET_SRC_HWS:
		strscpy(buf, "HWS hang", len);
		break;
	case AMDGPU_RESET_SRC_USER:
		strscpy(buf, "user trigger", len);
		break;
	default:
		strscpy(buf, "unknown", len);
	}
}
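
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller supplies a small stack buffer and logs the human-readable reset
 * reason; the 64-byte size is an arbitrary example value.
 */
#if 0
static void example_log_reset_reason(struct amdgpu_device *adev,
				     struct amdgpu_reset_context *reset_context)
{
	char reason[64];

	amdgpu_reset_get_desc(reset_context, reason, sizeof(reason));
	dev_info(adev->dev, "GPU reset begin: %s\n", reason);
}
#endif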

bool amdgpu_reset_in_recovery(struct amdgpu_device *adev)
{
	return (adev->init_lvl->level == AMDGPU_INIT_LEVEL_RESET_RECOVERY);
}