/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"

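/*
 * Suspend the hardware IP blocks of @adev in reverse order. Display
 * (DCE) blocks are skipped here since they are handled in phase 1 of
 * suspend. A failing block is logged, still marked as powered down
 * (status.hw = false), and the loop continues.
 */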
static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (!adev->ip_blocks[i].status.hw)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* errors are logged but intentionally not propagated, so
		 * that the remaining IP blocks still get suspended
		 */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		if (r) {
			dev_err(adev->dev, "suspend of IP block <%s> failed %d\n",
				adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

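/*
 * Prepare each device on the reset list for the reset-on-init:
 * unregister its GPU instance and suspend its IP blocks. Stops and
 * returns the error of the first device that fails to suspend.
 */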
static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev;
	int r;

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		amdgpu_unregister_gpu_instance(tmp_adev);
		r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
		if (r) {
			dev_err(tmp_adev->dev,
				"xgmi reset on init: prepare for reset failed\n");
			return r;
		}
	}

	return 0;
}

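/*
 * Restore the hardware context after the reset: reinitialize the
 * devices, then bring up KFD on any device where it had not completed
 * init before the reset.
 */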
static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev = NULL;
	int r;

	r = amdgpu_device_reinit_after_reset(reset_context);
	if (r)
		return r;

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		if (!tmp_adev->kfd.init_complete) {
			kgd2kfd_init_zone_device(tmp_adev);
			amdgpu_amdkfd_device_init(tmp_adev);
			amdgpu_amdkfd_drm_client_create(tmp_adev);
		}
	}

	return r;
}

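/*
 * Kick off the actual reset for every device on the reset list. The
 * per-device xgmi_reset_work items are queued on system_unbound_wq so
 * the resets run in parallel, then flushed so all of them have
 * completed (and their results collected) before the locks are
 * dropped.
 */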
static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
	struct amdgpu_reset_control *reset_ctl,
	struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *tmp_adev = NULL;
	int r = 0;

	dev_dbg(adev->dev, "xgmi roi - hw reset\n");

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		mutex_lock(&tmp_adev->reset_cntl->reset_lock);
		tmp_adev->reset_cntl->active_reset =
			amdgpu_asic_reset_method(adev);
	}

	/* Mode1 reset needs to be triggered on all devices together */
	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		/* For XGMI run all resets in parallel to speed up the process */
		if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
			r = -EALREADY;
		if (r) {
			dev_err(tmp_adev->dev,
				"xgmi reset on init: reset failed with error %d\n",
				r);
			break;
		}
	}

	/* For XGMI wait for all resets to complete before proceeding */
	if (!r) {
		list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
			flush_work(&tmp_adev->xgmi_reset_work);
			r = tmp_adev->asic_reset_res;
			if (r)
				break;
		}
	}

	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		/* clear the active method while the lock is still held */
		tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
		mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
	}

	return r;
}

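/**
 * amdgpu_reset_do_xgmi_reset_on_init - reset an XGMI hive during init
 * @reset_context: reset context, including the hive's device list
 *
 * Returns -EINVAL unless the device list holds at least two devices;
 * the hive path only makes sense for multiple XGMI-linked GPUs. The
 * first device in the list drives the prepare and reset steps for the
 * whole hive.
 */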
int amdgpu_reset_do_xgmi_reset_on_init(
	struct amdgpu_reset_context *reset_context)
{
	struct list_head *reset_device_list = reset_context->reset_device_list;
	struct amdgpu_device *adev;
	int r;

	if (!reset_device_list || list_empty(reset_device_list) ||
	    list_is_singular(reset_device_list))
		return -EINVAL;

	adev = list_first_entry(reset_device_list, struct amdgpu_device,
				reset_list);
	r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
	if (r)
		return r;

	return amdgpu_reset_perform_reset(adev, reset_context);
}

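/*
 * Handler backing AMD_RESET_METHOD_ON_INIT. Only the hardware-context
 * prepare/perform/restore steps are implemented; the environment hooks
 * are unused for this reset method.
 */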
struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
	.reset_method = AMD_RESET_METHOD_ON_INIT,
	.prepare_env = NULL,
	.prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
	.perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
	.restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
	.restore_env = NULL,
	.do_reset = NULL,
};

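/*
 * Install the ASIC-specific reset controller, keyed off the MP1 (SMU)
 * IP version. ASICs without a dedicated controller simply return 0.
 */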
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

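/* Tear down the ASIC-specific reset controller set up by amdgpu_reset_init(). */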
int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

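/*
 * Ask the reset controller for a handler matching @reset_context and
 * run its prepare_hwcontext step. Returns -EOPNOTSUPP if there is no
 * controller or no matching handler.
 */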
int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

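/*
 * Look up the matching reset handler, perform the reset and, if it
 * succeeded, restore the hardware context.
 */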
int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}

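/* kref release callback: destroy the domain's workqueue and free it. */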
void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);

	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

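/*
 * Allocate a reset domain of @type with an ordered, single-threaded
 * workqueue named @wq_name to serialize reset work. Returns NULL on
 * failure. A usage sketch (domain type and queue name are
 * illustrative):
 *
 *	struct amdgpu_reset_domain *domain;
 *
 *	domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE,
 *						  "amdgpu-reset-dev");
 *	if (!domain)
 *		return -ENOMEM;
 */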
struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}

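/*
 * Enter the reset path: flag the domain as in-reset and take its rw
 * semaphore for writing to exclude concurrent users of the domain.
 */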
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

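/* Leave the reset path: clear the in-reset flag and release the semaphore. */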
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}

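/*
 * Fill @buf (up to @len bytes) with a short human-readable description
 * of what triggered the reset. A logging sketch (the buffer size and
 * message are illustrative):
 *
 *	char desc[64];
 *
 *	amdgpu_reset_get_desc(reset_context, desc, sizeof(desc));
 *	dev_info(adev->dev, "GPU reset begin: %s\n", desc);
 */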
void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
			   size_t len)
{
	if (!buf || !len)
		return;

	switch (rst_ctxt->src) {
	case AMDGPU_RESET_SRC_JOB:
		if (rst_ctxt->job) {
			snprintf(buf, len, "job hang on ring:%s",
				 rst_ctxt->job->base.sched->name);
		} else {
			strscpy(buf, "job hang", len);
		}
		break;
	case AMDGPU_RESET_SRC_RAS:
		strscpy(buf, "RAS error", len);
		break;
	case AMDGPU_RESET_SRC_MES:
		strscpy(buf, "MES hang", len);
		break;
	case AMDGPU_RESET_SRC_HWS:
		strscpy(buf, "HWS hang", len);
		break;
	case AMDGPU_RESET_SRC_USER:
		strscpy(buf, "user trigger", len);
		break;
	default:
		strscpy(buf, "unknown", len);
		break;
	}
}