// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "intel_pcode.h"

static int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	lockdep_assert_held(&uncore->i915->sb_lock);

	/*
	 * GEN6_PCODE_* registers are outside of the forcewake domain, so we
	 * can use the intel_uncore_read/write_fw variants to reduce the
	 * amount of work required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

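/*
 * Summary of the mailbox handshake implemented by __snb_pcode_rw() above:
 * bail out with -EAGAIN if GEN6_PCODE_READY is still set (a previous
 * request, e.g. one that timed out, is still in flight), write the payload
 * into GEN6_PCODE_DATA/DATA1, latch the command by setting GEN6_PCODE_READY
 * in the mailbox register, then poll until the firmware clears the READY
 * bit and map the resulting error bits onto an errno.
 */
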
int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}

int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			    int timeout_ms)
{
	int err;

	mutex_lock(&uncore->i915->sb_lock);
	err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
	mutex_unlock(&uncore->i915->sb_lock);

	if (err) {
		drm_dbg(&uncore->i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}

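/*
 * Illustrative use of the wrappers above (not taken from this file; the
 * mailbox command FAKE_PCODE_CMD is hypothetical): read a value back from
 * the firmware, then write one via snb_pcode_write() from intel_pcode.h:
 *
 *	u32 val = 0;
 *
 *	err = snb_pcode_read(uncore, FAKE_PCODE_CMD, &val, NULL);
 *	if (!err)
 *		err = snb_pcode_write(uncore, FAKE_PCODE_CMD, val | BIT(0));
 */
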
static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);

	return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if that times out, it is retried for another 50 ms
 * with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, or <0 in case of
 * some other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally this guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() makes no guarantee about how soon its passed condition
	 * is evaluated for the first time, so send the first request
	 * explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround, retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50 ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	return status ? status : ret;
#undef COND
}

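/*
 * Illustrative call (hypothetical mailbox, request, and reply values, not
 * taken from this file): poll until the firmware flips an "operation done"
 * bit, giving it 3 ms with preemption enabled before falling back to the
 * atomic retry:
 *
 *	ret = skl_pcode_request(uncore, FAKE_PCODE_CMD, FAKE_REQUEST,
 *				FAKE_DONE_MASK, FAKE_DONE, 3);
 */
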
static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
{
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 500, timeout_ms,
					 NULL))
		return -EPROBE_DEFER;

	return skl_pcode_request(uncore,
				 DG1_PCODE_STATUS,
				 DG1_UNCORE_GET_INIT_STATUS,
				 DG1_UNCORE_INIT_STATUS_COMPLETE,
				 DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
}

int intel_pcode_init(struct intel_uncore *uncore)
{
	int err;

	if (!IS_DGFX(uncore->i915))
		return 0;

	/*
	 * Wait up to 10 seconds for the punit to settle and complete any
	 * outstanding transactions upon module load.
	 */
	err = pcode_init_wait(uncore, 10000);

	if (err) {
		drm_notice(&uncore->i915->drm,
			   "Waiting for HW initialisation...\n");
		err = pcode_init_wait(uncore, 180000);
	}

	return err;
}

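/*
 * Note for callers: on discrete GPUs a timeout here surfaces as
 * -EPROBE_DEFER (from pcode_init_wait() above), so probe code is expected
 * to propagate the error and let the driver core retry once the punit has
 * settled. A minimal sketch, assuming an i915 pointer at probe time:
 *
 *	ret = intel_pcode_init(&i915->uncore);
 *	if (ret)
 *		return ret;
 */
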
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_read(uncore, mbox, val, NULL);

	return err;
}

int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
{
	intel_wakeref_t wakeref;
	u32 mbox;
	int err;

	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);

	with_intel_runtime_pm(uncore->rpm, wakeref)
		err = snb_pcode_write(uncore, mbox, val);

	return err;
}

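/*
 * The *_p variants above pack a command plus two parameters into the
 * mailbox dword via REG_FIELD_PREP() and take a runtime-PM wakeref around
 * the access. Illustrative use with a hypothetical command (FAKE_PCODE_CMD
 * and FAKE_PARAM1 are not real mailbox IDs):
 *
 *	err = snb_pcode_write_p(uncore, FAKE_PCODE_CMD, FAKE_PARAM1, 0, val);
 */
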
/* Helpers with drm device */
int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return snb_pcode_read(&i915->uncore, mbox, val, val1);
}

int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
}

int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
			u32 reply_mask, u32 reply, int timeout_base_ms)
{
	struct drm_i915_private *i915 = to_i915(drm);

	return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
				 timeout_base_ms);
}
302