xref: /linux/drivers/gpu/drm/i915/intel_pcode.c (revision 9e1e9d660255d7216067193d774f338d08d8528d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2013-2021 Intel Corporation
4  */
5 
6 #include <drm/drm_print.h>
7 #include <drm/intel/display_parent_interface.h>
8 #include <drm/intel/intel_pcode_regs.h>
9 
10 #include "i915_drv.h"
11 #include "i915_reg.h"
12 #include "i915_wait_util.h"
13 #include "intel_pcode.h"
14 
15 static int gen6_check_mailbox_status(u32 mbox)
16 {
17 	switch (mbox & GEN6_PCODE_ERROR_MASK) {
18 	case GEN6_PCODE_SUCCESS:
19 		return 0;
20 	case GEN6_PCODE_UNIMPLEMENTED_CMD:
21 		return -ENODEV;
22 	case GEN6_PCODE_ILLEGAL_CMD:
23 		return -ENXIO;
24 	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
25 	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
26 		return -EOVERFLOW;
27 	case GEN6_PCODE_TIMEOUT:
28 		return -ETIMEDOUT;
29 	default:
30 		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
31 		return 0;
32 	}
33 }
34 
35 static int gen7_check_mailbox_status(u32 mbox)
36 {
37 	switch (mbox & GEN6_PCODE_ERROR_MASK) {
38 	case GEN6_PCODE_SUCCESS:
39 		return 0;
40 	case GEN6_PCODE_ILLEGAL_CMD:
41 		return -ENXIO;
42 	case GEN7_PCODE_TIMEOUT:
43 		return -ETIMEDOUT;
44 	case GEN7_PCODE_ILLEGAL_DATA:
45 		return -EINVAL;
46 	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
47 		return -ENXIO;
48 	case GEN11_PCODE_LOCKED:
49 		return -EBUSY;
50 	case GEN11_PCODE_REJECTED:
51 		return -EACCES;
52 	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
53 		return -EOVERFLOW;
54 	default:
55 		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
56 		return 0;
57 	}
58 }
59 
/*
 * Perform one pcode mailbox transaction; caller must hold sb_lock.
 *
 * @mbox is the command word written to GEN6_PCODE_MAILBOX. @val carries the
 * payload written to GEN6_PCODE_DATA and, when @is_read is set, receives the
 * dword read back. @val1 (optional, may be NULL) plays the same role for
 * GEN6_PCODE_DATA1. @fast_timeout_us/@slow_timeout_ms bound the busy-wait
 * and sleeping phases of the completion poll respectively.
 *
 * Returns 0 on success, -EAGAIN if the mailbox is already busy, -ETIMEDOUT
 * if pcode never signalled completion, or the errno decoded from the final
 * mailbox error field.
 */
static int __snb_pcode_rw(struct intel_uncore *uncore, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	lockdep_assert_held(&uncore->i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, we can use
	 * intel_uncore_read/write_fw variants to reduce the amount of work
	 * required when reading/writing.
	 */

	/* A previous transaction is still in flight; let the caller retry. */
	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	/* Write the payload first, then kick off the command. */
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/*
	 * Pcode clears GEN6_PCODE_READY when done; the final mailbox value
	 * (including the error field) is returned through &mbox.
	 */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	/* For reads, pcode leaves the result(s) in the data registers. */
	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	/* Decode the mailbox error field with the gen-appropriate table. */
	if (GRAPHICS_VER(uncore->i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}
99 
100 int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
101 {
102 	int err;
103 
104 	mutex_lock(&uncore->i915->sb_lock);
105 	err = __snb_pcode_rw(uncore, mbox, val, val1, 500, 20, true);
106 	mutex_unlock(&uncore->i915->sb_lock);
107 
108 	if (err) {
109 		drm_dbg(&uncore->i915->drm,
110 			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
111 			mbox, __builtin_return_address(0), err);
112 	}
113 
114 	return err;
115 }
116 
117 int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
118 			    int timeout_ms)
119 {
120 	int err;
121 
122 	mutex_lock(&uncore->i915->sb_lock);
123 	err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false);
124 	mutex_unlock(&uncore->i915->sb_lock);
125 
126 	if (err) {
127 		drm_dbg(&uncore->i915->drm,
128 			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
129 			val, mbox, __builtin_return_address(0), err);
130 	}
131 
132 	return err;
133 }
134 
135 static bool skl_pcode_try_request(struct intel_uncore *uncore, u32 mbox,
136 				  u32 request, u32 reply_mask, u32 reply,
137 				  u32 *status)
138 {
139 	*status = __snb_pcode_rw(uncore, mbox, &request, NULL, 500, 0, true);
140 
141 	return (*status == 0) && ((request & reply_mask) == reply);
142 }
143 
/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @uncore: uncore
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&uncore->i915->sb_lock);

/* One request/ack attempt; the mailbox access result lands in @status. */
#define COND \
	skl_pcode_try_request(uncore, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_dbg_kms(&uncore->i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	/* A 50 ms atomic busy-wait is only tolerable for short base timeouts. */
	drm_WARN_ON_ONCE(&uncore->i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&uncore->i915->sb_lock);
	/* An error reported by PCODE (in @status) takes precedence over -ETIMEDOUT. */
	return status ? status : ret;
#undef COND
}
210 
211 static int pcode_init_wait(struct intel_uncore *uncore, int timeout_ms)
212 {
213 	if (__intel_wait_for_register_fw(uncore,
214 					 GEN6_PCODE_MAILBOX,
215 					 GEN6_PCODE_READY, 0,
216 					 500, timeout_ms,
217 					 NULL))
218 		return -EPROBE_DEFER;
219 
220 	return skl_pcode_request(uncore,
221 				 DG1_PCODE_STATUS,
222 				 DG1_UNCORE_GET_INIT_STATUS,
223 				 DG1_UNCORE_INIT_STATUS_COMPLETE,
224 				 DG1_UNCORE_INIT_STATUS_COMPLETE, timeout_ms);
225 }
226 
227 int intel_pcode_init(struct intel_uncore *uncore)
228 {
229 	int err;
230 
231 	if (!IS_DGFX(uncore->i915))
232 		return 0;
233 
234 	/*
235 	 * Wait 10 seconds so that the punit to settle and complete
236 	 * any outstanding transactions upon module load
237 	 */
238 	err = pcode_init_wait(uncore, 10000);
239 
240 	if (err) {
241 		drm_notice(&uncore->i915->drm,
242 			   "Waiting for HW initialisation...\n");
243 		err = pcode_init_wait(uncore, 180000);
244 	}
245 
246 	return err;
247 }
248 
249 int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val)
250 {
251 	intel_wakeref_t wakeref;
252 	u32 mbox;
253 	int err;
254 
255 	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
256 		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
257 		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
258 
259 	with_intel_runtime_pm(uncore->rpm, wakeref)
260 		err = snb_pcode_read(uncore, mbox, val, NULL);
261 
262 	return err;
263 }
264 
265 int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val)
266 {
267 	intel_wakeref_t wakeref;
268 	u32 mbox;
269 	int err;
270 
271 	mbox = REG_FIELD_PREP(GEN6_PCODE_MB_COMMAND, mbcmd)
272 		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM1, p1)
273 		| REG_FIELD_PREP(GEN6_PCODE_MB_PARAM2, p2);
274 
275 	with_intel_runtime_pm(uncore->rpm, wakeref)
276 		err = snb_pcode_write(uncore, mbox, val);
277 
278 	return err;
279 }
280 
281 static int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
282 {
283 	struct drm_i915_private *i915 = to_i915(drm);
284 
285 	return snb_pcode_read(&i915->uncore, mbox, val, val1);
286 }
287 
288 static int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
289 {
290 	struct drm_i915_private *i915 = to_i915(drm);
291 
292 	return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
293 }
294 
295 static int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
296 			       u32 reply_mask, u32 reply, int timeout_base_ms)
297 {
298 	struct drm_i915_private *i915 = to_i915(drm);
299 
300 	return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
301 				 timeout_base_ms);
302 }
303 
/* Pcode hooks handed to the shared display code via the parent interface. */
const struct intel_display_pcode_interface i915_display_pcode_interface = {
	.read = intel_pcode_read,
	.write = intel_pcode_write_timeout,
	.request = intel_pcode_request,
};
309