// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pcode.h"

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/error-injection.h>

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"

/**
 * DOC: PCODE
 *
 * Xe PCODE is the component responsible for interfacing with the PCODE
 * firmware.
 * It provides a very simple ABI to other Xe components, while being the
 * single, consolidated place that communicates with PCODE. All read and
 * write operations to PCODE are internal and private to this component.
 *
 * What's next:
 * - PCODE hw metrics
 * - PCODE for display operations
 */

static int pcode_mailbox_status(struct xe_tile *tile)
{
	const char *err_str;
	int err_decode;
	u32 err;

#define CASE_ERR(_err, _err_decode, _err_str)	\
	case _err:				\
		err_decode = _err_decode;	\
		err_str = _err_str;		\
		break

	err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
	switch (err) {
	CASE_ERR(PCODE_ILLEGAL_CMD,           -ENXIO,     "Illegal Command");
	CASE_ERR(PCODE_TIMEOUT,               -ETIMEDOUT, "Timed out");
	CASE_ERR(PCODE_ILLEGAL_DATA,          -EINVAL,    "Illegal Data");
	CASE_ERR(PCODE_ILLEGAL_SUBCOMMAND,    -ENXIO,     "Illegal Subcommand");
	CASE_ERR(PCODE_LOCKED,                -EBUSY,     "PCODE Locked");
	CASE_ERR(PCODE_GT_RATIO_OUT_OF_RANGE, -EOVERFLOW, "GT ratio out of range");
	CASE_ERR(PCODE_REJECTED,              -EACCES,    "PCODE Rejected");
	default:
		err_decode = -EPROTO;
		err_str = "Unknown";
	}

	if (err) {
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s",
			err_decode, err_str);

		return err_decode;
	}

	return 0;
#undef CASE_ERR
}

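/*
 * The mailbox handshake implemented below: if PCODE_READY is already set the
 * mailbox is busy and -EAGAIN is returned. Otherwise the payload is written
 * to PCODE_DATA0 (and optionally PCODE_DATA1), and the transaction is kicked
 * off by writing the mailbox command with PCODE_READY set. PCODE clears
 * PCODE_READY once it has consumed the command, at which point the data
 * registers hold the reply (if any) and the error bits in PCODE_MAILBOX can
 * be decoded by pcode_mailbox_status().
 */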
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			      unsigned int timeout_ms, bool return_data,
			      bool atomic)
{
	struct xe_mmio *mmio = &tile->mmio;
	int err;

	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
		return -EAGAIN;

	xe_mmio_write32(mmio, PCODE_DATA0, *data0);
	xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
	xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);

	err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
			     timeout_ms * USEC_PER_MSEC, NULL, atomic);
	if (err)
		return err;

	if (return_data) {
		*data0 = xe_mmio_read32(mmio, PCODE_DATA0);
		if (data1)
			*data1 = xe_mmio_read32(mmio, PCODE_DATA1);
	}

	return pcode_mailbox_status(tile);
}

static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			    unsigned int timeout_ms, bool return_data,
			    bool atomic)
{
	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	lockdep_assert_held(&tile->pcode.lock);

	return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}

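/**
 * xe_pcode_write_timeout - Write one data word to PCODE
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the write is targeted for
 * @data: payload written to PCODE_DATA0
 * @timeout: timeout in ms to wait for PCODE to consume the command
 *
 * Returns 0 on success, negative error code otherwise.
 */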
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

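/**
 * xe_pcode_write64_timeout - Write two data words to PCODE
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the write is targeted for
 * @data0: payload written to PCODE_DATA0
 * @data1: payload written to PCODE_DATA1
 * @timeout: timeout in ms to wait for PCODE to consume the command
 *
 * Returns 0 on success, negative error code otherwise.
 */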
int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0, u32 data1, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data0, &data1, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

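/**
 * xe_pcode_read - Read one or two data words back from PCODE
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the read is targeted for
 * @val: pointer holding the request on entry and the PCODE_DATA0 reply on
 *       return
 * @val1: optional pointer receiving the PCODE_DATA1 reply, may be NULL
 *
 * Returns 0 on success, negative error code otherwise.
 */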
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

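/*
 * Poll for the expected reply with exponential backoff: the wait starts at
 * 10us and doubles on every iteration until the accumulated wait time
 * exceeds @timeout_us. In atomic context the wait is a busy udelay(),
 * otherwise usleep_range() is used so the CPU can be yielded.
 */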
static int pcode_try_request(struct xe_tile *tile, u32 mbox,
			     u32 request, u32 reply_mask, u32 reply,
			     u32 *status, bool atomic, int timeout_us, bool locked)
{
	int slept, wait = 10;

	xe_tile_assert(tile, timeout_us > 0);

	for (slept = 0; slept < timeout_us; slept += wait) {
		if (locked)
			*status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						   atomic);
		else
			*status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						     atomic);
		if ((*status == 0) && ((request & reply_mask) == reply))
			return 0;

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	return -ETIMEDOUT;
}

/**
 * xe_pcode_request - send PCODE request until acknowledgment
 * @tile: tile
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if this times out, polling is retried for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
		     u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	xe_tile_assert(tile, timeout_base_ms <= 3);

	mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				false, timeout_base_ms * 1000, true);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_err(&tile_to_xe(tile)->drm,
		"PCODE timeout, retrying with preemption disabled\n");
	preempt_disable();
	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				true, 50 * 1000, true);
	preempt_enable();

out:
	mutex_unlock(&tile->pcode.lock);
	return status ? status : ret;
}
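
/*
 * A minimal usage sketch for xe_pcode_request(); the mailbox and reply
 * values below (EXAMPLE_MBOX, EXAMPLE_REQ, EXAMPLE_ACK_MASK, EXAMPLE_ACK)
 * are hypothetical placeholders, not definitions from xe_pcode_api.h:
 *
 *	int example_send_request(struct xe_tile *tile)
 *	{
 *		return xe_pcode_request(tile, EXAMPLE_MBOX, EXAMPLE_REQ,
 *					EXAMPLE_ACK_MASK, EXAMPLE_ACK, 1);
 *	}
 *
 * The final argument is @timeout_base_ms, which must not exceed 3 given the
 * assert above; the 50 ms preemption-disabled retry is added on top of it.
 */
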
/**
 * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
 * @tile: tile instance
 * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
 * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
 *
 * This function initializes PCODE's QOS frequency table for proper minimal
 * frequency/power steering decisions, depending on the current requested GT
 * frequency. For older platforms this was a more complete table including
 * the IA freq. On the latest platforms, however, the table has become a
 * simple 1:1 ring vs GT frequency mapping. Even so, without setting it,
 * PCODE might not make the right decisions for some memory frequencies,
 * affecting latency.
 *
 * It returns 0 on success, and a negative error number on failure: -EINVAL
 * if the maximal frequency is not higher than the minimal frequency, and
 * other errors directly translated from the PCODE error returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"
 * - -ENXIO: "Illegal Subcommand"
 * - -EBUSY: "PCODE Locked"
 * - -EOVERFLOW: "GT ratio out of range"
 * - -EACCES: "PCODE Rejected"
 * - -EPROTO: "Unknown"
 */
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
				 u32 max_gt_freq)
{
	int ret;
	u32 freq;

	if (!tile_to_xe(tile)->info.has_llc)
		return 0;

	if (max_gt_freq <= min_gt_freq)
		return -EINVAL;

	mutex_lock(&tile->pcode.lock);
	for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
		u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;

		ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
				       &data, NULL, 1, false, false);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&tile->pcode.lock);
	return ret;
}
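
/*
 * A worked example of the table entry layout above, assuming a hypothetical
 * frequency step of 16 (16 * 50MHz = 800MHz): each entry packs the ring
 * ratio into the high field and the GT ratio into the low field, so the 1:1
 * value written for that step is (16 << PCODE_FREQ_RING_RATIO_SHIFT) | 16.
 */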

/**
 * xe_pcode_ready - Ensure PCODE is initialized
 * @xe: xe instance
 * @locked: true to take the pcode lock around the init status poll, false
 *          when the caller polls lockless (early probe)
 *
 * The PCODE init mailbox is polled only on the root tile, since the root
 * tile reports that initialization is complete only after all tiles have
 * finished initializing. Called without locking during early probe, and
 * with locking in the resume path.
 *
 * Returns 0 on success, and a negative error number on failure.
 */
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
	u32 status, request = DGFX_GET_INIT_STATUS;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	int timeout_us = 180000000; /* 3 min */
	int ret;

	if (xe->info.skip_pcode)
		return 0;

	if (!IS_DGFX(xe))
		return 0;

	if (locked)
		mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
				DGFX_INIT_STATUS_COMPLETE,
				DGFX_INIT_STATUS_COMPLETE,
				&status, false, timeout_us, locked);

	if (locked)
		mutex_unlock(&tile->pcode.lock);

	if (ret)
		drm_err(&xe->drm,
			"PCODE initialization timed out after 3 min\n");

	return ret;
}
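
/*
 * A minimal sketch of how the two xe_pcode_ready() modes are meant to be
 * used; the resume handler below is a hypothetical caller, not part of
 * this file:
 *
 *	int example_resume(struct xe_device *xe)
 *	{
 *		return xe_pcode_ready(xe, true);
 *	}
 *
 * Passing true serializes the init status poll against other mailbox users
 * via the pcode lock; early probe instead goes through
 * xe_pcode_probe_early(), which polls lockless while nothing else can race
 * for the mailbox yet.
 */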

/**
 * xe_pcode_init - initialize components of PCODE
 * @tile: tile instance
 *
 * This function initializes the xe_pcode component.
 * To be called once only during probe.
 */
void xe_pcode_init(struct xe_tile *tile)
{
	drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}

/**
 * xe_pcode_probe_early - initializes PCODE
 * @xe: xe instance
 *
 * This function checks the initialization status of PCODE.
 * To be called once only during early probe, without locks.
 *
 * Returns 0 on success, error code otherwise.
 */
int xe_pcode_probe_early(struct xe_device *xe)
{
	return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */

/* Helpers with drm device. These should only be called by the display side */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)

int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_read(tile, mbox, val, val1);
}

int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
}

int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
			u32 reply_mask, u32 reply, int timeout_base_ms)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
}

#endif