xref: /linux/drivers/gpu/drm/xe/xe_pcode.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_pcode_api.h"
7 #include "xe_pcode.h"
8 
9 #include "xe_gt.h"
10 #include "xe_mmio.h"
11 
12 #include <linux/errno.h>
13 
14 /**
15  * DOC: PCODE
16  *
17  * Xe PCODE is the component responsible for interfacing with the PCODE
18  * firmware.
19  * It shall provide a very simple ABI to other Xe components, but be the
20  * single and consolidated place that will communicate with PCODE. All read
21  * and write operations to PCODE will be internal and private to this component.
22  *
23  * What's next:
24  * - PCODE hw metrics
25  * - PCODE for display operations
26  */
27 
28 static int pcode_mailbox_status(struct xe_gt *gt)
29 {
30 	u32 err;
31 	static const struct pcode_err_decode err_decode[] = {
32 		[PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
33 		[PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
34 		[PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
35 		[PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
36 		[PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
37 		[PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
38 			"GT ratio out of range"},
39 		[PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
40 		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
41 	};
42 
43 	lockdep_assert_held(&gt->pcode.lock);
44 
45 	err = xe_mmio_read32(gt, PCODE_MAILBOX.reg) & PCODE_ERROR_MASK;
46 	if (err) {
47 		drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
48 			err_decode[err].str ?: "Unknown");
49 		return err_decode[err].errno ?: -EPROTO;
50 	}
51 
52 	return 0;
53 }
54 
55 static bool pcode_mailbox_done(struct xe_gt *gt)
56 {
57 	lockdep_assert_held(&gt->pcode.lock);
58 	return (xe_mmio_read32(gt, PCODE_MAILBOX.reg) & PCODE_READY) == 0;
59 }
60 
61 static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
62 			    unsigned int timeout, bool return_data, bool atomic)
63 {
64 	lockdep_assert_held(&gt->pcode.lock);
65 
66 	if (!pcode_mailbox_done(gt))
67 		return -EAGAIN;
68 
69 	xe_mmio_write32(gt, PCODE_DATA0.reg, *data0);
70 	xe_mmio_write32(gt, PCODE_DATA1.reg, data1 ? *data1 : 0);
71 	xe_mmio_write32(gt, PCODE_MAILBOX.reg, PCODE_READY | mbox);
72 
73 	if (atomic)
74 		_wait_for_atomic(pcode_mailbox_done(gt), timeout * 1000, 1);
75 	else
76 		wait_for(pcode_mailbox_done(gt), timeout);
77 
78 	if (return_data) {
79 		*data0 = xe_mmio_read32(gt, PCODE_DATA0.reg);
80 		if (data1)
81 			*data1 = xe_mmio_read32(gt, PCODE_DATA1.reg);
82 	}
83 
84 	return pcode_mailbox_status(gt);
85 }
86 
87 int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
88 {
89 	int err;
90 
91 	mutex_lock(&gt->pcode.lock);
92 	err = pcode_mailbox_rw(gt, mbox, &data, NULL, timeout, false, false);
93 	mutex_unlock(&gt->pcode.lock);
94 
95 	return err;
96 }
97 
98 int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
99 {
100 	int err;
101 
102 	mutex_lock(&gt->pcode.lock);
103 	err = pcode_mailbox_rw(gt, mbox, val, val1, 1, true, false);
104 	mutex_unlock(&gt->pcode.lock);
105 
106 	return err;
107 }
108 
109 static bool xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
110 				  u32 request, u32 reply_mask, u32 reply,
111 				  u32 *status, bool atomic)
112 {
113 	*status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true, atomic);
114 
115 	return (*status == 0) && ((request & reply_mask) == reply);
116 }
117 
/**
 * xe_pcode_request - send PCODE request until acknowledgment
 * @gt: gt
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and if this times out for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;
	bool atomic = false;

	mutex_lock(&gt->pcode.lock);

	/*
	 * COND re-reads the local @atomic flag on every evaluation, so the
	 * same macro serves both the preemptible poll below and the
	 * preemption-disabled retry further down.
	 */
#define COND \
	xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status, atomic)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_err(&gt_to_xe(gt)->drm,
		"PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
	preempt_disable();
	atomic = true;
	ret = wait_for_atomic(COND, 50);
	atomic = false;
	preempt_enable();

out:
	mutex_unlock(&gt->pcode.lock);
	/* A PCODE-reported error takes precedence over a local timeout. */
	return status ? status : ret;
#undef COND
}
/**
 * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
 * @gt: gt instance
 * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
 * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
 *
 * This function initializes PCODE's QOS frequency table for a proper minimal
 * frequency/power steering decision, depending on the current requested GT
 * frequency. For older platforms this was a more complete table including
 * the IA freq. However for the latest platforms this table became a simple
 * 1-1 Ring vs GT frequency. Even so, without setting it, PCODE might not
 * take the right decisions for some memory frequencies and affect latency.
 *
 * It returns 0 on success (also when skipped on discrete GPUs, which do not
 * need this table), and a negative error number on failure: -EINVAL if the
 * maximal frequency is not higher than the minimal one, or an error directly
 * translated from the PCODE error returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"
 * - -ENXIO, "Illegal Subcommand"
 * - -EBUSY: "PCODE Locked"
 * - -EOVERFLOW, "GT ratio out of range"
 * - -EACCES, "PCODE Rejected"
 * - -EPROTO, "Unknown"
 */
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
				 u32 max_gt_freq)
{
	int ret;
	u32 freq;

	/* Discrete GPUs manage this internally; nothing to program. */
	if (IS_DGFX(gt_to_xe(gt)))
		return 0;

	if (max_gt_freq <= min_gt_freq)
		return -EINVAL;

	mutex_lock(&gt->pcode.lock);
	for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
		/* 1:1 ring/GT ratio: the same freq goes in both fields. */
		u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;

		ret = pcode_mailbox_rw(gt, PCODE_WRITE_MIN_FREQ_TABLE,
				       &data, NULL, 1, false, false);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&gt->pcode.lock);
	return ret;
}
238 
239 static bool pcode_dgfx_status_complete(struct xe_gt *gt)
240 {
241 	u32 data = DGFX_GET_INIT_STATUS;
242 	int status = pcode_mailbox_rw(gt, DGFX_PCODE_STATUS,
243 				      &data, NULL, 1, true, false);
244 
245 	return status == 0 &&
246 		(data & DGFX_INIT_STATUS_COMPLETE) == DGFX_INIT_STATUS_COMPLETE;
247 }
248 
249 /**
250  * xe_pcode_init - Ensure PCODE is initialized
251  * @gt: gt instance
252  *
253  * This function ensures that PCODE is properly initialized. To be called during
254  * probe and resume paths.
255  *
256  * It returns 0 on success, and -error number on failure.
257  */
258 int xe_pcode_init(struct xe_gt *gt)
259 {
260 	int timeout = 180000; /* 3 min */
261 	int ret;
262 
263 	if (!IS_DGFX(gt_to_xe(gt)))
264 		return 0;
265 
266 	mutex_lock(&gt->pcode.lock);
267 	ret = wait_for(pcode_dgfx_status_complete(gt), timeout);
268 	mutex_unlock(&gt->pcode.lock);
269 
270 	if (ret)
271 		drm_err(&gt_to_xe(gt)->drm,
272 			"PCODE initialization timedout after: %d min\n",
273 			timeout / 60000);
274 
275 	return ret;
276 }
277 
278 /**
279  * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
280  * @gt: gt instance
281  *
282  * This function initializes the xe_pcode component, and when needed, it ensures
283  * that PCODE has properly performed its initialization and it is really ready
284  * to go. To be called once only during probe.
285  *
286  * It returns 0 on success, and -error number on failure.
287  */
288 int xe_pcode_probe(struct xe_gt *gt)
289 {
290 	mutex_init(&gt->pcode.lock);
291 
292 	if (!IS_DGFX(gt_to_xe(gt)))
293 		return 0;
294 
295 	return xe_pcode_init(gt);
296 }
297