// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pcode.h"

#include <linux/delay.h>
#include <linux/errno.h>

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"

/**
 * DOC: PCODE
 *
 * Xe PCODE is the component responsible for interfacing with the PCODE
 * firmware.
 * It provides a very simple ABI to other Xe components, but is the single,
 * consolidated place that communicates with PCODE. All read and write
 * operations to PCODE are internal and private to this component.
 *
 * What's next:
 * - PCODE hw metrics
 * - PCODE for display operations
 */

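/*
 * Decode the error field of the PCODE mailbox register into an errno and log
 * a human-readable description of the failure. Returns 0 when no error is
 * reported.
 */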
static int pcode_mailbox_status(struct xe_tile *tile)
{
	u32 err;
	static const struct pcode_err_decode err_decode[] = {
		[PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
		[PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
		[PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
		[PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
		[PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
		[PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
			"GT ratio out of range"},
		[PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
	};

	err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
	if (err) {
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
			err_decode[err].str ?: "Unknown");
		return err_decode[err].errno ?: -EPROTO;
	}

	return 0;
}

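/*
 * Raw mailbox transaction: write the request to PCODE_DATA0/PCODE_DATA1,
 * trigger it via PCODE_MAILBOX, wait for the PCODE_READY bit to clear within
 * @timeout_ms, optionally read the reply back into @data0/@data1, and decode
 * the resulting mailbox status. Callers are responsible for serialization.
 */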
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			      unsigned int timeout_ms, bool return_data,
			      bool atomic)
{
	struct xe_gt *mmio = tile->primary_gt;
	int err;

	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
		return -EAGAIN;

	xe_mmio_write32(mmio, PCODE_DATA0, *data0);
	xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
	xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);

	err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
			     timeout_ms * USEC_PER_MSEC, NULL, atomic);
	if (err)
		return err;

	if (return_data) {
		*data0 = xe_mmio_read32(mmio, PCODE_DATA0);
		if (data1)
			*data1 = xe_mmio_read32(mmio, PCODE_DATA1);
	}

	return pcode_mailbox_status(tile);
}

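/*
 * Locked variant of the mailbox transaction: identical to __pcode_mailbox_rw()
 * but asserts that tile->pcode.lock is held by the caller.
 */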
static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			    unsigned int timeout_ms, bool return_data,
			    bool atomic)
{
	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	lockdep_assert_held(&tile->pcode.lock);

	return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}

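/**
 * xe_pcode_write_timeout - Send a PCODE write request with a custom timeout
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the write is targeted for
 * @data: value to write into PCODE_DATA0
 * @timeout: time in milliseconds to wait for the mailbox to become ready
 *
 * Returns 0 on success, or a negative error code on failure.
 */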
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

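/**
 * xe_pcode_read - Send a PCODE read request
 * @tile: tile instance
 * @mbox: PCODE mailbox ID the read is targeted for
 * @val: holds the request on entry and the PCODE_DATA0 reply on return
 * @val1: optional pointer to receive the PCODE_DATA1 reply, may be NULL
 *
 * Returns 0 on success, or a negative error code on failure.
 */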
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

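/*
 * Resend the mailbox request with exponential backoff until either PCODE
 * acknowledges it ((reply & @reply_mask) == @reply) or @timeout_us expires.
 * The @locked flag selects between the locked and raw mailbox helpers, and
 * @atomic switches the delay between udelay() and usleep_range().
 */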
static int pcode_try_request(struct xe_tile *tile, u32 mbox,
			     u32 request, u32 reply_mask, u32 reply,
			     u32 *status, bool atomic, int timeout_us, bool locked)
{
	int slept, wait = 10;

	xe_tile_assert(tile, timeout_us > 0);

	for (slept = 0; slept < timeout_us; slept += wait) {
		if (locked)
			*status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						   atomic);
		else
			*status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						     atomic);
		if ((*status == 0) && ((request & reply_mask) == reply))
			return 0;

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	return -ETIMEDOUT;
}

/**
 * xe_pcode_request - send PCODE request until acknowledgment
 * @tile: tile
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if that times out, for another 50 ms with
 * preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
		     u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	xe_tile_assert(tile, timeout_base_ms <= 3);

	mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				false, timeout_base_ms * 1000, true);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_err(&tile_to_xe(tile)->drm,
		"PCODE timeout, retrying with preemption disabled\n");
	preempt_disable();
	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				true, 50 * 1000, true);
	preempt_enable();

out:
	mutex_unlock(&tile->pcode.lock);
	return status ? status : ret;
}

/**
 * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
 * @tile: tile instance
 * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
 * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
 *
 * This function initializes PCODE's QOS frequency table for a proper minimal
 * frequency/power steering decision, depending on the current requested GT
 * frequency. For older platforms this was a more complete table including
 * the IA freq. However for the latest platforms this table has become a
 * simple 1:1 Ring vs GT frequency mapping. Even so, without setting it, PCODE
 * might not make the right decisions for some memory frequencies and affect
 * latency.
 *
 * It returns 0 on success, -EINVAL if the max frequency is not higher than
 * the min frequency, and other errors directly translated from the PCODE
 * error returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"
 * - -ENXIO: "Illegal Subcommand"
 * - -EBUSY: "PCODE Locked"
 * - -EOVERFLOW: "GT ratio out of range"
 * - -EACCES: "PCODE Rejected"
 * - -EPROTO: "Unknown"
 */
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
				 u32 max_gt_freq)
{
	int ret;
	u32 freq;

	if (!tile_to_xe(tile)->info.has_llc)
		return 0;

	if (max_gt_freq <= min_gt_freq)
		return -EINVAL;

	mutex_lock(&tile->pcode.lock);
	for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
		u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;

		ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
				       &data, NULL, 1, false, false);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&tile->pcode.lock);
	return ret;
}

/**
 * xe_pcode_ready - Ensure PCODE is initialized
 * @xe: xe instance
 * @locked: true if lock held, false otherwise
 *
 * The PCODE init mailbox is polled only on the root GT of the root tile,
 * as the root tile reports the initialization as complete only after all
 * the tiles have completed their initialization.
 * Called without locks during early probe and with locks held in the
 * resume path.
 *
 * Returns 0 on success, and -error number on failure.
 */
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
	u32 status, request = DGFX_GET_INIT_STATUS;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	int timeout_us = 180000000; /* 3 min */
	int ret;

	if (xe->info.skip_pcode)
		return 0;

	if (!IS_DGFX(xe))
		return 0;

	if (locked)
		mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
				DGFX_INIT_STATUS_COMPLETE,
				DGFX_INIT_STATUS_COMPLETE,
				&status, false, timeout_us, locked);

	if (locked)
		mutex_unlock(&tile->pcode.lock);

	if (ret)
		drm_err(&xe->drm,
			"PCODE initialization timed out after 3 min\n");

	return ret;
}

/**
 * xe_pcode_init - initialize components of PCODE
 * @tile: tile instance
 *
 * This function initializes the xe_pcode component.
 * To be called once only during probe.
 */
void xe_pcode_init(struct xe_tile *tile)
{
	drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}

/**
 * xe_pcode_probe_early - initializes PCODE
 * @xe: xe instance
 *
 * This function checks the initialization status of PCODE.
 * To be called once only during early probe without locks.
 *
 * Returns 0 on success, error code otherwise
 */
int xe_pcode_probe_early(struct xe_device *xe)
{
	return xe_pcode_ready(xe, false);
}