// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_pcode.h"

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/error-injection.h>

#include <drm/drm_managed.h>

#include "xe_assert.h"
#include "xe_device.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"

/**
 * DOC: PCODE
 *
 * Xe PCODE is the component responsible for interfacing with the PCODE
 * firmware.
 * It shall provide a very simple ABI to other Xe components, but be the
 * single and consolidated place that will communicate with PCODE. All read
 * and write operations to PCODE will be internal and private to this component.
 *
 * What's next:
 * - PCODE hw metrics
 * - PCODE for display operations
 */

static int pcode_mailbox_status(struct xe_tile *tile)
{
	u32 err;
	static const struct pcode_err_decode err_decode[] = {
		[PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
		[PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
		[PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
		[PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
		[PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
		[PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
						 "GT ratio out of range"},
		[PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
		[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
	};

	err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
	if (err) {
		drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
			err_decode[err].str ?: "Unknown");
		return err_decode[err].errno ?: -EPROTO;
	}

	return 0;
}

static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			      unsigned int timeout_ms, bool return_data,
			      bool atomic)
{
	struct xe_mmio *mmio = &tile->mmio;
	int err;

	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
		return -EAGAIN;

	xe_mmio_write32(mmio, PCODE_DATA0, *data0);
	xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
	xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);

	err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
			     timeout_ms * USEC_PER_MSEC, NULL, atomic);
	if (err)
		return err;

	if (return_data) {
		*data0 = xe_mmio_read32(mmio, PCODE_DATA0);
		if (data1)
			*data1 = xe_mmio_read32(mmio, PCODE_DATA1);
	}

	return pcode_mailbox_status(tile);
}
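
/*
 * For reference, a rough sketch of the handshake __pcode_mailbox_rw()
 * implements above, assuming the register layout from xe_pcode_api.h:
 *
 *	1. Check that PCODE_READY is clear in PCODE_MAILBOX, i.e. no
 *	   command is already in flight (-EAGAIN otherwise).
 *	2. Write the command operands to PCODE_DATA0/PCODE_DATA1.
 *	3. Write PCODE_READY | mbox to PCODE_MAILBOX to fire the command.
 *	4. Wait for PCODE to clear PCODE_READY again, optionally read the
 *	   reply dwords back, then decode PCODE_ERROR_MASK via
 *	   pcode_mailbox_status().
 */
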
static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
			    unsigned int timeout_ms, bool return_data,
			    bool atomic)
{
	if (tile_to_xe(tile)->info.skip_pcode)
		return 0;

	lockdep_assert_held(&tile->pcode.lock);

	return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}

int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0, u32 data1, int timeout)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, &data0, &data1, timeout, false, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&tile->pcode.lock);
	err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
	mutex_unlock(&tile->pcode.lock);

	return err;
}

static int pcode_try_request(struct xe_tile *tile, u32 mbox,
			     u32 request, u32 reply_mask, u32 reply,
			     u32 *status, bool atomic, int timeout_us, bool locked)
{
	int slept, wait = 10;

	xe_tile_assert(tile, timeout_us > 0);

	for (slept = 0; slept < timeout_us; slept += wait) {
		if (locked)
			*status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						   atomic);
		else
			*status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
						     atomic);
		if ((*status == 0) && ((request & reply_mask) == reply))
			return 0;

		if (atomic)
			udelay(wait);
		else
			usleep_range(wait, wait << 1);
		wait <<= 1;
	}

	return -ETIMEDOUT;
}

/**
 * xe_pcode_request - send PCODE request until acknowledgment
 * @tile: tile
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms and, if this times out, for another 50 ms with
 * preemption disabled.
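 *
 * An illustrative sketch of a caller; FAKE_MBOX, FAKE_REQ, FAKE_ACK_MASK
 * and FAKE_ACK are made-up placeholders, not real PCODE commands::
 *
 *	int err;
 *
 *	err = xe_pcode_request(tile, FAKE_MBOX, FAKE_REQ,
 *			       FAKE_ACK_MASK, FAKE_ACK, 1);
 *	if (err)
 *		return err;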
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
		     u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	xe_tile_assert(tile, timeout_base_ms <= 3);

	mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				false, timeout_base_ms * 1000, true);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_err(&tile_to_xe(tile)->drm,
		"PCODE timeout, retrying with preemption disabled\n");
	preempt_disable();
	ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
				true, 50 * 1000, true);
	preempt_enable();

out:
	mutex_unlock(&tile->pcode.lock);
	return status ? status : ret;
}

/**
 * xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
 * @tile: tile instance
 * @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
 * @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
 *
 * This function initializes PCODE's QOS frequency table for a proper minimal
 * frequency/power steering decision, depending on the current requested GT
 * frequency. For older platforms this was a more complete table including
 * the IA freq. However, for the latest platforms this table became a simple
 * 1-1 Ring vs GT frequency. Even so, without setting it, PCODE might not
 * take the right decisions for some memory frequencies and affect latency.
 *
 * It returns 0 on success, -EINVAL if @max_gt_freq is not higher than
 * @min_gt_freq, and other errors directly translated from the PCODE error
 * returns:
 * - -ENXIO: "Illegal Command"
 * - -ETIMEDOUT: "Timed out"
 * - -EINVAL: "Illegal Data"
 * - -ENXIO: "Illegal Subcommand"
 * - -EBUSY: "PCODE Locked"
 * - -EOVERFLOW: "GT ratio out of range"
 * - -EACCES: "PCODE Rejected"
 * - -EPROTO: "Unknown"
 */
int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
				 u32 max_gt_freq)
{
	int ret;
	u32 freq;

	if (!tile_to_xe(tile)->info.has_llc)
		return 0;

	if (max_gt_freq <= min_gt_freq)
		return -EINVAL;

	mutex_lock(&tile->pcode.lock);
	for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
		u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;

		ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
				       &data, NULL, 1, false, false);
		if (ret)
			goto unlock;
	}

unlock:
	mutex_unlock(&tile->pcode.lock);
	return ret;
}
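
/*
 * A worked example of the table encoding above, with hypothetical RPn/RP0
 * values of 6 and 24 (i.e. 300 MHz and 1200 MHz in units of 50 MHz):
 *
 *	freq = 6:  data = 6 << PCODE_FREQ_RING_RATIO_SHIFT | 6
 *	...
 *	freq = 24: data = 24 << PCODE_FREQ_RING_RATIO_SHIFT | 24
 *
 * i.e. one PCODE_WRITE_MIN_FREQ_TABLE write per 50 MHz step in [RPn, RP0],
 * each encoding a 1:1 Ring vs GT frequency entry.
 */
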
/**
 * xe_pcode_ready - Ensure PCODE is initialized
 * @xe: xe instance
 * @locked: true to take the PCODE mailbox lock around the poll, false
 *          otherwise
 *
 * The PCODE init mailbox is polled only on the root GT of the root tile,
 * as the root tile reports the initialization as complete only after all
 * tiles have completed their initialization. Called without locking on
 * early probe, and with locking in the resume path.
 *
 * Returns 0 on success, and a negative error number on failure.
 */
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
	u32 status, request = DGFX_GET_INIT_STATUS;
	struct xe_tile *tile = xe_device_get_root_tile(xe);
	int timeout_us = 180000000; /* 3 min */
	int ret;

	if (xe->info.skip_pcode)
		return 0;

	if (!IS_DGFX(xe))
		return 0;

	if (locked)
		mutex_lock(&tile->pcode.lock);

	ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
				DGFX_INIT_STATUS_COMPLETE,
				DGFX_INIT_STATUS_COMPLETE,
				&status, false, timeout_us, locked);

	if (locked)
		mutex_unlock(&tile->pcode.lock);

	if (ret)
		drm_err(&xe->drm,
			"PCODE initialization timed out after 3 min\n");

	return ret;
}

/**
 * xe_pcode_init - initialize components of PCODE
 * @tile: tile instance
 *
 * This function initializes the xe_pcode component.
 * To be called once only during probe.
 */
void xe_pcode_init(struct xe_tile *tile)
{
	drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}

/**
 * xe_pcode_probe_early - check early PCODE initialization status
 * @xe: xe instance
 *
 * This function checks the initialization status of PCODE.
 * To be called once only during early probe, without locks.
 *
 * Returns 0 on success, error code otherwise.
 */
int xe_pcode_probe_early(struct xe_device *xe)
{
	return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */

/* Helpers with drm device. These should only be called by the display side */
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)

int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_read(tile, mbox, val, val1);
}

int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
}

int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
			u32 reply_mask, u32 reply, int timeout_base_ms)
{
	struct xe_device *xe = to_xe_device(drm);
	struct xe_tile *tile = xe_device_get_root_tile(xe);

	return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
}

#endif
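
/*
 * An illustrative sketch of how the display side is expected to use the
 * helpers above; FAKE_DISPLAY_MBOX is a made-up placeholder, not a real
 * PCODE command:
 *
 *	u32 val = 0;
 *	int err;
 *
 *	err = intel_pcode_read(&xe->drm, FAKE_DISPLAY_MBOX, &val, NULL);
 *	if (err)
 *		return err;
 */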