// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt_tlb_invalidation.h"

#include "abi/guc_actions_abi.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_gt_stats.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_trace.h"
#include "regs/xe_guc_regs.h"

#define FENCE_STACK_BIT		DMA_FENCE_FLAG_USER_BITS

/*
 * TLB invalidation depends on pending commands in the CT queue and then the
 * real invalidation time. Double up the time to process the full CT queue
 * just to be on the safe side.
 */
static long tlb_timeout_jiffies(struct xe_gt *gt)
{
	/* this reflects what HW/GuC needs to process a TLB invalidation request */
	const long hw_tlb_timeout = HZ / 4;

	/* this estimates the actual delay caused by the CTB transport */
	long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);

	return hw_tlb_timeout + 2 * delay;
}

static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	xe_pm_runtime_put(gt_to_xe(fence->gt));
	fence->gt = NULL; /* fini() should only be called once */
}

static void
__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);

	trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
	xe_gt_tlb_invalidation_fence_fini(fence);
	dma_fence_signal(&fence->base);
	if (!stack)
		dma_fence_put(&fence->base);
}

static void
invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
{
	list_del(&fence->link);
	__invalidation_fence_signal(xe, fence);
}

void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
{
	if (WARN_ON_ONCE(!fence->gt))
		return;

	__invalidation_fence_signal(gt_to_xe(fence->gt), fence);
}

static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
	struct xe_gt *gt = container_of(work, struct xe_gt,
					tlb_invalidation.fence_tdr.work);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;

	LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);

	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		s64 since_inval_ms = ktime_ms_delta(ktime_get(),
						    fence->invalidation_time);

		if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
			break;

		trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
		xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
			  fence->seqno, gt->tlb_invalidation.seqno_recv);

		fence->base.error = -ETIME;
		invalidation_fence_signal(xe, fence);
	}
	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		queue_delayed_work(system_wq,
				   &gt->tlb_invalidation.fence_tdr,
				   tlb_timeout_jiffies(gt));
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
}

/**
 * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
 * @gt: GT structure
 *
 * Initialize GT TLB invalidation state. This is purely software initialization
 * and should be called once during driver load.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;
	INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
	spin_lock_init(&gt->tlb_invalidation.pending_lock);
	spin_lock_init(&gt->tlb_invalidation.lock);
	INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
			  xe_gt_tlb_fence_timeout);

	return 0;
}

/**
 * xe_gt_tlb_invalidation_reset - GT TLB invalidation reset
 * @gt: GT structure
 *
 * Signal any pending invalidation fences; should be called during a GT reset.
 */
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
{
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	int pending_seqno;

	/*
	 * CT channel is already disabled at this point. No new TLB requests can
	 * appear.
	 */

	mutex_lock(&gt->uc.guc.ct.lock);
	spin_lock_irq(&gt->tlb_invalidation.pending_lock);
	cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
	/*
	 * We might have various kworkers waiting for TLB flushes to complete
	 * which are not tracked with an explicit TLB fence. However, at this
	 * stage that will never happen since the CT is already disabled, so
	 * make sure we signal them here under the assumption that we have
	 * completed a full GT reset.
	 */
	if (gt->tlb_invalidation.seqno == 1)
		pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
	else
		pending_seqno = gt->tlb_invalidation.seqno - 1;
	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link)
		invalidation_fence_signal(gt_to_xe(gt), fence);
	spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	mutex_unlock(&gt->uc.guc.ct.lock);
}

/*
 * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so treat a delta of more than
 * half the seqno space as a wrap when deciding whether the given seqno has
 * already been processed.
 */
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);

	if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
		return false;

	if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return seqno_recv >= seqno;
}

static int send_tlb_invalidation(struct xe_guc *guc,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u32 *action, int len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	int seqno;
	int ret;

	xe_gt_assert(gt, fence);

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in order, which they currently are. If that changes, the algorithm
	 * will need to be updated.
	 */

	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	fence->seqno = seqno;
	trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
	action[1] = seqno;
	ret = xe_guc_ct_send_locked(&guc->ct, action, len,
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret) {
		spin_lock_irq(&gt->tlb_invalidation.pending_lock);
		/*
		 * We haven't actually published the TLB fence as per
		 * pending_fences, but in theory our seqno could have already
		 * been written as we acquired the pending_lock. In such a case
		 * we can just go ahead and signal the fence here.
		 */
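		/*
		 * Illustrative sequence of that race (an assumption about
		 * timing, not a new code path): the GuC completes the
		 * invalidation and xe_guc_tlb_invalidation_done_handler()
		 * updates seqno_recv on another CPU between
		 * xe_guc_ct_send_locked() returning and pending_lock being
		 * taken above, so the check below already sees our seqno as
		 * processed.
		 */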
		if (tlb_invalidation_seqno_past(gt, seqno)) {
			__invalidation_fence_signal(xe, fence);
		} else {
			fence->invalidation_time = ktime_get();
			list_add_tail(&fence->link,
				      &gt->tlb_invalidation.pending_fences);

			if (list_is_singular(&gt->tlb_invalidation.pending_fences))
				queue_delayed_work(system_wq,
						   &gt->tlb_invalidation.fence_tdr,
						   tlb_timeout_jiffies(gt));
		}
		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
	} else {
		__invalidation_fence_signal(xe, fence);
	}
	if (!ret) {
		gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
			TLB_INVALIDATION_SEQNO_MAX;
		if (!gt->tlb_invalidation.seqno)
			gt->tlb_invalidation.seqno = 1;
	}
	mutex_unlock(&guc->ct.lock);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);

	return ret;
}

#define MAKE_INVAL_OP(type)	((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
		XE_GUC_TLB_INVAL_FLUSH_CACHE)

/**
 * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
 * @gt: GT structure
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 *
 * Issue a TLB invalidation for the GuC. Completion of the TLB invalidation is
 * asynchronous and the caller can use the invalidation fence to wait for
 * completion.
 *
 * Return: 0 on success, negative error code on error
 */
static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
				      struct xe_gt_tlb_invalidation_fence *fence)
{
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0, /* seqno, replaced in send_tlb_invalidation */
		MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
	};
	int ret;

	ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
				    ARRAY_SIZE(action));
	/*
	 * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
	 * should be nuked on a GT reset so this error can be ignored.
	 */
	if (ret == -ECANCELED)
		return 0;

	return ret;
}

/**
 * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
 * @gt: GT structure
 *
 * Issue a TLB invalidation for the GGTT. Completion of the TLB invalidation is
 * synchronous.
 *
 * Return: 0 on success, negative error code on error
 */
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;

	if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
	    gt->uc.guc.submission_state.enabled) {
		struct xe_gt_tlb_invalidation_fence fence;
		int ret;

		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
		if (ret)
			return ret;

		xe_gt_tlb_invalidation_fence_wait(&fence);
	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
		struct xe_mmio *mmio = &gt->mmio;

		if (IS_SRIOV_VF(xe))
			return 0;

		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
			xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
					PVC_GUC_TLB_INV_DESC0_VALID);
		} else {
			xe_mmio_write32(mmio, GUC_TLB_INV_CR,
					GUC_TLB_INV_CR_INVALIDATE);
		}
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}

	return 0;
}

/*
 * Ensure that roundup_pow_of_two(length) doesn't overflow.
 * Note that roundup_pow_of_two() operates on unsigned long,
 * not on u64.
 */
#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))

/**
 * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
 * address range
 *
 * @gt: GT structure
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 * @start: start address
 * @end: end address
 * @asid: address space id
 *
 * Issue a range based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
				 struct xe_gt_tlb_invalidation_fence *fence,
				 u64 start, u64 end, u32 asid)
{
	struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN	7
	u32 action[MAX_TLB_INVALIDATION_LEN];
	u64 length = end - start;
	int len = 0;

	xe_gt_assert(gt, fence);

	/* Execlists not supported */
	if (gt_to_xe(gt)->info.force_execlist) {
		__invalidation_fence_signal(xe, fence);
		return 0;
	}

	action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
	action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
	if (!xe->info.has_range_tlb_invalidation ||
	    length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
	} else {
		u64 orig_start = start;
		u64 align;

		if (length < SZ_4K)
			length = SZ_4K;

		/*
		 * We need to invalidate at a higher granularity if the start
		 * address is not aligned to the length. In that case, find a
		 * length large enough to create an address mask covering the
		 * required range.
		 */
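		/*
		 * Illustrative example (derived from the code below, the
		 * addresses are made up): for start = 0x5000 and end = 0x7000
		 * the 8K range is not 8K aligned, so the loop below doubles
		 * the length to 16K and realigns start down to 0x4000,
		 * invalidating [0x4000, 0x8000) which covers the request.
		 */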
		align = roundup_pow_of_two(length);
		start = ALIGN_DOWN(start, align);
		end = ALIGN(end, align);
		length = align;
		while (start + length < end) {
			length <<= 1;
			start = ALIGN_DOWN(orig_start, length);
		}

		/*
		 * The minimum invalidation size for a 2MB page that the
		 * hardware expects is 16MB.
		 */
		if (length >= SZ_2M) {
			length = max_t(u64, SZ_16M, length);
			start = ALIGN_DOWN(orig_start, length);
		}

		xe_gt_assert(gt, length >= SZ_4K);
		xe_gt_assert(gt, is_power_of_2(length));
		xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
						    ilog2(SZ_2M) + 1)));
		xe_gt_assert(gt, IS_ALIGNED(start, length));

		action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
		action[len++] = asid;
		action[len++] = lower_32_bits(start);
		action[len++] = upper_32_bits(start);
		action[len++] = ilog2(length) - ilog2(SZ_4K);
	}

	xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);

	return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
}

/**
 * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
 * @gt: GT structure
 * @vm: VM to invalidate
 *
 * Invalidate the entire VM's address space.
 */
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
{
	struct xe_gt_tlb_invalidation_fence fence;
	u64 range = 1ull << vm->xe->info.va_bits;
	int ret;

	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);

	ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
	if (ret < 0)
		return;

	xe_gt_tlb_invalidation_fence_wait(&fence);
}

/**
 * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
 * @gt: GT structure
 * @fence: invalidation fence which will be signalled on TLB invalidation
 * completion
 * @vma: VMA to invalidate
 *
 * Issue a range based TLB invalidation if supported; if not, fall back to a
 * full TLB invalidation. Completion of the TLB invalidation is asynchronous
 * and the caller can use the invalidation fence to wait for completion.
 *
 * Return: Negative error code on error, 0 on success
 */
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
			       struct xe_gt_tlb_invalidation_fence *fence,
			       struct xe_vma *vma)
{
	xe_gt_assert(gt, vma);

	return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
					    xe_vma_end(vma),
					    xe_vma_vm(vma)->usm.asid);
}
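
/*
 * Illustrative caller pattern for the fence based interface above, mirroring
 * xe_gt_tlb_invalidation_vm() (the on-stack fence and error handling are
 * assumptions about the caller, not requirements of this file):
 *
 *	struct xe_gt_tlb_invalidation_fence fence;
 *	int ret;
 *
 *	xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 *	ret = xe_gt_tlb_invalidation_vma(gt, &fence, vma);
 *	if (ret >= 0)
 *		xe_gt_tlb_invalidation_fence_wait(&fence);
 *
 * The fence is signalled even when submission fails, so no extra cleanup is
 * needed on the error path.
 */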

/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: GuC object
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Parse the seqno of the TLB invalidation, wake any waiters for the seqno, and
 * signal any invalidation fences for the seqno. The algorithm for this depends
 * on seqnos being received in order and asserts this assumption.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gt_tlb_invalidation_fence *fence, *next;
	unsigned long flags;

	if (unlikely(len != 1))
		return -EPROTO;

	/*
	 * This can be run both directly from the IRQ handler and also from
	 * process_g2h_msg(). Only one may process any individual CT message,
	 * however the order they are processed here could result in skipping a
	 * seqno. To handle that we just process all the seqnos from the last
	 * seqno_recv up to and including the one in msg[0]. The delta should be
	 * very small so there shouldn't be many pending_fences we actually
	 * need to iterate over here.
	 *
	 * From the GuC POV we expect the seqnos to always appear in order, so
	 * if we see something later in the timeline we can be sure that
	 * anything appearing earlier has already signalled, just that we have
	 * yet to officially process the CT message, as if racing against
	 * process_g2h_msg().
	 */
	spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
	if (tlb_invalidation_seqno_past(gt, msg[0])) {
		spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
		return 0;
	}

	WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);

	list_for_each_entry_safe(fence, next,
				 &gt->tlb_invalidation.pending_fences, link) {
		trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);

		if (!tlb_invalidation_seqno_past(gt, fence->seqno))
			break;

		invalidation_fence_signal(xe, fence);
	}

	if (!list_empty(&gt->tlb_invalidation.pending_fences))
		mod_delayed_work(system_wq,
				 &gt->tlb_invalidation.fence_tdr,
				 tlb_timeout_jiffies(gt));
	else
		cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);

	spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);

	return 0;
}

static const char *
invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
{
	return "xe";
}

static const char *
invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	return "invalidation_fence";
}

static const struct dma_fence_ops invalidation_fence_ops = {
	.get_driver_name = invalidation_fence_get_driver_name,
	.get_timeline_name = invalidation_fence_get_timeline_name,
};

/**
 * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
 * @gt: GT structure
 * @fence: TLB invalidation fence to initialize
 * @stack: fence is a stack variable
 *
 * Initialize a TLB invalidation fence for use.
 * xe_gt_tlb_invalidation_fence_fini() will be called automatically when the
 * fence is signalled (all fences must signal), even on error.
 */
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
				       struct xe_gt_tlb_invalidation_fence *fence,
				       bool stack)
{
	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	spin_lock_irq(&gt->tlb_invalidation.lock);
	dma_fence_init(&fence->base, &invalidation_fence_ops,
		       &gt->tlb_invalidation.lock,
		       dma_fence_context_alloc(1), 1);
	spin_unlock_irq(&gt->tlb_invalidation.lock);
	INIT_LIST_HEAD(&fence->link);
	if (stack)
		set_bit(FENCE_STACK_BIT, &fence->base.flags);
	else
		dma_fence_get(&fence->base);
	fence->gt = gt;
}