/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_debug.h"
#include "kfd_device_queue_manager.h"
#include "kfd_topology.h"
#include <linux/file.h>
#include <uapi/linux/kfd_ioctl.h>
#include <uapi/linux/kfd_sysfs.h>

#define MAX_WATCH_ADDRESSES	4

int kfd_dbg_ev_query_debug_event(struct kfd_process *process,
				 unsigned int *queue_id,
				 unsigned int *gpu_id,
				 uint64_t exception_clear_mask,
				 uint64_t *event_status)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	if (!(process && process->debug_trap_enabled))
		return -ENODATA;

	mutex_lock(&process->event_mutex);
	*event_status = 0;
	*queue_id = 0;
	*gpu_id = 0;

	/* find and report queue events */
	pqm = &process->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		uint64_t tmp = process->exception_enable_mask;

		if (!pqn->q)
			continue;

		tmp &= pqn->q->properties.exception_status;

		if (!tmp)
			continue;

		*event_status = pqn->q->properties.exception_status;
		*queue_id = pqn->q->properties.queue_id;
		*gpu_id = pqn->q->device->id;
		pqn->q->properties.exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* find and report device events */
	for (i = 0; i < process->n_pdds; i++) {
		struct kfd_process_device *pdd = process->pdds[i];
		uint64_t tmp = process->exception_enable_mask
						& pdd->exception_status;

		if (!tmp)
			continue;

		*event_status = pdd->exception_status;
		*gpu_id = pdd->dev->id;
		pdd->exception_status &= ~exception_clear_mask;
		goto out;
	}

	/* report process events */
	if (process->exception_enable_mask & process->exception_status) {
		*event_status = process->exception_status;
		process->exception_status &= ~exception_clear_mask;
	}

out:
	mutex_unlock(&process->event_mutex);
	return *event_status ? 0 : -EAGAIN;
}

void debug_event_write_work_handler(struct work_struct *work)
{
	struct kfd_process *process;

	static const char write_data = '.';
	loff_t pos = 0;

	process = container_of(work,
			struct kfd_process,
			debug_event_workarea);

	if (process->debug_trap_enabled && process->dbg_ev_file)
		kernel_write(process->dbg_ev_file, &write_data, 1, &pos);
}

/* update process/device/queue exception status, write to descriptor
 * only if exception_status is enabled.
 */
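/* Returns true if the target process subscribed to any of the raised
 * exceptions through its exception_enable_mask (in that case the debugger
 * event file descriptor is written); returns false otherwise so the caller
 * can notify the runtime instead.
 */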
bool kfd_dbg_ev_raise(uint64_t event_mask,
		      struct kfd_process *process, struct kfd_node *dev,
		      unsigned int source_id, bool use_worker,
		      void *exception_data, size_t exception_data_size)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;
	static const char write_data = '.';
	loff_t pos = 0;
	bool is_subscribed = true;

	if (!(process && process->debug_trap_enabled))
		return false;

	mutex_lock(&process->event_mutex);

	if (event_mask & KFD_EC_MASK_DEVICE) {
		for (i = 0; i < process->n_pdds; i++) {
			struct kfd_process_device *pdd = process->pdds[i];

			if (pdd->dev != dev)
				continue;

			pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;

			if (event_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
				if (!pdd->vm_fault_exc_data) {
					pdd->vm_fault_exc_data = kmemdup(
							exception_data,
							exception_data_size,
							GFP_KERNEL);
					if (!pdd->vm_fault_exc_data)
						pr_debug("Failed to allocate exception data memory");
				} else {
					pr_debug("Debugger exception data not saved\n");
					print_hex_dump_bytes("exception data: ",
							DUMP_PREFIX_OFFSET,
							exception_data,
							exception_data_size);
				}
			}
			break;
		}
	} else if (event_mask & KFD_EC_MASK_PROCESS) {
		process->exception_status |= event_mask & KFD_EC_MASK_PROCESS;
	} else {
		pqm = &process->pqm;
		list_for_each_entry(pqn, &pqm->queues,
				process_queue_list) {
			int target_id;

			if (!pqn->q)
				continue;

			target_id = event_mask & KFD_EC_MASK(EC_QUEUE_NEW) ?
					pqn->q->properties.queue_id :
					pqn->q->doorbell_id;

			if (pqn->q->device != dev || target_id != source_id)
				continue;

			pqn->q->properties.exception_status |= event_mask;
			break;
		}
	}

	if (process->exception_enable_mask & event_mask) {
		if (use_worker)
			schedule_work(&process->debug_event_workarea);
		else
			kernel_write(process->dbg_ev_file,
					&write_data,
					1,
					&pos);
	} else {
		is_subscribed = false;
	}

	mutex_unlock(&process->event_mutex);

	return is_subscribed;
}

/* set pending event queue entry from ring entry */
bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev,
				   unsigned int pasid,
				   uint32_t doorbell_id,
				   uint64_t trap_mask,
				   void *exception_data,
				   size_t exception_data_size)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;
	bool signaled_to_debugger_or_runtime = false;

	p = kfd_lookup_process_by_pasid(pasid, &pdd);

	if (!pdd)
		return false;

	if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true,
			      exception_data, exception_data_size)) {
		struct process_queue_manager *pqm;
		struct process_queue_node *pqn;

		if (!!(trap_mask & KFD_EC_MASK_QUEUE) &&
		    p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
			mutex_lock(&p->mutex);

			pqm = &p->pqm;
			list_for_each_entry(pqn, &pqm->queues,
					process_queue_list) {

				if (!(pqn->q && pqn->q->device == dev &&
				      pqn->q->doorbell_id == doorbell_id))
					continue;

				kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id,
							      trap_mask);

				signaled_to_debugger_or_runtime = true;

				break;
			}

			mutex_unlock(&p->mutex);
		} else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
			kfd_evict_process_device(pdd);
			kfd_signal_vm_fault_event(pdd, NULL, exception_data);

			signaled_to_debugger_or_runtime = true;
		}
	} else {
		signaled_to_debugger_or_runtime = true;
	}

	kfd_unref_process(p);

	return signaled_to_debugger_or_runtime;
}

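/* Handle a debugger request to forward an exception to the HSA runtime.
 * A device memory violation replays the saved VM fault data and evicts the
 * faulting process device, EC_PROCESS_RUNTIME releases the runtime-enable
 * semaphore, and any remaining reason bits are passed on through
 * kfd_send_exception_to_runtime().
 */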
int kfd_dbg_send_exception_to_runtime(struct kfd_process *p,
				      unsigned int dev_id,
				      unsigned int queue_id,
				      uint64_t error_reason)
{
	if (error_reason & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) {
		struct kfd_process_device *pdd = NULL;
		struct kfd_hsa_memory_exception_data *data;
		int i;

		for (i = 0; i < p->n_pdds; i++) {
			if (p->pdds[i]->dev->id == dev_id) {
				pdd = p->pdds[i];
				break;
			}
		}

		if (!pdd)
			return -ENODEV;

		data = (struct kfd_hsa_memory_exception_data *)
				pdd->vm_fault_exc_data;

		kfd_evict_process_device(pdd);
		kfd_signal_vm_fault_event(pdd, NULL, data);
		error_reason &= ~KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION);
	}

	if (error_reason & (KFD_EC_MASK(EC_PROCESS_RUNTIME))) {
		/*
		 * block should only happen after the debugger receives runtime
		 * enable notice.
		 */
		up(&p->runtime_enable_sema);
		error_reason &= ~KFD_EC_MASK(EC_PROCESS_RUNTIME);
	}

	if (error_reason)
		return kfd_send_exception_to_runtime(p, queue_id, error_reason);

	return 0;
}

static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
{
	struct mqd_update_info minfo = {0};
	int err;

	if (!q)
		return 0;

	if (!kfd_dbg_has_cwsr_workaround(q->device))
		return 0;

	if (enable && q->properties.is_user_cu_masked)
		return -EBUSY;

	minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE;

	q->properties.is_dbg_wa = enable;
	err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo);
	if (err)
		q->properties.is_dbg_wa = false;

	return err;
}

static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable)
{
	struct process_queue_manager *pqm = &target->pqm;
	struct process_queue_node *pqn;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		r = kfd_dbg_set_queue_workaround(pqn->q, enable);
		if (enable && r)
			goto unwind;
	}

	return 0;

unwind:
	list_for_each_entry(pqn, &pqm->queues, process_queue_list)
		kfd_dbg_set_queue_workaround(pqn->q, false);

	if (enable)
		target->runtime_info.runtime_state = r == -EBUSY ?
				DEBUG_RUNTIME_STATE_ENABLED_BUSY :
				DEBUG_RUNTIME_STATE_ENABLED_ERROR;

	return r;
}

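/* Push the per-process debug configuration (SPI debug control, watch points,
 * debug flags and SQ trap enable) to the MES scheduler.  The MES process
 * context buffer is allocated lazily on first use; devices without per-VMID
 * debug support return immediately.
 */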
int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
{
	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
	uint32_t flags = pdd->process->dbg_flags;
	struct amdgpu_device *adev = pdd->dev->adev;
	int r;

	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
		return 0;

	if (!pdd->proc_ctx_cpu_ptr) {
		r = amdgpu_amdkfd_alloc_kernel_mem(adev,
				AMDGPU_MES_PROC_CTX_SIZE,
				AMDGPU_GEM_DOMAIN_GTT,
				&pdd->proc_ctx_bo,
				&pdd->proc_ctx_gpu_addr,
				&pdd->proc_ctx_cpu_ptr,
				false);
		if (r) {
			dev_err(adev->dev,
				"failed to allocate process context bo\n");
			return r;
		}
		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
	}

	return amdgpu_mes_set_shader_debugger(pdd->dev->adev,
			pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
			pdd->watch_points, flags, sq_trap_en,
			ffs(pdd->dev->xcc_mask) - 1);
}

#define KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1
static int kfd_dbg_get_dev_watch_id(struct kfd_process_device *pdd, int *watch_id)
{
	int i;

	*watch_id = KFD_DEBUGGER_INVALID_WATCH_POINT_ID;

	spin_lock(&pdd->dev->watch_points_lock);

	for (i = 0; i < MAX_WATCH_ADDRESSES; i++) {
		/* device watchpoint in use so skip */
		if ((pdd->dev->alloc_watch_ids >> i) & 0x1)
			continue;

		pdd->alloc_watch_ids |= 0x1 << i;
		pdd->dev->alloc_watch_ids |= 0x1 << i;
		*watch_id = i;
		spin_unlock(&pdd->dev->watch_points_lock);
		return 0;
	}

	spin_unlock(&pdd->dev->watch_points_lock);

	return -ENOMEM;
}

static void kfd_dbg_clear_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	spin_lock(&pdd->dev->watch_points_lock);

	/* process owns device watch point so safe to clear */
	if ((pdd->alloc_watch_ids >> watch_id) & 0x1) {
		pdd->alloc_watch_ids &= ~(0x1 << watch_id);
		pdd->dev->alloc_watch_ids &= ~(0x1 << watch_id);
	}

	spin_unlock(&pdd->dev->watch_points_lock);
}

static bool kfd_dbg_owns_dev_watch_id(struct kfd_process_device *pdd, int watch_id)
{
	bool owns_watch_id = false;

	spin_lock(&pdd->dev->watch_points_lock);
	owns_watch_id = watch_id < MAX_WATCH_ADDRESSES &&
			((pdd->alloc_watch_ids >> watch_id) & 0x1);

	spin_unlock(&pdd->dev->watch_points_lock);

	return owns_watch_id;
}

int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
					 uint32_t watch_id)
{
	int r;

	if (!kfd_dbg_owns_dev_watch_id(pdd, watch_id))
		return -EINVAL;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r)
			return r;
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	pdd->watch_points[watch_id] = pdd->dev->kfd2kgd->clear_address_watch(
			pdd->dev->adev,
			watch_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd, true);

	kfd_dbg_clear_dev_watch_id(pdd, watch_id);

	return r;
}

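/* Allocate a free device watch point for this process and program the
 * address watch on every XCC instance, unmapping queues (HWS) or refreshing
 * the MES debug state around the register write.  The allocated id is
 * returned through *watch_id and released again if the restore step fails.
 */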
int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
					uint64_t watch_address,
					uint32_t watch_address_mask,
					uint32_t *watch_id,
					uint32_t watch_mode)
{
	int xcc_id, r = kfd_dbg_get_dev_watch_id(pdd, watch_id);
	uint32_t xcc_mask = pdd->dev->xcc_mask;

	if (r)
		return r;

	if (!pdd->dev->kfd->shared_resources.enable_mes) {
		r = debug_lock_and_unmap(pdd->dev->dqm);
		if (r) {
			kfd_dbg_clear_dev_watch_id(pdd, *watch_id);
			return r;
		}
	}

	amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
	for_each_inst(xcc_id, xcc_mask)
		pdd->watch_points[*watch_id] = pdd->dev->kfd2kgd->set_address_watch(
				pdd->dev->adev,
				watch_address,
				watch_address_mask,
				*watch_id,
				watch_mode,
				pdd->dev->vm_info.last_vmid_kfd,
				xcc_id);
	amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

	if (!pdd->dev->kfd->shared_resources.enable_mes)
		r = debug_map_and_unlock(pdd->dev->dqm);
	else
		r = kfd_dbg_set_mes_debug_mode(pdd, true);

	/* HWS is broken so no point in HW rollback but release the watchpoint anyways */
	if (r)
		kfd_dbg_clear_dev_watch_id(pdd, *watch_id);

	return 0;
}

static void kfd_dbg_clear_process_address_watch(struct kfd_process *target)
{
	int i, j;

	for (i = 0; i < target->n_pdds; i++)
		for (j = 0; j < MAX_WATCH_ADDRESSES; j++)
			kfd_dbg_trap_clear_dev_address_watch(target->pdds[i], j);
}

int kfd_dbg_trap_set_flags(struct kfd_process *target, uint32_t *flags)
{
	uint32_t prev_flags = target->dbg_flags;
	int i, r = 0, rewind_count = 0;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_topology_device *topo_dev =
				kfd_topology_device_by_id(target->pdds[i]->dev->id);
		uint32_t caps = topo_dev->node_props.capability;
		uint32_t caps2 = topo_dev->node_props.capability2;

		if (!(caps & HSA_CAP_TRAP_DEBUG_PRECISE_MEMORY_OPERATIONS_SUPPORTED) &&
		    (*flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP)) {
			*flags = prev_flags;
			return -EACCES;
		}

		if (!(caps & HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED) &&
		    (*flags & KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP)) {
			*flags = prev_flags;
			return -EACCES;
		}

		if (!(caps2 & HSA_CAP2_TRAP_DEBUG_LDS_OUT_OF_ADDR_RANGE_SUPPORTED) &&
		    (*flags & KFD_DBG_TRAP_FLAG_LDS_OUT_OF_ADDR_RANGE)) {
			*flags = prev_flags;
			return -EACCES;
		}
	}

	target->dbg_flags = *flags;
	*flags = prev_flags;
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
			continue;

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r) {
			target->dbg_flags = prev_flags;
			break;
		}

		rewind_count++;
	}

	/* Rewind flags */
	if (r) {
		target->dbg_flags = prev_flags;

		for (i = 0; i < rewind_count; i++) {
			struct kfd_process_device *pdd = target->pdds[i];

			if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
				continue;

			if (!pdd->dev->kfd->shared_resources.enable_mes)
				debug_refresh_runlist(pdd->dev->dqm);
			else
				kfd_dbg_set_mes_debug_mode(pdd, true);
		}
	}

	return r;
}

/* kfd_dbg_trap_deactivate:
 *	target: target process
 *	unwind: If this is unwinding a failed kfd_dbg_trap_enable()
 *	unwind_count:
 *		If unwind == true, how far down the pdd list we need
 *		to unwind
 *		else: ignored
 */
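/* When not unwinding, suspended queues are resumed and the address watches,
 * wave launch mode and debug flags are cleared first.  Each device then has
 * its SPI debug state restored and any reserved debug VMID released before
 * the CWSR workaround is dropped.
 */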
void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count)
{
	int i;

	if (!unwind) {
		uint32_t flags = 0;
		int resume_count = resume_queues(target, 0, NULL);

		if (resume_count)
			pr_debug("Resumed %d queues\n", resume_count);

		cancel_work_sync(&target->debug_event_workarea);
		kfd_dbg_clear_process_address_watch(target);
		kfd_dbg_trap_set_wave_launch_mode(target, 0);

		kfd_dbg_trap_set_flags(target, &flags);
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		/* If this is an unwind, and we have unwound the required
		 * enable calls on the pdd list, we need to stop now
		 * otherwise we may mess up another debugger session.
		 */
		if (unwind && i == unwind_count)
			break;

		kfd_process_set_trap_debug_flag(&pdd->qpd, false);

		/* GFX off is already disabled by debug activate if not RLC restore supported. */
		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override =
				pdd->dev->kfd2kgd->disable_debug_trap(
				pdd->dev->adev,
				target->runtime_info.ttmp_setup,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev) &&
				release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))
			pr_err("Failed to release debug vmid on [%i]\n", pdd->dev->id);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			debug_refresh_runlist(pdd->dev->dqm);
		else
			kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
	}

	kfd_dbg_set_workaround(target, false);
}

static void kfd_dbg_clean_exception_status(struct kfd_process *target)
{
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	int i;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		kfd_process_drain_interrupts(pdd);

		pdd->exception_status = 0;
	}

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		pqn->q->properties.exception_status = 0;
	}

	target->exception_status = 0;
}

int kfd_dbg_trap_disable(struct kfd_process *target)
{
	if (!target->debug_trap_enabled)
		return 0;

	/*
	 * Defer deactivation to runtime if runtime not enabled otherwise reset
	 * attached running target runtime state to enable for re-attach.
	 */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_deactivate(target, false, 0);
	else if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED)
		target->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED;

	cancel_work_sync(&target->debug_event_workarea);
	fput(target->dbg_ev_file);
	target->dbg_ev_file = NULL;

	if (target->debugger_process) {
		atomic_dec(&target->debugger_process->debugged_process_count);
		target->debugger_process = NULL;
	}

	target->debug_trap_enabled = false;
	kfd_dbg_clean_exception_status(target);
	kfd_unref_process(target);

	return 0;
}

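/* Enable trap debugging on every device of the target process: apply the
 * CWSR workaround, reserve a debug VMID where per-VMID debugging is not
 * supported, program the SPI debug registers and flag the trap handler as
 * debug enabled.  Any failure unwinds the devices enabled so far, making
 * activation all or nothing.
 */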
int kfd_dbg_trap_activate(struct kfd_process *target)
{
	int i, r = 0;

	r = kfd_dbg_set_workaround(target, true);
	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) {
			r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);

			if (r) {
				target->runtime_info.runtime_state = (r == -EBUSY) ?
					DEBUG_RUNTIME_STATE_ENABLED_BUSY :
					DEBUG_RUNTIME_STATE_ENABLED_ERROR;

				goto unwind_err;
			}
		}

		/* Disable GFX OFF to prevent garbage read/writes to debug registers.
		 * If RLC restore of debug registers is not supported and runtime enable
		 * hasn't done so already on ttmp setup request, restore the trap config registers.
		 *
		 * If RLC restore of debug registers is not supported, keep gfx off disabled for
		 * the debug session.
		 */
		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		if (!(kfd_dbg_is_rlc_restore_supported(pdd->dev) ||
				target->runtime_info.ttmp_setup))
			pdd->dev->kfd2kgd->enable_debug_trap(pdd->dev->adev, true,
					pdd->dev->vm_info.last_vmid_kfd);

		pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
					pdd->dev->adev,
					false,
					pdd->dev->vm_info.last_vmid_kfd);

		if (kfd_dbg_is_rlc_restore_supported(pdd->dev))
			amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		/*
		 * Setting the debug flag in the trap handler requires that the TMA has been
		 * allocated, which occurs during CWSR initialization.
		 * In the event that CWSR has not been initialized at this point, setting the
		 * flag will be called again during CWSR initialization if the target process
		 * is still debug enabled.
		 */
		kfd_process_set_trap_debug_flag(&pdd->qpd, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r) {
			target->runtime_info.runtime_state =
					DEBUG_RUNTIME_STATE_ENABLED_ERROR;
			goto unwind_err;
		}
	}

	return 0;

unwind_err:
	/* Enabling debug failed, we need to disable on
	 * all GPUs so the enable is all or nothing.
	 */
	kfd_dbg_trap_deactivate(target, true, i);
	return r;
}

int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd,
			void __user *runtime_info, uint32_t *runtime_size)
{
	struct file *f;
	uint32_t copy_size;
	int i, r = 0;

	if (target->debug_trap_enabled)
		return -EALREADY;

	/* Enable pre-checks */
	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		if (!KFD_IS_SOC15(pdd->dev))
			return -ENODEV;

		if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||
					 kfd_dbg_has_cwsr_workaround(pdd->dev)))
			return -EBUSY;
	}

	copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info));

	f = fget(fd);
	if (!f) {
		pr_err("Failed to get file for (%i)\n", fd);
		return -EBADF;
	}

	target->dbg_ev_file = f;

	/* defer activation to runtime if not runtime enabled */
	if (target->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED)
		kfd_dbg_trap_activate(target);

	/* We already hold the process reference but hold another one for the
	 * debug session.
	 */
	kref_get(&target->ref);
	target->debug_trap_enabled = true;

	if (target->debugger_process)
		atomic_inc(&target->debugger_process->debugged_process_count);

	if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) {
		kfd_dbg_trap_deactivate(target, false, 0);
		r = -EFAULT;
	}

	*runtime_size = sizeof(target->runtime_info);

	return r;
}

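/* Check a wave launch trap override request against every device attached to
 * the process and report the supported trap mask bits back to the caller;
 * requesting a bit that is not supported fails with -EACCES.
 */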
static int kfd_dbg_validate_trap_override_request(struct kfd_process *p,
						  uint32_t trap_override,
						  uint32_t trap_mask_request,
						  uint32_t *trap_mask_supported)
{
	int i = 0;

	*trap_mask_supported = 0xffffffff;

	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		int err = pdd->dev->kfd2kgd->validate_trap_override_request(
				pdd->dev->adev,
				trap_override,
				trap_mask_supported);

		if (err)
			return err;
	}

	if (trap_mask_request & ~*trap_mask_supported)
		return -EACCES;

	return 0;
}

int kfd_dbg_trap_set_wave_launch_override(struct kfd_process *target,
					uint32_t trap_override,
					uint32_t trap_mask_bits,
					uint32_t trap_mask_request,
					uint32_t *trap_mask_prev,
					uint32_t *trap_mask_supported)
{
	int r = 0, i;

	r = kfd_dbg_validate_trap_override_request(target,
						   trap_override,
						   trap_mask_request,
						   trap_mask_supported);

	if (r)
		return r;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_override = pdd->dev->kfd2kgd->set_wave_launch_trap_override(
				pdd->dev->adev,
				pdd->dev->vm_info.last_vmid_kfd,
				trap_override,
				trap_mask_bits,
				trap_mask_request,
				trap_mask_prev,
				pdd->spi_dbg_override);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r)
			break;
	}

	return r;
}

int kfd_dbg_trap_set_wave_launch_mode(struct kfd_process *target,
					uint8_t wave_launch_mode)
{
	int r = 0, i;

	if (wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT &&
			wave_launch_mode != KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG)
		return -EINVAL;

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
		pdd->spi_dbg_launch_mode = pdd->dev->kfd2kgd->set_wave_launch_mode(
				pdd->dev->adev,
				wave_launch_mode,
				pdd->dev->vm_info.last_vmid_kfd);
		amdgpu_gfx_off_ctrl(pdd->dev->adev, true);

		if (!pdd->dev->kfd->shared_resources.enable_mes)
			r = debug_refresh_runlist(pdd->dev->dqm);
		else
			r = kfd_dbg_set_mes_debug_mode(pdd, true);

		if (r)
			break;
	}

	return r;
}

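/* Copy the details of one pending exception back to the debugger.  Depending
 * on the exception code the source id names a queue, a GPU or the process
 * itself; EC_DEVICE_MEMORY_VIOLATION and EC_PROCESS_RUNTIME carry extra
 * payload that is truncated to the caller's info_size, and the exception can
 * optionally be cleared once it has been read.
 */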
int kfd_dbg_trap_query_exception_info(struct kfd_process *target,
		uint32_t source_id,
		uint32_t exception_code,
		bool clear_exception,
		void __user *info,
		uint32_t *info_size)
{
	bool found = false;
	int r = 0;
	uint32_t copy_size, actual_info_size = 0;
	uint64_t *exception_status_ptr = NULL;

	if (!target)
		return -EINVAL;

	if (!info || !info_size)
		return -EINVAL;

	mutex_lock(&target->event_mutex);

	if (KFD_DBG_EC_TYPE_IS_QUEUE(exception_code)) {
		/* Per queue exceptions */
		struct queue *queue = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			struct kfd_process_device *pdd = target->pdds[i];
			struct qcm_process_device *qpd = &pdd->qpd;

			list_for_each_entry(queue, &qpd->queues_list, list) {
				if (!found && queue->properties.queue_id == source_id) {
					found = true;
					break;
				}
			}
			if (found)
				break;
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(queue->properties.exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}
		exception_status_ptr = &queue->properties.exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_DEVICE(exception_code)) {
		/* Per device exceptions */
		struct kfd_process_device *pdd = NULL;
		int i;

		for (i = 0; i < target->n_pdds; i++) {
			pdd = target->pdds[i];
			if (pdd->dev->id == source_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			r = -EINVAL;
			goto out;
		}

		if (!(pdd->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_DEVICE_MEMORY_VIOLATION) {
			copy_size = min((size_t)(*info_size), pdd->vm_fault_exc_data_size);

			if (copy_to_user(info, pdd->vm_fault_exc_data, copy_size)) {
				r = -EFAULT;
				goto out;
			}
			actual_info_size = pdd->vm_fault_exc_data_size;
			if (clear_exception) {
				kfree(pdd->vm_fault_exc_data);
				pdd->vm_fault_exc_data = NULL;
				pdd->vm_fault_exc_data_size = 0;
			}
		}
		exception_status_ptr = &pdd->exception_status;
	} else if (KFD_DBG_EC_TYPE_IS_PROCESS(exception_code)) {
		/* Per process exceptions */
		if (!(target->exception_status & KFD_EC_MASK(exception_code))) {
			r = -ENODATA;
			goto out;
		}

		if (exception_code == EC_PROCESS_RUNTIME) {
			copy_size = min((size_t)(*info_size), sizeof(target->runtime_info));

			if (copy_to_user(info, (void *)&target->runtime_info, copy_size)) {
				r = -EFAULT;
				goto out;
			}

			actual_info_size = sizeof(target->runtime_info);
		}

		exception_status_ptr = &target->exception_status;
	} else {
		pr_debug("Bad exception type [%i]\n", exception_code);
		r = -EINVAL;
		goto out;
	}

	*info_size = actual_info_size;
	if (clear_exception)
		*exception_status_ptr &= ~KFD_EC_MASK(exception_code);
out:
	mutex_unlock(&target->event_mutex);
	return r;
}

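/* Write one kfd_dbg_device_info_entry per device into the user buffer.  Both
 * the entry count and the entry size are clamped to what the caller can
 * accept, while the full values are reported back through
 * number_of_device_infos and entry_size so user space can detect truncation;
 * exception bits may be cleared as part of the snapshot.
 */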
int kfd_dbg_trap_device_snapshot(struct kfd_process *target,
		uint64_t exception_clear_mask,
		void __user *user_info,
		uint32_t *number_of_device_infos,
		uint32_t *entry_size)
{
	struct kfd_dbg_device_info_entry device_info;
	uint32_t tmp_entry_size, tmp_num_devices;
	int i, r = 0;

	if (!(target && user_info && number_of_device_infos && entry_size))
		return -EINVAL;

	tmp_entry_size = *entry_size;

	tmp_num_devices = min_t(size_t, *number_of_device_infos, target->n_pdds);
	*number_of_device_infos = target->n_pdds;
	*entry_size = min_t(size_t, *entry_size, sizeof(device_info));

	if (!tmp_num_devices)
		return 0;

	memset(&device_info, 0, sizeof(device_info));

	mutex_lock(&target->event_mutex);

	/* Run over all pdd of the process */
	for (i = 0; i < tmp_num_devices; i++) {
		struct kfd_process_device *pdd = target->pdds[i];
		struct kfd_topology_device *topo_dev = kfd_topology_device_by_id(pdd->dev->id);

		device_info.gpu_id = pdd->dev->id;
		device_info.exception_status = pdd->exception_status;
		device_info.lds_base = pdd->lds_base;
		device_info.lds_limit = pdd->lds_limit;
		device_info.scratch_base = pdd->scratch_base;
		device_info.scratch_limit = pdd->scratch_limit;
		device_info.gpuvm_base = pdd->gpuvm_base;
		device_info.gpuvm_limit = pdd->gpuvm_limit;
		device_info.location_id = topo_dev->node_props.location_id;
		device_info.vendor_id = topo_dev->node_props.vendor_id;
		device_info.device_id = topo_dev->node_props.device_id;
		device_info.revision_id = pdd->dev->adev->pdev->revision;
		device_info.subsystem_vendor_id = pdd->dev->adev->pdev->subsystem_vendor;
		device_info.subsystem_device_id = pdd->dev->adev->pdev->subsystem_device;
		device_info.fw_version = pdd->dev->kfd->mec_fw_version;
		device_info.gfx_target_version =
			topo_dev->node_props.gfx_target_version;
		device_info.simd_count = topo_dev->node_props.simd_count;
		device_info.max_waves_per_simd =
			topo_dev->node_props.max_waves_per_simd;
		device_info.array_count = topo_dev->node_props.array_count;
		device_info.simd_arrays_per_engine =
			topo_dev->node_props.simd_arrays_per_engine;
		device_info.num_xcc = NUM_XCC(pdd->dev->xcc_mask);
		device_info.capability = topo_dev->node_props.capability;
		device_info.debug_prop = topo_dev->node_props.debug_prop;

		if (exception_clear_mask)
			pdd->exception_status &= ~exception_clear_mask;

		if (copy_to_user(user_info, &device_info, *entry_size)) {
			r = -EFAULT;
			break;
		}

		user_info += tmp_entry_size;
	}

	mutex_unlock(&target->event_mutex);

	return r;
}

void kfd_dbg_set_enabled_debug_exception_mask(struct kfd_process *target,
					uint64_t exception_set_mask)
{
	uint64_t found_mask = 0;
	struct process_queue_manager *pqm;
	struct process_queue_node *pqn;
	static const char write_data = '.';
	loff_t pos = 0;
	int i;

	mutex_lock(&target->event_mutex);

	found_mask |= target->exception_status;

	pqm = &target->pqm;
	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (!pqn->q)
			continue;

		found_mask |= pqn->q->properties.exception_status;
	}

	for (i = 0; i < target->n_pdds; i++) {
		struct kfd_process_device *pdd = target->pdds[i];

		found_mask |= pdd->exception_status;
	}

	if (exception_set_mask & found_mask)
		kernel_write(target->dbg_ev_file, &write_data, 1, &pos);

	target->exception_enable_mask = exception_set_mask;

	mutex_unlock(&target->event_mutex);
}