// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/dma-buf.h>
#include <linux/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_svm.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "amdgpu_dma_buf.h"
#include "kfd_debug.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = kfd_open,
	.release = kfd_release,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
struct device *kfd_device;
static const struct class kfd_class = {
	.name = kfd_dev_name,
};

static inline struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p, __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, gpu_id);

	if (pdd)
		return pdd;

	mutex_unlock(&p->mutex);
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

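/*
 * Character device setup. Registration happens in three steps -
 * register_chrdev(), class_register(), device_create() - and the error
 * labels below unwind them in reverse order, so a failure at any step
 * leaves no partial state behind. kfd_chardev_exit() tears down in the
 * same reverse order.
 */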
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	err = class_register(&kfd_class);
	if (err)
		goto err_class_create;

	kfd_device = device_create(&kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_unregister(&kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(&kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_unregister(&kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
	kfd_device = NULL;
}


static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_process_init_cwsr_apu(process, filep)) {
		kfd_unref_process(process);
		return -EFAULT;
	}

	/* filep now owns the reference returned by kfd_create_process */
	filep->private_data = process;

	dev_dbg(kfd_device, "process pid %d opened kfd node, compat mode (32 bit) - %d\n",
		process->lead_thread->pid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_release(struct inode *inode, struct file *filep)
{
	struct kfd_process *process = filep->private_data;

	if (!process)
		return 0;

	if (process->context_id != KFD_CONTEXT_ID_PRIMARY)
		kfd_process_notifier_release_internal(process);

	kfd_unref_process(process);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}

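/*
 * Validate and translate the user-supplied queue arguments into a
 * queue_properties structure. The access_ok() checks are only an early
 * sanity filter on the user pointers; the actual accesses still go
 * through proper user-copy primitives later. Note that queue_percentage
 * is a packed field: bits 0-7 carry the percentage, bits 8-15 the PM4
 * target XCC.
 */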
static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
		args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
		pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
	}

	if (!access_ok((const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok((const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok((const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok((const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->is_gws = false;
	q_properties->queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	q_properties->pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (void __user *)args->read_pointer_address;
	q_properties->write_ptr = (void __user *)args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;
	q_properties->sdma_engine_id = args->sdma_engine_id;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_XGMI)
		q_properties->type = KFD_QUEUE_TYPE_SDMA_XGMI;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID)
		q_properties->type = KFD_QUEUE_TYPE_SDMA_BY_ENG_ID;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %px, %px\n",
			q_properties->read_ptr,
			q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}

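/*
 * Queue creation: validate the user arguments, look up and bind the
 * target device, make sure process doorbells exist, take references on
 * the user-mode queue buffers, and finally hand the request to the
 * process queue manager. The doorbell offset returned to user space is
 * an encoded mmap token (type | gpu_id | offset), not a raw address.
 */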
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_node *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;
	uint32_t doorbell_offset_in_process = 0;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	if (q_properties.type == KFD_QUEUE_TYPE_SDMA_BY_ENG_ID) {
		int max_sdma_eng_id = kfd_get_num_sdma_engines(dev) +
				      kfd_get_num_xgmi_sdma_engines(dev) - 1;

		if (q_properties.sdma_engine_id > max_sdma_eng_id) {
			err = -EINVAL;
			pr_err("sdma_engine_id %i exceeds maximum id of %i\n",
			       q_properties.sdma_engine_id, max_sdma_eng_id);
			goto err_sdma_engine_id;
		}
	}

	if (!pdd->qpd.proc_doorbells) {
		err = kfd_alloc_process_doorbells(dev->kfd, pdd);
		if (err) {
			pr_debug("failed to allocate process doorbells\n");
			goto err_bind_process;
		}
	}

	err = kfd_queue_acquire_buffers(pdd, &q_properties);
	if (err) {
		pr_debug("failed to acquire user queue buffers\n");
		goto err_acquire_queue_buf;
	}

	pr_debug("Creating queue for process pid %d on gpu 0x%x\n",
			p->lead_thread->pid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, &q_properties, &queue_id,
			NULL, NULL, NULL, &doorbell_offset_in_process);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;


	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	if (KFD_IS_SOC15(dev))
		/* On SOC15 ASICs, include the doorbell offset within the
		 * process doorbell frame, which is 2 pages.
		 */
		args->doorbell_offset |= doorbell_offset_in_process;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("Read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("Write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0);
	return 0;

err_create_queue:
	kfd_queue_unref_bo_vas(pdd, &q_properties);
	kfd_queue_release_buffers(pdd, &q_properties);
err_acquire_queue_buf:
err_sdma_engine_id:
err_bind_process:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for process pid %d\n",
				args->queue_id,
				p->lead_thread->pid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	/*
	 * Repurpose queue percentage to accommodate new features:
	 * bit 0-7: queue percentage
	 * bit 8-15: pm4_target_xcc
	 */
	if ((args->queue_percentage & 0xFF) > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok((const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) {
		args->ring_size = KFD_MIN_QUEUE_RING_SIZE;
		pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE");
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage & 0xFF;
	/* bit 8-15 are repurposed to be PM4 target XCC */
	properties.pm4_target_xcc = (args->queue_percentage >> 8) & 0xFF;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for process pid %d\n",
			args->queue_id, p->lead_thread->pid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}

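/*
 * The CU mask arrives as an array of 32-bit words. Its length must be a
 * non-zero multiple of 32 bits; anything beyond an arbitrary cap of
 * 1024 bits (max_num_cus) is silently truncated. The mask is copied in
 * with memdup_user() and applied through an MQD update.
 */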
static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	const int max_num_cus = 1024;
	struct kfd_ioctl_set_cu_mask_args *args = data;
	struct mqd_update_info minfo = {0};
	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);

	if ((args->num_cu_mask % 32) != 0) {
		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
				args->num_cu_mask);
		return -EINVAL;
	}

	minfo.cu_mask.count = args->num_cu_mask;
	if (minfo.cu_mask.count == 0) {
		pr_debug("CU mask cannot be 0");
		return -EINVAL;
	}

	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits. We can then just drop any CU mask bits
	 * past max_num_cus bits and just use the first max_num_cus bits.
	 */
	if (minfo.cu_mask.count > max_num_cus) {
		pr_debug("CU mask cannot be greater than 1024 bits");
		minfo.cu_mask.count = max_num_cus;
		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
	}

	minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
	if (IS_ERR(minfo.cu_mask.ptr)) {
		pr_debug("Could not copy CU mask from userspace");
		return PTR_ERR(minfo.cu_mask.ptr);
	}

	mutex_lock(&p->mutex);

	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);

	mutex_unlock(&p->mutex);

	kfree(minfo.cu_mask.ptr);
	return retval;
}

static int kfd_ioctl_get_queue_wave_state(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_queue_wave_state_args *args = data;
	int r;

	mutex_lock(&p->mutex);

	r = pqm_get_wave_state(&p->pqm, args->queue_id,
			       (void __user *)args->ctl_stack_address,
			       &args->ctl_stack_used_size,
			       &args->save_area_used_size);

	mutex_unlock(&p->mutex);

	return r;
}

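/*
 * Memory policy selects the default and alternate cache-coherency
 * behavior for the process on one device. Both policies must be either
 * COHERENT or NONCOHERENT; the alternate policy applies to the optional
 * alternate aperture range passed in the same ioctl.
 */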
static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size,
				args->misc_process_flag))
		err = -EINVAL;

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_set_trap_handler(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_trap_handler_args *args = data;
	int err = 0;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);

out:
err_pdd:
	mutex_unlock(&p->mutex);

	return err;
}

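/*
 * The legacy debugger ioctls below unconditionally return -EPERM. They
 * appear to be kept only so the ioctl numbers stay occupied for ABI
 * compatibility; the functionality has moved to the newer debug-trap
 * interface handled in kfd_debug.c.
 */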
static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	return -EPERM;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	return -EPERM;
}

static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (pdd)
		/* Reading GPU clock counter from KGD */
		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
	else
		/* Node without GPU resource */
		args->gpu_clock_counter = 0;

	/* No access to rdtsc. Using raw monotonic time */
	args->cpu_clock_counter = ktime_get_raw_ns();
	args->system_clock_counter = ktime_get_boottime_ns();

	/* Since the counters are in nanoseconds we report a 1 GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}


static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	int i;

	dev_dbg(kfd_device, "get apertures for process pid %d", p->lead_thread->pid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);
	/* Run over all pdd of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pAperture =
			&args->process_apertures[args->num_of_nodes];
		pAperture->gpu_id = pdd->dev->id;
		pAperture->lds_base = pdd->lds_base;
		pAperture->lds_limit = pdd->lds_limit;
		pAperture->gpuvm_base = pdd->gpuvm_base;
		pAperture->gpuvm_limit = pdd->gpuvm_limit;
		pAperture->scratch_base = pdd->scratch_base;
		pAperture->scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"node id %u\n", args->num_of_nodes);
		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);

		if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
			break;
	}
	mutex_unlock(&p->mutex);

	return 0;
}

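/*
 * The "new" variant uses the usual two-call pattern: a first call with
 * num_of_nodes == 0 just reports how many nodes exist so user space can
 * size its buffer; the second call fills in up to num_of_nodes entries
 * and copies them out, removing the old NUM_OF_SUPPORTED_GPUS limit.
 */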
static int kfd_ioctl_get_process_apertures_new(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_new_args *args = data;
	struct kfd_process_device_apertures *pa;
	int ret;
	int i;

	dev_dbg(kfd_device, "get apertures for process pid %d",
			p->lead_thread->pid);

	if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
		mutex_lock(&p->mutex);
		args->num_of_nodes = p->n_pdds;
		goto out_unlock;
	}

	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
		     GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	mutex_lock(&p->mutex);

	if (!p->n_pdds) {
		args->num_of_nodes = 0;
		kfree(pa);
		goto out_unlock;
	}

	/* Run over all pdd of the process */
	for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		pa[i].gpu_id = pdd->dev->id;
		pa[i].lds_base = pdd->lds_base;
		pa[i].lds_limit = pdd->lds_limit;
		pa[i].gpuvm_base = pdd->gpuvm_base;
		pa[i].gpuvm_limit = pdd->gpuvm_limit;
		pa[i].scratch_base = pdd->scratch_base;
		pa[i].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
	}
	mutex_unlock(&p->mutex);

	args->num_of_nodes = i;
	ret = copy_to_user(
			(void __user *)args->kfd_process_device_apertures_ptr,
			pa,
			(i * sizeof(struct kfd_process_device_apertures)));
	kfree(pa);
	return ret ? -EFAULT : 0;

out_unlock:
	mutex_unlock(&p->mutex);
	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
	if (args->event_page_offset) {
		mutex_lock(&p->mutex);
		err = kfd_kmap_event_page(p, args->event_page_offset);
		mutex_unlock(&p->mutex);
		if (err)
			return err;
	}

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	pr_debug("Created event (id:0x%08x) (%s)\n", args->event_id, __func__);
	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;

	return kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			&args->timeout, &args->wait_result);
}

static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_node *dev;
	long err;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->adev, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
err_pdd:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_process_device *pdd;
	struct tile_config config;
	int err = 0;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	mutex_unlock(&p->mutex);
	if (!pdd)
		return -EINVAL;

	amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			config.tile_config_ptr,
			args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			config.macro_tile_config_ptr,
			args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_acquire_vm_args *args = data;
	struct kfd_process_device *pdd;
	struct file *drm_file;
	int ret;

	drm_file = fget(args->drm_fd);
	if (!drm_file)
		return -EINVAL;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		ret = -EINVAL;
		goto err_pdd;
	}

	if (pdd->drm_file) {
		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
		goto err_drm_file;
	}

	ret = kfd_process_device_init_vm(pdd, drm_file);
	if (ret)
		goto err_unlock;

	/* On success, the PDD keeps the drm_file reference */
	mutex_unlock(&p->mutex);

	return 0;

err_unlock:
err_pdd:
err_drm_file:
	mutex_unlock(&p->mutex);
	fput(drm_file);
	return ret;
}

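/*
 * A device counts as "large BAR" when all of its VRAM is CPU-visible
 * (no private VRAM segment), which is what permits host-visible VRAM
 * allocations below. The debug_largebar option fakes this on small-BAR
 * machines for testing, and APUs without a public VRAM segment are
 * treated the same way.
 */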
bool kfd_dev_is_large_bar(struct kfd_node *dev)
{
	if (dev->kfd->adev->debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->local_mem_info.local_mem_size_private == 0 &&
	    dev->local_mem_info.local_mem_size_public > 0)
		return true;

	if (dev->local_mem_info.local_mem_size_public == 0 &&
	    dev->kfd->adev->gmc.is_app_apu) {
		pr_debug("APP APU, consider it like a large-bar system\n");
		return true;
	}

	return false;
}

static int kfd_ioctl_get_available_memory(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_available_memory_args *args = data;
	struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);

	if (!pdd)
		return -EINVAL;
	args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
							pdd->dev->node_id);
	kfd_unlock_pdd(pdd);
	return 0;
}

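/*
 * Allocation path. The ioctl first checks (under the SVM lock, when SVM
 * is built in) that the requested VA range and any user-pointer range do
 * not collide with existing SVM registrations, then special-cases
 * doorbell and MMIO-remap "allocations" that merely expose existing
 * hardware pages, and finally packs the returned handle as
 * (gpu_id | IDR handle) via MAKE_HANDLE().
 */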
static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_node *dev;
	int idr_handle;
	long err;
	uint64_t offset = args->mmap_offset;
	uint32_t flags = args->flags;

	if (args->size == 0)
		return -EINVAL;

	if (p->context_id != KFD_CONTEXT_ID_PRIMARY && (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
		pr_debug("USERPTR is not supported on non-primary kfd_process\n");

		return -EOPNOTSUPP;
	}

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
	/* Flush pending deferred work to avoid racing with deferred actions
	 * from previous memory map changes (e.g. munmap).
	 */
	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
	mutex_lock(&p->svms.lock);
	mmap_write_unlock(current->mm);

	/* Skip the special case that allocates VRAM without a VA;
	 * there the VA is 0 and thus invalid.
	 */
	if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
	    interval_tree_iter_first(&p->svms.objects,
				     args->va_addr >> PAGE_SHIFT,
				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("Address: 0x%llx already allocated by SVM\n",
			args->va_addr);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	/* When registering a user buffer, check whether it has already been
	 * registered by SVM, by its CPU virtual address.
	 */
	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
	    interval_tree_iter_first(&p->svms.objects,
				     args->mmap_offset >> PAGE_SHIFT,
				     (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
		pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
			args->mmap_offset);
		mutex_unlock(&p->svms.lock);
		return -EADDRINUSE;
	}

	mutex_unlock(&p->svms.lock);
#endif
	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		err = -EINVAL;
		goto err_pdd;
	}

	dev = pdd->dev;

	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
		!kfd_dev_is_large_bar(dev)) {
		pr_err("Alloc host visible vram on small bar is not allowed\n");
		err = -EINVAL;
		goto err_large_bar;
	}

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
		if (args->size != kfd_doorbell_process_slice(dev->kfd)) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = kfd_get_process_doorbells(pdd);
		if (!offset) {
			err = -ENOMEM;
			goto err_unlock;
		}
	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
		if (args->size != PAGE_SIZE) {
			err = -EINVAL;
			goto err_unlock;
		}
		offset = dev->adev->rmmio_remap.bus_addr;
		if (!offset || (PAGE_SIZE > 4096)) {
			err = -ENOMEM;
			goto err_unlock;
		}
	}

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		dev->adev, args->va_addr, args->size,
		pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
		flags, false);

	if (err)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;
		goto err_free;
	}

	/* Update the VRAM usage count */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		uint64_t size = args->size;

		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
			size >>= 1;
		atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
	args->mmap_offset = offset;

	/* MMIO is mapped through kfd device
	 * Generate a kfd mmap offset
	 */
	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
		args->mmap_offset = KFD_MMAP_TYPE_MMIO
					| KFD_MMAP_GPU_ID(args->gpu_id);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
err_pdd:
err_large_bar:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	int ret;
	uint64_t size = 0;

	mutex_lock(&p->mutex);
	/*
	 * Safeguard to prevent user space from freeing signal BO.
	 * It will be freed at process termination.
	 */
	if (p->signal_handle && (p->signal_handle == args->handle)) {
		pr_err("Free signal BO is not allowed\n");
		ret = -EPERM;
		goto err_unlock;
	}

	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		ret = -EINVAL;
		goto err_pdd;
	}

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
				(struct kgd_mem *)mem, pdd->drm_priv, &size);

	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

	atomic64_sub(size, &pdd->vram_usage);

err_unlock:
err_pdd:
	mutex_unlock(&p->mutex);
	return ret;
}

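/*
 * Mapping and unmapping work on an array of target devices. Note that
 * args->n_success is both an input and an output: the loops start at the
 * first device that has not succeeded yet and advance it after each
 * device, so a failed call can be retried without redoing work that
 * already completed.
 */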
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_node *dev;
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto get_process_device_data_failed;
	}
	dev = pdd->dev;

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			pr_debug("Getting device by id failed for 0x%x\n",
				 devices_arr[i]);
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_bind_process_to_device(peer_pdd->dev, p);
		if (IS_ERR(peer_pdd)) {
			err = PTR_ERR(peer_pdd);
			goto get_mem_obj_from_handle_failed;
		}

		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem,
			peer_pdd->drm_priv);
		if (err) {
			struct pci_dev *pdev = peer_pdd->dev->adev->pdev;

			dev_err(dev->adev->dev,
				"Failed to map peer:%04x:%02x:%02x.%d mem_domain:%d\n",
				pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_SLOT(pdev->devfn),
				PCI_FUNC(pdev->devfn),
				((struct kgd_mem *)mem)->domain);
			goto map_memory_to_gpu_failed;
		}
		args->n_success = i+1;
	}

	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
	}
	kfree(devices_arr);

	return err;

get_process_device_data_failed:
bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);

	return err;
}

static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	long err = 0;
	uint32_t *devices_arr = NULL, i;
	bool flush_tlb;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
	if (!pdd) {
		err = -EINVAL;
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (!peer_pdd) {
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}
		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
			peer_pdd->dev->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
		if (err) {
			pr_debug("Failed to unmap from gpu %d/%d\n", i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		args->n_success = i+1;
	}

	flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
	if (flush_tlb) {
		err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
				(struct kgd_mem *) mem, true);
		if (err) {
			pr_debug("Sync memory failed, wait interrupted by user signal\n");
			goto sync_memory_failed;
		}
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		if (flush_tlb)
			kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);

		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
		if (err)
			goto sync_memory_failed;
	}

	mutex_unlock(&p->mutex);

	kfree(devices_arr);

	return 0;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
sync_memory_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);
	return err;
}

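/*
 * GWS (global wave sync) is a shared hardware resource. A non-zero
 * num_gws attaches the device's GWS object to the queue through
 * pqm_set_gws(), zero detaches it; first_gws is always reported as 0.
 * GWS is refused without HWS scheduling and, while the debug trap is
 * enabled, on devices lacking debugger GWS support.
 */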
static int kfd_ioctl_alloc_queue_gws(struct file *filep,
		struct kfd_process *p, void *data)
{
	int retval;
	struct kfd_ioctl_alloc_queue_gws_args *args = data;
	struct queue *q;
	struct kfd_node *dev;

	mutex_lock(&p->mutex);
	q = pqm_get_user_queue(&p->pqm, args->queue_id);

	if (q) {
		dev = q->device;
	} else {
		retval = -EINVAL;
		goto out_unlock;
	}

	if (!dev->gws) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		retval = -ENODEV;
		goto out_unlock;
	}

	if (p->debug_trap_enabled && (!kfd_dbg_has_gws_support(dev) ||
				      kfd_dbg_has_cwsr_workaround(dev))) {
		retval = -EBUSY;
		goto out_unlock;
	}

	retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
	mutex_unlock(&p->mutex);

	args->first_gws = 0;
	return retval;

out_unlock:
	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_get_dmabuf_info(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_dmabuf_info_args *args = data;
	struct kfd_node *dev = NULL;
	struct amdgpu_device *dmabuf_adev;
	void *metadata_buffer = NULL;
	uint32_t flags;
	int8_t xcp_id;
	unsigned int i;
	int r;

	/* Find a KFD GPU device that supports the get_dmabuf_info query */
	for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
		if (dev && !kfd_devcgroup_check_permission(dev))
			break;
	if (!dev)
		return -EINVAL;

	if (args->metadata_ptr) {
		metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
		if (!metadata_buffer)
			return -ENOMEM;
	}

	/* Get dmabuf info from KGD */
	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
					  &dmabuf_adev, &args->size,
					  metadata_buffer, args->metadata_size,
					  &args->metadata_size, &flags, &xcp_id);
	if (r)
		goto exit;

	if (xcp_id >= 0)
		args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id;
	else
		args->gpu_id = dev->id;
	args->flags = flags;

	/* Copy metadata buffer to user mode */
	if (metadata_buffer) {
		r = copy_to_user((void __user *)args->metadata_ptr,
				 metadata_buffer, args->metadata_size);
		if (r != 0)
			r = -EFAULT;
	}

exit:
	kfree(metadata_buffer);

	return r;
}

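/*
 * DMA-buf import/export. Import wraps a foreign dma-buf fd in a kgd_mem
 * object mapped at the given VA and returns a regular KFD buffer handle;
 * export does the reverse, turning an existing handle into a new dma-buf
 * fd for sharing with other drivers or processes.
 */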
static int kfd_ioctl_import_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_import_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	int idr_handle;
	uint64_t size;
	void *mem;
	int r;

	mutex_lock(&p->mutex);
	pdd = kfd_process_device_data_by_id(p, args->gpu_id);
	if (!pdd) {
		r = -EINVAL;
		goto err_unlock;
	}

	pdd = kfd_bind_process_to_device(pdd->dev, p);
	if (IS_ERR(pdd)) {
		r = PTR_ERR(pdd);
		goto err_unlock;
	}

	r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
						 args->va_addr, pdd->drm_priv,
						 (struct kgd_mem **)&mem, &size,
						 NULL);
	if (r)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		r = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);

	return 0;

err_free:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
					       pdd->drm_priv, NULL);
err_unlock:
	mutex_unlock(&p->mutex);
	return r;
}

static int kfd_ioctl_export_dmabuf(struct file *filep,
				   struct kfd_process *p, void *data)
{
	struct kfd_ioctl_export_dmabuf_args *args = data;
	struct kfd_process_device *pdd;
	struct dma_buf *dmabuf;
	struct kfd_node *dev;
	void *mem;
	int ret = 0;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	mutex_unlock(&p->mutex);
	if (ret)
		goto err_out;

	ret = dma_buf_fd(dmabuf, args->flags);
	if (ret < 0) {
		dma_buf_put(dmabuf);
		goto err_out;
	}
	/* dma_buf_fd assigns the reference count to the fd, no need to
	 * put the reference here.
	 */
	args->dmabuf_fd = ret;

	return 0;

err_unlock:
	mutex_unlock(&p->mutex);
err_out:
	return ret;
}

/* Handle requests for watching SMI events */
static int kfd_ioctl_smi_events(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_smi_events_args *args = data;
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);

	pdd = kfd_process_device_data_by_id(p, args->gpuid);
	mutex_unlock(&p->mutex);
	if (!pdd)
		return -EINVAL;

	return kfd_smi_event_open(pdd->dev, &args->anon_fd);
}

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)

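/*
 * XNACK controls GPU retry-fault behavior for SVM. The mode can only be
 * changed while the process has no user queues; passing a negative value
 * just queries the current mode. Enabling XNACK is refused when the
 * devices cannot support it (the kfd_process_xnack_mode() check).
 */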
static int kfd_ioctl_set_xnack_mode(struct file *filep,
				    struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_xnack_mode_args *args = data;
	int r = 0;

	mutex_lock(&p->mutex);
	if (args->xnack_enabled >= 0) {
		if (!list_empty(&p->pqm.queues)) {
			pr_debug("Process has user queues running\n");
			r = -EBUSY;
			goto out_unlock;
		}

		if (p->xnack_enabled == args->xnack_enabled)
			goto out_unlock;

		if (args->xnack_enabled && !kfd_process_xnack_mode(p, true)) {
			r = -EPERM;
			goto out_unlock;
		}

		r = svm_range_switch_xnack_reserve_mem(p, args->xnack_enabled);
	} else {
		args->xnack_enabled = p->xnack_enabled;
	}

out_unlock:
	mutex_unlock(&p->mutex);

	return r;
}

static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	struct kfd_ioctl_svm_args *args = data;
	int r = 0;

	if (p->context_id != KFD_CONTEXT_ID_PRIMARY) {
		pr_debug("SVM ioctl not supported on non-primary kfd process\n");

		return -EOPNOTSUPP;
	}

	pr_debug("start 0x%llx size 0x%llx op 0x%x nattr 0x%x\n",
		 args->start_addr, args->size, args->op, args->nattr);

	if ((args->start_addr & ~PAGE_MASK) || (args->size & ~PAGE_MASK))
		return -EINVAL;
	if (!args->start_addr || !args->size)
		return -EINVAL;

	r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr,
		      args->attrs);

	return r;
}
#else
static int kfd_ioctl_set_xnack_mode(struct file *filep,
				    struct kfd_process *p, void *data)
{
	return -EPERM;
}
static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data)
{
	return -EPERM;
}
#endif

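/*
 * CRIU checkpoint/restore support. The checkpoint helpers below all
 * follow the same convention: fixed-size "buckets" are copied to
 * user-visible arrays, while driver-private state is appended to a
 * single opaque blob at *priv_offset, which each helper advances. The
 * restore side consumes the blob with the same cursor pattern.
 */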
static int criu_checkpoint_process(struct kfd_process *p,
			     uint8_t __user *user_priv_data,
			     uint64_t *priv_offset)
{
	struct kfd_criu_process_priv_data process_priv;
	int ret;

	memset(&process_priv, 0, sizeof(process_priv));

	process_priv.version = KFD_CRIU_PRIV_VERSION;
	/* For CR, we don't consider negative xnack mode which is used for
	 * querying without changing it, here 0 simply means disabled and 1
	 * means enabled so retry for finding a valid PTE.
	 */
	process_priv.xnack_mode = p->xnack_enabled ? 1 : 0;

	ret = copy_to_user(user_priv_data + *priv_offset,
				&process_priv, sizeof(process_priv));

	if (ret) {
		pr_err("Failed to copy process information to user\n");
		ret = -EFAULT;
	}

	*priv_offset += sizeof(process_priv);
	return ret;
}

static int criu_checkpoint_devices(struct kfd_process *p,
			     uint32_t num_devices,
			     uint8_t __user *user_addr,
			     uint8_t __user *user_priv_data,
			     uint64_t *priv_offset)
{
	struct kfd_criu_device_priv_data *device_priv = NULL;
	struct kfd_criu_device_bucket *device_buckets = NULL;
	int ret = 0, i;

	device_buckets = kvzalloc(num_devices * sizeof(*device_buckets), GFP_KERNEL);
	if (!device_buckets) {
		ret = -ENOMEM;
		goto exit;
	}

	device_priv = kvzalloc(num_devices * sizeof(*device_priv), GFP_KERNEL);
	if (!device_priv) {
		ret = -ENOMEM;
		goto exit;
	}

	for (i = 0; i < num_devices; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		device_buckets[i].user_gpu_id = pdd->user_gpu_id;
		device_buckets[i].actual_gpu_id = pdd->dev->id;

		/*
		 * priv_data does not contain useful information for now and is reserved for
		 * future use, so we do not set its contents.
		 */
	}

	ret = copy_to_user(user_addr, device_buckets, num_devices * sizeof(*device_buckets));
	if (ret) {
		pr_err("Failed to copy device information to user\n");
		ret = -EFAULT;
		goto exit;
	}

	ret = copy_to_user(user_priv_data + *priv_offset,
			   device_priv,
			   num_devices * sizeof(*device_priv));
	if (ret) {
		pr_err("Failed to copy device information to user\n");
		ret = -EFAULT;
	}
	*priv_offset += num_devices * sizeof(*device_priv);

exit:
	kvfree(device_buckets);
	kvfree(device_priv);
	return ret;
}

static uint32_t get_process_num_bos(struct kfd_process *p)
{
	uint32_t num_of_bos = 0;
	int i;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];
		void *mem;
		int id;

		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
			struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;

			if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
				num_of_bos++;
		}
	}
	return num_of_bos;
}

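/*
 * dma-buf fds for checkpointed BOs are only reserved here with
 * get_unused_fd_flags(); commit_files() later either installs them into
 * the fd table (on overall success) or releases both the file reference
 * and the reserved fd slot (on error), so no half-visible fds leak to
 * user space on a failed checkpoint.
 */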
static int criu_get_prime_handle(struct kgd_mem *mem,
				 int flags, u32 *shared_fd,
				 struct file **file)
{
	struct dma_buf *dmabuf;
	int ret;

	ret = amdgpu_amdkfd_gpuvm_export_dmabuf(mem, &dmabuf);
	if (ret) {
		pr_err("dmabuf export failed for the BO\n");
		return ret;
	}

	ret = get_unused_fd_flags(flags);
	if (ret < 0) {
		pr_err("dmabuf create fd failed, ret:%d\n", ret);
		goto out_free_dmabuf;
	}

	*shared_fd = ret;
	*file = dmabuf->file;
	return 0;

out_free_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}

static void commit_files(struct file **files,
			 struct kfd_criu_bo_bucket *bo_buckets,
			 unsigned int count,
			 int err)
{
	while (count--) {
		struct file *file = files[count];

		if (!file)
			continue;
		if (err) {
			fput(file);
			put_unused_fd(bo_buckets[count].dmabuf_fd);
		} else {
			fd_install(bo_buckets[count].dmabuf_fd, file);
		}
	}
}

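/*
 * Walk every BO of every device, fill one bucket per BO for user space
 * (VA, size, flags, mmap offset, dmabuf fd where applicable) and one
 * private record for the blob. Trap-handler BOs, which live at VAs below
 * the GPUVM base, are deliberately skipped.
 */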
static int criu_checkpoint_bos(struct kfd_process *p,
			       uint32_t num_bos,
			       uint8_t __user *user_bos,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_offset)
{
	struct kfd_criu_bo_bucket *bo_buckets;
	struct kfd_criu_bo_priv_data *bo_privs;
	struct file **files = NULL;
	int ret = 0, pdd_index, bo_index = 0, id;
	void *mem;

	bo_buckets = kvzalloc(num_bos * sizeof(*bo_buckets), GFP_KERNEL);
	if (!bo_buckets)
		return -ENOMEM;

	bo_privs = kvzalloc(num_bos * sizeof(*bo_privs), GFP_KERNEL);
	if (!bo_privs) {
		ret = -ENOMEM;
		goto exit;
	}

	files = kvzalloc(num_bos * sizeof(struct file *), GFP_KERNEL);
	if (!files) {
		ret = -ENOMEM;
		goto exit;
	}

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];
		struct amdgpu_bo *dumper_bo;
		struct kgd_mem *kgd_mem;

		idr_for_each_entry(&pdd->alloc_idr, mem, id) {
			struct kfd_criu_bo_bucket *bo_bucket;
			struct kfd_criu_bo_priv_data *bo_priv;
			int i, dev_idx = 0;

			kgd_mem = (struct kgd_mem *)mem;
			dumper_bo = kgd_mem->bo;

			/* Skip checkpointing BOs that are used for Trap handler
			 * code and state. Currently, these BOs have a VA that
			 * is less than the GPUVM base.
			 */
			if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
				continue;

			bo_bucket = &bo_buckets[bo_index];
			bo_priv = &bo_privs[bo_index];

			bo_bucket->gpu_id = pdd->user_gpu_id;
			bo_bucket->addr = (uint64_t)kgd_mem->va;
			bo_bucket->size = amdgpu_bo_size(dumper_bo);
			bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;
			bo_priv->idr_handle = id;

			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
				ret = amdgpu_ttm_tt_get_userptr(&dumper_bo->tbo,
								&bo_priv->user_addr);
				if (ret) {
					pr_err("Failed to obtain user address for user-pointer bo\n");
					goto exit;
				}
			}
			if (bo_bucket->alloc_flags
			    & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) {
				ret = criu_get_prime_handle(kgd_mem,
						bo_bucket->alloc_flags &
						KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? DRM_RDWR : 0,
						&bo_bucket->dmabuf_fd, &files[bo_index]);
				if (ret)
					goto exit;
			} else {
				bo_bucket->dmabuf_fd = KFD_INVALID_FD;
			}

			if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
				bo_bucket->offset = KFD_MMAP_TYPE_DOORBELL |
					KFD_MMAP_GPU_ID(pdd->dev->id);
			else if (bo_bucket->alloc_flags &
				KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
				bo_bucket->offset = KFD_MMAP_TYPE_MMIO |
					KFD_MMAP_GPU_ID(pdd->dev->id);
			else
				bo_bucket->offset = amdgpu_bo_mmap_offset(dumper_bo);

			for (i = 0; i < p->n_pdds; i++) {
				if (amdgpu_amdkfd_bo_mapped_to_dev(p->pdds[i]->drm_priv, kgd_mem))
					bo_priv->mapped_gpuids[dev_idx++] = p->pdds[i]->user_gpu_id;
			}

			pr_debug("bo_size = 0x%llx, bo_addr = 0x%llx bo_offset = 0x%llx\n"
					"gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",
					bo_bucket->size,
					bo_bucket->addr,
					bo_bucket->offset,
					bo_bucket->gpu_id,
					bo_bucket->alloc_flags,
					bo_priv->idr_handle);
			bo_index++;
		}
	}

	ret = copy_to_user(user_bos, bo_buckets, num_bos * sizeof(*bo_buckets));
	if (ret) {
		pr_err("Failed to copy BO information to user\n");
		ret = -EFAULT;
		goto exit;
	}

	ret = copy_to_user(user_priv_data + *priv_offset, bo_privs, num_bos * sizeof(*bo_privs));
	if (ret) {
		pr_err("Failed to copy BO priv information to user\n");
		ret = -EFAULT;
		goto exit;
	}

	*priv_offset += num_bos * sizeof(*bo_privs);

exit:
	commit_files(files, bo_buckets, bo_index, ret);
	kvfree(files);
	kvfree(bo_buckets);
	kvfree(bo_privs);
	return ret;
}

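/*
 * Size negotiation for the checkpoint: count devices, BOs and other
 * objects (queues, events, SVM ranges) and compute the total size of the
 * private blob, so the CRIU plugin can allocate its buffers before the
 * actual dump.
 */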
static int criu_get_process_object_info(struct kfd_process *p,
					uint32_t *num_devices,
					uint32_t *num_bos,
					uint32_t *num_objects,
					uint64_t *objs_priv_size)
{
	uint64_t queues_priv_data_size, svm_priv_data_size, priv_size;
	uint32_t num_queues, num_events, num_svm_ranges;
	int ret;

	*num_devices = p->n_pdds;
	*num_bos = get_process_num_bos(p);

	ret = kfd_process_get_queue_info(p, &num_queues, &queues_priv_data_size);
	if (ret)
		return ret;

	num_events = kfd_get_num_events(p);

	svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);

	*num_objects = num_queues + num_events + num_svm_ranges;

	if (objs_priv_size) {
		priv_size = sizeof(struct kfd_criu_process_priv_data);
		priv_size += *num_devices * sizeof(struct kfd_criu_device_priv_data);
		priv_size += *num_bos * sizeof(struct kfd_criu_bo_priv_data);
		priv_size += queues_priv_data_size;
		priv_size += num_events * sizeof(struct kfd_criu_event_priv_data);
		priv_size += svm_priv_data_size;
		*objs_priv_size = priv_size;
	}
	return 0;
}

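/*
 * Top-level checkpoint entry point. The process must already have its
 * queues evicted (the CRIU plugin's PROCESS_INFO step), and the counts
 * and blob size supplied by user space must match what the kernel
 * recomputes here, otherwise the dump is rejected.
 */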
*/ 2096 ret = -EINVAL; 2097 goto exit_unlock; 2098 } 2099 2100 ret = criu_get_process_object_info(p, &num_devices, &num_bos, &num_objects, &priv_size); 2101 if (ret) 2102 goto exit_unlock; 2103 2104 if (num_devices != args->num_devices || 2105 num_bos != args->num_bos || 2106 num_objects != args->num_objects || 2107 priv_size != args->priv_data_size) { 2108 2109 ret = -EINVAL; 2110 goto exit_unlock; 2111 } 2112 2113 /* each function will store private data inside priv_data and adjust priv_offset */ 2114 ret = criu_checkpoint_process(p, (uint8_t __user *)args->priv_data, &priv_offset); 2115 if (ret) 2116 goto exit_unlock; 2117 2118 ret = criu_checkpoint_devices(p, num_devices, (uint8_t __user *)args->devices, 2119 (uint8_t __user *)args->priv_data, &priv_offset); 2120 if (ret) 2121 goto exit_unlock; 2122 2123 /* Leave room for BOs in the private data. They need to be restored 2124 * before events, but we checkpoint them last to simplify the error 2125 * handling. 2126 */ 2127 bo_priv_offset = priv_offset; 2128 priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data); 2129 2130 if (num_objects) { 2131 ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data, 2132 &priv_offset); 2133 if (ret) 2134 goto exit_unlock; 2135 2136 ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data, 2137 &priv_offset); 2138 if (ret) 2139 goto exit_unlock; 2140 2141 ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset); 2142 if (ret) 2143 goto exit_unlock; 2144 } 2145 2146 /* This must be the last thing in this function that can fail. 2147 * Otherwise we leak dmabuf file descriptors. 2148 */ 2149 ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, 2150 (uint8_t __user *)args->priv_data, &bo_priv_offset); 2151 2152 exit_unlock: 2153 mutex_unlock(&p->mutex); 2154 if (ret) 2155 pr_err("Failed to dump CRIU ret:%d\n", ret); 2156 else 2157 pr_debug("CRIU dump ret:%d\n", ret); 2158 2159 return ret; 2160 } 2161 2162 static int criu_restore_process(struct kfd_process *p, 2163 struct kfd_ioctl_criu_args *args, 2164 uint64_t *priv_offset, 2165 uint64_t max_priv_data_size) 2166 { 2167 int ret = 0; 2168 struct kfd_criu_process_priv_data process_priv; 2169 2170 if (*priv_offset + sizeof(process_priv) > max_priv_data_size) 2171 return -EINVAL; 2172 2173 ret = copy_from_user(&process_priv, 2174 (void __user *)(args->priv_data + *priv_offset), 2175 sizeof(process_priv)); 2176 if (ret) { 2177 pr_err("Failed to copy process private information from user\n"); 2178 ret = -EFAULT; 2179 goto exit; 2180 } 2181 *priv_offset += sizeof(process_priv); 2182 2183 if (process_priv.version != KFD_CRIU_PRIV_VERSION) { 2184 pr_err("Invalid CRIU API version (checkpointed:%d current:%d)\n", 2185 process_priv.version, KFD_CRIU_PRIV_VERSION); 2186 return -EINVAL; 2187 } 2188 2189 pr_debug("Setting XNACK mode\n"); 2190 if (process_priv.xnack_mode && !kfd_process_xnack_mode(p, true)) { 2191 pr_err("xnack mode cannot be set\n"); 2192 ret = -EPERM; 2193 goto exit; 2194 } else { 2195 pr_debug("set xnack mode: %d\n", process_priv.xnack_mode); 2196 p->xnack_enabled = process_priv.xnack_mode; 2197 } 2198 2199 exit: 2200 return ret; 2201 } 2202 2203 static int criu_restore_devices(struct kfd_process *p, 2204 struct kfd_ioctl_criu_args *args, 2205 uint64_t *priv_offset, 2206 uint64_t max_priv_data_size) 2207 { 2208 struct kfd_criu_device_bucket *device_buckets; 2209 struct kfd_criu_device_priv_data *device_privs; 2210 int ret = 0; 2211 uint32_t i; 2212 2213 if (args->num_devices != 
p->n_pdds) 2214 return -EINVAL; 2215 2216 if (*priv_offset + (args->num_devices * sizeof(*device_privs)) > max_priv_data_size) 2217 return -EINVAL; 2218 2219 device_buckets = kmalloc_array(args->num_devices, sizeof(*device_buckets), GFP_KERNEL); 2220 if (!device_buckets) 2221 return -ENOMEM; 2222 2223 ret = copy_from_user(device_buckets, (void __user *)args->devices, 2224 args->num_devices * sizeof(*device_buckets)); 2225 if (ret) { 2226 pr_err("Failed to copy devices buckets from user\n"); 2227 ret = -EFAULT; 2228 goto exit; 2229 } 2230 2231 for (i = 0; i < args->num_devices; i++) { 2232 struct kfd_node *dev; 2233 struct kfd_process_device *pdd; 2234 struct file *drm_file; 2235 2236 /* device private data is not currently used */ 2237 2238 if (!device_buckets[i].user_gpu_id) { 2239 pr_err("Invalid user gpu_id\n"); 2240 ret = -EINVAL; 2241 goto exit; 2242 } 2243 2244 dev = kfd_device_by_id(device_buckets[i].actual_gpu_id); 2245 if (!dev) { 2246 pr_err("Failed to find device with gpu_id = %x\n", 2247 device_buckets[i].actual_gpu_id); 2248 ret = -EINVAL; 2249 goto exit; 2250 } 2251 2252 pdd = kfd_get_process_device_data(dev, p); 2253 if (!pdd) { 2254 pr_err("Failed to get pdd for gpu_id = %x\n", 2255 device_buckets[i].actual_gpu_id); 2256 ret = -EINVAL; 2257 goto exit; 2258 } 2259 pdd->user_gpu_id = device_buckets[i].user_gpu_id; 2260 2261 drm_file = fget(device_buckets[i].drm_fd); 2262 if (!drm_file) { 2263 pr_err("Invalid render node file descriptor sent from plugin (%d)\n", 2264 device_buckets[i].drm_fd); 2265 ret = -EINVAL; 2266 goto exit; 2267 } 2268 2269 if (pdd->drm_file) { 2270 ret = -EINVAL; 2271 goto exit; 2272 } 2273 2274 /* create the vm using render nodes for kfd pdd */ 2275 if (kfd_process_device_init_vm(pdd, drm_file)) { 2276 pr_err("could not init vm for given pdd\n"); 2277 /* On success the PDD keeps the drm_file reference, so only drop it on this failure path */ 2278 fput(drm_file); 2279 ret = -EINVAL; 2280 goto exit; 2281 } 2282 /* 2283 * pdd already has the VM bound to its render node, so the call below will not create a 2284 * new exclusive KFD GPUVM but will reuse the existing renderDXXX one. The call is still 2285 * needed for iommu v2 binding and runtime pm. 2286 */ 2287 pdd = kfd_bind_process_to_device(dev, p); 2288 if (IS_ERR(pdd)) { 2289 ret = PTR_ERR(pdd); 2290 goto exit; 2291 } 2292 2293 if (!pdd->qpd.proc_doorbells) { 2294 ret = kfd_alloc_process_doorbells(dev->kfd, pdd); 2295 if (ret) 2296 goto exit; 2297 } 2298 } 2299 2300 /* 2301 * We do not copy device private data from user since it is not used for now, 2302 * but we still advance priv_offset past it.
2303 */ 2304 *priv_offset += args->num_devices * sizeof(*device_privs); 2305 2306 exit: 2307 kfree(device_buckets); 2308 return ret; 2309 } 2310 2311 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, 2312 struct kfd_criu_bo_bucket *bo_bucket, 2313 struct kfd_criu_bo_priv_data *bo_priv, 2314 struct kgd_mem **kgd_mem) 2315 { 2316 int idr_handle; 2317 int ret; 2318 const bool criu_resume = true; 2319 u64 offset; 2320 2321 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) { 2322 if (bo_bucket->size != 2323 kfd_doorbell_process_slice(pdd->dev->kfd)) 2324 return -EINVAL; 2325 2326 offset = kfd_get_process_doorbells(pdd); 2327 if (!offset) 2328 return -ENOMEM; 2329 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { 2330 /* MMIO BOs need remapped bus address */ 2331 if (bo_bucket->size != PAGE_SIZE) { 2332 pr_err("Invalid page size\n"); 2333 return -EINVAL; 2334 } 2335 offset = pdd->dev->adev->rmmio_remap.bus_addr; 2336 if (!offset || (PAGE_SIZE > 4096)) { 2337 pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n"); 2338 return -ENOMEM; 2339 } 2340 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { 2341 offset = bo_priv->user_addr; 2342 } 2343 /* Create the BO */ 2344 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr, 2345 bo_bucket->size, pdd->drm_priv, kgd_mem, 2346 &offset, bo_bucket->alloc_flags, criu_resume); 2347 if (ret) { 2348 pr_err("Could not create the BO\n"); 2349 return ret; 2350 } 2351 pr_debug("New BO created: size:0x%llx addr:0x%llx offset:0x%llx\n", 2352 bo_bucket->size, bo_bucket->addr, offset); 2353 2354 /* Restore previous IDR handle */ 2355 pr_debug("Restoring old IDR handle for the BO"); 2356 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle, 2357 bo_priv->idr_handle + 1, GFP_KERNEL); 2358 2359 if (idr_handle < 0) { 2360 pr_err("Could not allocate idr\n"); 2361 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv, 2362 NULL); 2363 return -ENOMEM; 2364 } 2365 2366 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) 2367 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id); 2368 if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) { 2369 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id); 2370 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 2371 bo_bucket->restored_offset = offset; 2372 } else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { 2373 bo_bucket->restored_offset = offset; 2374 /* Update the VRAM usage count */ 2375 atomic64_add(bo_bucket->size, &pdd->vram_usage); 2376 } 2377 return 0; 2378 } 2379 2380 static int criu_restore_bo(struct kfd_process *p, 2381 struct kfd_criu_bo_bucket *bo_bucket, 2382 struct kfd_criu_bo_priv_data *bo_priv, 2383 struct file **file) 2384 { 2385 struct kfd_process_device *pdd; 2386 struct kgd_mem *kgd_mem; 2387 int ret; 2388 int j; 2389 2390 pr_debug("Restoring BO size:0x%llx addr:0x%llx gpu_id:0x%x flags:0x%x idr_handle:0x%x\n", 2391 bo_bucket->size, bo_bucket->addr, bo_bucket->gpu_id, bo_bucket->alloc_flags, 2392 bo_priv->idr_handle); 2393 2394 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id); 2395 if (!pdd) { 2396 pr_err("Failed to get pdd\n"); 2397 return -ENODEV; 2398 } 2399 2400 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem); 2401 if (ret) 2402 return ret; 2403 2404 /* now map these BOs to GPU/s */ 2405 for (j = 0; j < p->n_pdds; j++) { 2406 
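		/*
		 * bo_priv->mapped_gpuids was filled at checkpoint time with the
		 * user GPU IDs this BO was mapped on; unused slots are zero, so
		 * stop at the first zero entry.
		 */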
struct kfd_node *peer; 2407 struct kfd_process_device *peer_pdd; 2408 2409 if (!bo_priv->mapped_gpuids[j]) 2410 break; 2411 2412 peer_pdd = kfd_process_device_data_by_id(p, bo_priv->mapped_gpuids[j]); 2413 if (!peer_pdd) 2414 return -EINVAL; 2415 2416 peer = peer_pdd->dev; 2417 2418 peer_pdd = kfd_bind_process_to_device(peer, p); 2419 if (IS_ERR(peer_pdd)) 2420 return PTR_ERR(peer_pdd); 2421 2422 ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(peer->adev, kgd_mem, 2423 peer_pdd->drm_priv); 2424 if (ret) { 2425 pr_err("Failed to map to gpu %d/%d\n", j, p->n_pdds); 2426 return ret; 2427 } 2428 } 2429 2430 pr_debug("map memory was successful for the BO\n"); 2431 /* create the dmabuf object and export the bo */ 2432 if (bo_bucket->alloc_flags 2433 & (KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_GTT)) { 2434 ret = criu_get_prime_handle(kgd_mem, DRM_RDWR, 2435 &bo_bucket->dmabuf_fd, file); 2436 if (ret) 2437 return ret; 2438 } else { 2439 bo_bucket->dmabuf_fd = KFD_INVALID_FD; 2440 } 2441 2442 return 0; 2443 } 2444 2445 static int criu_restore_bos(struct kfd_process *p, 2446 struct kfd_ioctl_criu_args *args, 2447 uint64_t *priv_offset, 2448 uint64_t max_priv_data_size) 2449 { 2450 struct kfd_criu_bo_bucket *bo_buckets = NULL; 2451 struct kfd_criu_bo_priv_data *bo_privs = NULL; 2452 struct file **files = NULL; 2453 int ret = 0; 2454 uint32_t i = 0; 2455 2456 if (*priv_offset + (args->num_bos * sizeof(*bo_privs)) > max_priv_data_size) 2457 return -EINVAL; 2458 2459 /* Prevent MMU notifications until stage-4 IOCTL (CRIU_RESUME) is received */ 2460 amdgpu_amdkfd_block_mmu_notifications(p->kgd_process_info); 2461 2462 bo_buckets = kvmalloc_array(args->num_bos, sizeof(*bo_buckets), GFP_KERNEL); 2463 if (!bo_buckets) 2464 return -ENOMEM; 2465 2466 files = kvzalloc(args->num_bos * sizeof(struct file *), GFP_KERNEL); 2467 if (!files) { 2468 ret = -ENOMEM; 2469 goto exit; 2470 } 2471 2472 ret = copy_from_user(bo_buckets, (void __user *)args->bos, 2473 args->num_bos * sizeof(*bo_buckets)); 2474 if (ret) { 2475 pr_err("Failed to copy BOs information from user\n"); 2476 ret = -EFAULT; 2477 goto exit; 2478 } 2479 2480 bo_privs = kvmalloc_array(args->num_bos, sizeof(*bo_privs), GFP_KERNEL); 2481 if (!bo_privs) { 2482 ret = -ENOMEM; 2483 goto exit; 2484 } 2485 2486 ret = copy_from_user(bo_privs, (void __user *)args->priv_data + *priv_offset, 2487 args->num_bos * sizeof(*bo_privs)); 2488 if (ret) { 2489 pr_err("Failed to copy BOs information from user\n"); 2490 ret = -EFAULT; 2491 goto exit; 2492 } 2493 *priv_offset += args->num_bos * sizeof(*bo_privs); 2494 2495 /* Create and map new BOs */ 2496 for (; i < args->num_bos; i++) { 2497 ret = criu_restore_bo(p, &bo_buckets[i], &bo_privs[i], &files[i]); 2498 if (ret) { 2499 pr_debug("Failed to restore BO[%d] ret%d\n", i, ret); 2500 goto exit; 2501 } 2502 } /* done */ 2503 2504 /* Copy only the buckets back so user can read bo_buckets[N].restored_offset */ 2505 ret = copy_to_user((void __user *)args->bos, 2506 bo_buckets, 2507 (args->num_bos * sizeof(*bo_buckets))); 2508 if (ret) 2509 ret = -EFAULT; 2510 2511 exit: 2512 commit_files(files, bo_buckets, i, ret); 2513 kvfree(files); 2514 kvfree(bo_buckets); 2515 kvfree(bo_privs); 2516 return ret; 2517 } 2518 2519 static int criu_restore_objects(struct file *filep, 2520 struct kfd_process *p, 2521 struct kfd_ioctl_criu_args *args, 2522 uint64_t *priv_offset, 2523 uint64_t max_priv_data_size) 2524 { 2525 int ret = 0; 2526 uint32_t i; 2527 2528 BUILD_BUG_ON(offsetof(struct kfd_criu_queue_priv_data, object_type)); 
2529 BUILD_BUG_ON(offsetof(struct kfd_criu_event_priv_data, object_type)); 2530 BUILD_BUG_ON(offsetof(struct kfd_criu_svm_range_priv_data, object_type)); 2531 2532 for (i = 0; i < args->num_objects; i++) { 2533 uint32_t object_type; 2534 2535 if (*priv_offset + sizeof(object_type) > max_priv_data_size) { 2536 pr_err("Invalid private data size\n"); 2537 return -EINVAL; 2538 } 2539 2540 ret = get_user(object_type, (uint32_t __user *)(args->priv_data + *priv_offset)); 2541 if (ret) { 2542 pr_err("Failed to copy private information from user\n"); 2543 goto exit; 2544 } 2545 2546 switch (object_type) { 2547 case KFD_CRIU_OBJECT_TYPE_QUEUE: 2548 ret = kfd_criu_restore_queue(p, (uint8_t __user *)args->priv_data, 2549 priv_offset, max_priv_data_size); 2550 if (ret) 2551 goto exit; 2552 break; 2553 case KFD_CRIU_OBJECT_TYPE_EVENT: 2554 ret = kfd_criu_restore_event(filep, p, (uint8_t __user *)args->priv_data, 2555 priv_offset, max_priv_data_size); 2556 if (ret) 2557 goto exit; 2558 break; 2559 case KFD_CRIU_OBJECT_TYPE_SVM_RANGE: 2560 ret = kfd_criu_restore_svm(p, (uint8_t __user *)args->priv_data, 2561 priv_offset, max_priv_data_size); 2562 if (ret) 2563 goto exit; 2564 break; 2565 default: 2566 pr_err("Invalid object type:%u at index:%d\n", object_type, i); 2567 ret = -EINVAL; 2568 goto exit; 2569 } 2570 } 2571 exit: 2572 return ret; 2573 } 2574 2575 static int criu_restore(struct file *filep, 2576 struct kfd_process *p, 2577 struct kfd_ioctl_criu_args *args) 2578 { 2579 uint64_t priv_offset = 0; 2580 int ret = 0; 2581 2582 pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n", 2583 args->num_devices, args->num_bos, args->num_objects, args->priv_data_size); 2584 2585 if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data || 2586 !args->priv_data_size || !args->num_devices) 2587 return -EINVAL; 2588 2589 mutex_lock(&p->mutex); 2590 2591 /* 2592 * Set the process to evicted state to avoid running any new queues before all the memory 2593 * mappings are ready. 
2594 */ 2595 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_RESTORE); 2596 if (ret) 2597 goto exit_unlock; 2598 2599 /* Each function will adjust priv_offset based on how many bytes they consumed */ 2600 ret = criu_restore_process(p, args, &priv_offset, args->priv_data_size); 2601 if (ret) 2602 goto exit_unlock; 2603 2604 ret = criu_restore_devices(p, args, &priv_offset, args->priv_data_size); 2605 if (ret) 2606 goto exit_unlock; 2607 2608 ret = criu_restore_bos(p, args, &priv_offset, args->priv_data_size); 2609 if (ret) 2610 goto exit_unlock; 2611 2612 ret = criu_restore_objects(filep, p, args, &priv_offset, args->priv_data_size); 2613 if (ret) 2614 goto exit_unlock; 2615 2616 if (priv_offset != args->priv_data_size) { 2617 pr_err("Invalid private data size\n"); 2618 ret = -EINVAL; 2619 } 2620 2621 exit_unlock: 2622 mutex_unlock(&p->mutex); 2623 if (ret) 2624 pr_err("Failed to restore CRIU ret:%d\n", ret); 2625 else 2626 pr_debug("CRIU restore successful\n"); 2627 2628 return ret; 2629 } 2630 2631 static int criu_unpause(struct file *filep, 2632 struct kfd_process *p, 2633 struct kfd_ioctl_criu_args *args) 2634 { 2635 int ret; 2636 2637 mutex_lock(&p->mutex); 2638 2639 if (!p->queues_paused) { 2640 mutex_unlock(&p->mutex); 2641 return -EINVAL; 2642 } 2643 2644 ret = kfd_process_restore_queues(p); 2645 if (ret) 2646 pr_err("Failed to unpause queues ret:%d\n", ret); 2647 else 2648 p->queues_paused = false; 2649 2650 mutex_unlock(&p->mutex); 2651 2652 return ret; 2653 } 2654 2655 static int criu_resume(struct file *filep, 2656 struct kfd_process *p, 2657 struct kfd_ioctl_criu_args *args) 2658 { 2659 struct kfd_process *target = NULL; 2660 struct pid *pid = NULL; 2661 int ret = 0; 2662 2663 pr_debug("Inside %s, target pid for criu restore: %d\n", __func__, 2664 args->pid); 2665 2666 pid = find_get_pid(args->pid); 2667 if (!pid) { 2668 pr_err("Cannot find pid info for %i\n", args->pid); 2669 return -ESRCH; 2670 } 2671 2672 pr_debug("calling kfd_lookup_process_by_pid\n"); 2673 target = kfd_lookup_process_by_pid(pid); 2674 2675 put_pid(pid); 2676 2677 if (!target) { 2678 pr_debug("Cannot find process info for %i\n", args->pid); 2679 return -ESRCH; 2680 } 2681 2682 mutex_lock(&target->mutex); 2683 ret = kfd_criu_resume_svm(target); 2684 if (ret) { 2685 pr_err("kfd_criu_resume_svm failed for %i\n", args->pid); 2686 goto exit; 2687 } 2688 2689 ret = amdgpu_amdkfd_criu_resume(target->kgd_process_info); 2690 if (ret) 2691 pr_err("amdgpu_amdkfd_criu_resume failed for %i\n", args->pid); 2692 2693 exit: 2694 mutex_unlock(&target->mutex); 2695 2696 kfd_unref_process(target); 2697 return ret; 2698 } 2699 2700 static int criu_process_info(struct file *filep, 2701 struct kfd_process *p, 2702 struct kfd_ioctl_criu_args *args) 2703 { 2704 int ret = 0; 2705 2706 mutex_lock(&p->mutex); 2707 2708 if (!p->n_pdds) { 2709 pr_err("No pdd for given process\n"); 2710 ret = -ENODEV; 2711 goto err_unlock; 2712 } 2713 2714 ret = kfd_process_evict_queues(p, KFD_QUEUE_EVICTION_CRIU_CHECKPOINT); 2715 if (ret) 2716 goto err_unlock; 2717 2718 p->queues_paused = true; 2719 2720 args->pid = task_pid_nr_ns(p->lead_thread, 2721 task_active_pid_ns(p->lead_thread)); 2722 2723 ret = criu_get_process_object_info(p, &args->num_devices, &args->num_bos, 2724 &args->num_objects, &args->priv_data_size); 2725 if (ret) 2726 goto err_unlock; 2727 2728 dev_dbg(kfd_device, "Num of devices:%u bos:%u objects:%u priv_data_size:%lld\n", 2729 args->num_devices, args->num_bos, args->num_objects, 2730 args->priv_data_size); 2731 2732 
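	/*
	 * On success the queues are intentionally left evicted with
	 * queues_paused set: criu_checkpoint() refuses to run otherwise, and
	 * criu_unpause() is what restores the queues afterwards. The error
	 * path below rolls the eviction back.
	 */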
err_unlock: 2733 if (ret) { 2734 kfd_process_restore_queues(p); 2735 p->queues_paused = false; 2736 } 2737 mutex_unlock(&p->mutex); 2738 return ret; 2739 } 2740 2741 static int kfd_ioctl_criu(struct file *filep, struct kfd_process *p, void *data) 2742 { 2743 struct kfd_ioctl_criu_args *args = data; 2744 int ret; 2745 2746 dev_dbg(kfd_device, "CRIU operation: %d\n", args->op); 2747 switch (args->op) { 2748 case KFD_CRIU_OP_PROCESS_INFO: 2749 ret = criu_process_info(filep, p, args); 2750 break; 2751 case KFD_CRIU_OP_CHECKPOINT: 2752 ret = criu_checkpoint(filep, p, args); 2753 break; 2754 case KFD_CRIU_OP_UNPAUSE: 2755 ret = criu_unpause(filep, p, args); 2756 break; 2757 case KFD_CRIU_OP_RESTORE: 2758 ret = criu_restore(filep, p, args); 2759 break; 2760 case KFD_CRIU_OP_RESUME: 2761 ret = criu_resume(filep, p, args); 2762 break; 2763 default: 2764 dev_dbg(kfd_device, "Unsupported CRIU operation:%d\n", args->op); 2765 ret = -EINVAL; 2766 break; 2767 } 2768 2769 if (ret) 2770 dev_dbg(kfd_device, "CRIU operation:%d err:%d\n", args->op, ret); 2771 2772 return ret; 2773 } 2774 2775 static int runtime_enable(struct kfd_process *p, uint64_t r_debug, 2776 bool enable_ttmp_setup) 2777 { 2778 int i = 0, ret = 0; 2779 2780 if (p->is_runtime_retry) 2781 goto retry; 2782 2783 if (p->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_DISABLED) 2784 return -EBUSY; 2785 2786 for (i = 0; i < p->n_pdds; i++) { 2787 struct kfd_process_device *pdd = p->pdds[i]; 2788 2789 if (pdd->qpd.queue_count) 2790 return -EEXIST; 2791 2792 /* 2793 * Setup TTMPs by default. 2794 * Note that this call must remain here for MES ADD QUEUE to 2795 * skip_process_ctx_clear unconditionally as the first call to 2796 * SET_SHADER_DEBUGGER clears any stale process context data 2797 * saved in MES. 
2798 */ 2799 if (pdd->dev->kfd->shared_resources.enable_mes) 2800 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2801 } 2802 2803 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_ENABLED; 2804 p->runtime_info.r_debug = r_debug; 2805 p->runtime_info.ttmp_setup = enable_ttmp_setup; 2806 2807 if (p->runtime_info.ttmp_setup) { 2808 for (i = 0; i < p->n_pdds; i++) { 2809 struct kfd_process_device *pdd = p->pdds[i]; 2810 2811 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) { 2812 amdgpu_gfx_off_ctrl(pdd->dev->adev, false); 2813 pdd->dev->kfd2kgd->enable_debug_trap( 2814 pdd->dev->adev, 2815 true, 2816 pdd->dev->vm_info.last_vmid_kfd); 2817 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { 2818 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap( 2819 pdd->dev->adev, 2820 false, 2821 0); 2822 } 2823 } 2824 } 2825 2826 retry: 2827 if (p->debug_trap_enabled) { 2828 if (!p->is_runtime_retry) { 2829 kfd_dbg_trap_activate(p); 2830 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), 2831 p, NULL, 0, false, NULL, 0); 2832 } 2833 2834 mutex_unlock(&p->mutex); 2835 ret = down_interruptible(&p->runtime_enable_sema); 2836 mutex_lock(&p->mutex); 2837 2838 p->is_runtime_retry = !!ret; 2839 } 2840 2841 return ret; 2842 } 2843 2844 static int runtime_disable(struct kfd_process *p) 2845 { 2846 int i = 0, ret = 0; 2847 bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED; 2848 2849 p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED; 2850 p->runtime_info.r_debug = 0; 2851 2852 if (p->debug_trap_enabled) { 2853 if (was_enabled) 2854 kfd_dbg_trap_deactivate(p, false, 0); 2855 2856 if (!p->is_runtime_retry) 2857 kfd_dbg_ev_raise(KFD_EC_MASK(EC_PROCESS_RUNTIME), 2858 p, NULL, 0, false, NULL, 0); 2859 2860 mutex_unlock(&p->mutex); 2861 ret = down_interruptible(&p->runtime_enable_sema); 2862 mutex_lock(&p->mutex); 2863 2864 p->is_runtime_retry = !!ret; 2865 if (ret) 2866 return ret; 2867 } 2868 2869 if (was_enabled && p->runtime_info.ttmp_setup) { 2870 for (i = 0; i < p->n_pdds; i++) { 2871 struct kfd_process_device *pdd = p->pdds[i]; 2872 2873 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) 2874 amdgpu_gfx_off_ctrl(pdd->dev->adev, true); 2875 } 2876 } 2877 2878 p->runtime_info.ttmp_setup = false; 2879 2880 /* disable ttmp setup */ 2881 for (i = 0; i < p->n_pdds; i++) { 2882 struct kfd_process_device *pdd = p->pdds[i]; 2883 int last_err = 0; 2884 2885 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) { 2886 pdd->spi_dbg_override = 2887 pdd->dev->kfd2kgd->disable_debug_trap( 2888 pdd->dev->adev, 2889 false, 2890 pdd->dev->vm_info.last_vmid_kfd); 2891 2892 if (!pdd->dev->kfd->shared_resources.enable_mes) 2893 last_err = debug_refresh_runlist(pdd->dev->dqm); 2894 else 2895 last_err = kfd_dbg_set_mes_debug_mode(pdd, 2896 !kfd_dbg_has_cwsr_workaround(pdd->dev)); 2897 2898 if (last_err) 2899 ret = last_err; 2900 } 2901 } 2902 2903 return ret; 2904 } 2905 2906 static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data) 2907 { 2908 struct kfd_ioctl_runtime_enable_args *args = data; 2909 int r; 2910 2911 mutex_lock(&p->mutex); 2912 2913 if (args->mode_mask & KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK) 2914 r = runtime_enable(p, args->r_debug, 2915 !!(args->mode_mask & KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK)); 2916 else 2917 r = runtime_disable(p); 2918 2919 mutex_unlock(&p->mutex); 2920 2921 return r; 2922 } 2923 2924 static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) 2925 { 2926 struct 
kfd_ioctl_dbg_trap_args *args = data; 2927 struct task_struct *thread = NULL; 2928 struct mm_struct *mm = NULL; 2929 struct pid *pid = NULL; 2930 struct kfd_process *target = NULL; 2931 struct kfd_process_device *pdd = NULL; 2932 int r = 0; 2933 2934 if (p->context_id != KFD_CONTEXT_ID_PRIMARY) { 2935 pr_debug("Set debug trap ioctl can not be invoked on non-primary kfd process\n"); 2936 2937 return -EOPNOTSUPP; 2938 } 2939 2940 if (sched_policy == KFD_SCHED_POLICY_NO_HWS) { 2941 pr_err("Debugging does not support sched_policy %i", sched_policy); 2942 return -EINVAL; 2943 } 2944 2945 pid = find_get_pid(args->pid); 2946 if (!pid) { 2947 pr_debug("Cannot find pid info for %i\n", args->pid); 2948 r = -ESRCH; 2949 goto out; 2950 } 2951 2952 thread = get_pid_task(pid, PIDTYPE_PID); 2953 if (!thread) { 2954 r = -ESRCH; 2955 goto out; 2956 } 2957 2958 mm = get_task_mm(thread); 2959 if (!mm) { 2960 r = -ESRCH; 2961 goto out; 2962 } 2963 2964 if (args->op == KFD_IOC_DBG_TRAP_ENABLE) { 2965 bool create_process; 2966 2967 rcu_read_lock(); 2968 create_process = thread && thread != current && ptrace_parent(thread) == current; 2969 rcu_read_unlock(); 2970 2971 target = create_process ? kfd_create_process(thread) : 2972 kfd_lookup_process_by_pid(pid); 2973 } else { 2974 target = kfd_lookup_process_by_pid(pid); 2975 } 2976 2977 if (IS_ERR_OR_NULL(target)) { 2978 pr_debug("Cannot find process PID %i to debug\n", args->pid); 2979 r = target ? PTR_ERR(target) : -ESRCH; 2980 target = NULL; 2981 goto out; 2982 } 2983 2984 if (target->context_id != KFD_CONTEXT_ID_PRIMARY) { 2985 pr_debug("Set debug trap ioctl not supported on non-primary kfd process\n"); 2986 r = -EOPNOTSUPP; 2987 goto out; 2988 } 2989 2990 /* Check if target is still PTRACED. */ 2991 rcu_read_lock(); 2992 if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE 2993 && ptrace_parent(target->lead_thread) != current) { 2994 pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid); 2995 r = -EPERM; 2996 } 2997 rcu_read_unlock(); 2998 2999 if (r) 3000 goto out; 3001 3002 mutex_lock(&target->mutex); 3003 3004 if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) { 3005 pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op); 3006 r = -EINVAL; 3007 goto unlock_out; 3008 } 3009 3010 if (target->runtime_info.runtime_state != DEBUG_RUNTIME_STATE_ENABLED && 3011 (args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE || 3012 args->op == KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE || 3013 args->op == KFD_IOC_DBG_TRAP_SUSPEND_QUEUES || 3014 args->op == KFD_IOC_DBG_TRAP_RESUME_QUEUES || 3015 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || 3016 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH || 3017 args->op == KFD_IOC_DBG_TRAP_SET_FLAGS)) { 3018 r = -EPERM; 3019 goto unlock_out; 3020 } 3021 3022 if (args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH || 3023 args->op == KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH) { 3024 int user_gpu_id = kfd_process_get_user_gpu_id(target, 3025 args->op == KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH ? 
3026 args->set_node_address_watch.gpu_id : 3027 args->clear_node_address_watch.gpu_id); 3028 3029 pdd = kfd_process_device_data_by_id(target, user_gpu_id); 3030 if (user_gpu_id == -EINVAL || !pdd) { 3031 r = -ENODEV; 3032 goto unlock_out; 3033 } 3034 } 3035 3036 switch (args->op) { 3037 case KFD_IOC_DBG_TRAP_ENABLE: 3038 if (target != p) 3039 target->debugger_process = p; 3040 3041 r = kfd_dbg_trap_enable(target, 3042 args->enable.dbg_fd, 3043 (void __user *)args->enable.rinfo_ptr, 3044 &args->enable.rinfo_size); 3045 if (!r) 3046 target->exception_enable_mask = args->enable.exception_mask; 3047 3048 break; 3049 case KFD_IOC_DBG_TRAP_DISABLE: 3050 r = kfd_dbg_trap_disable(target); 3051 break; 3052 case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: 3053 r = kfd_dbg_send_exception_to_runtime(target, 3054 args->send_runtime_event.gpu_id, 3055 args->send_runtime_event.queue_id, 3056 args->send_runtime_event.exception_mask); 3057 break; 3058 case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: 3059 kfd_dbg_set_enabled_debug_exception_mask(target, 3060 args->set_exceptions_enabled.exception_mask); 3061 break; 3062 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: 3063 r = kfd_dbg_trap_set_wave_launch_override(target, 3064 args->launch_override.override_mode, 3065 args->launch_override.enable_mask, 3066 args->launch_override.support_request_mask, 3067 &args->launch_override.enable_mask, 3068 &args->launch_override.support_request_mask); 3069 break; 3070 case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE: 3071 r = kfd_dbg_trap_set_wave_launch_mode(target, 3072 args->launch_mode.launch_mode); 3073 break; 3074 case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: 3075 r = suspend_queues(target, 3076 args->suspend_queues.num_queues, 3077 args->suspend_queues.grace_period, 3078 args->suspend_queues.exception_mask, 3079 (uint32_t *)args->suspend_queues.queue_array_ptr); 3080 3081 break; 3082 case KFD_IOC_DBG_TRAP_RESUME_QUEUES: 3083 r = resume_queues(target, args->resume_queues.num_queues, 3084 (uint32_t *)args->resume_queues.queue_array_ptr); 3085 break; 3086 case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: 3087 r = kfd_dbg_trap_set_dev_address_watch(pdd, 3088 args->set_node_address_watch.address, 3089 args->set_node_address_watch.mask, 3090 &args->set_node_address_watch.id, 3091 args->set_node_address_watch.mode); 3092 break; 3093 case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: 3094 r = kfd_dbg_trap_clear_dev_address_watch(pdd, 3095 args->clear_node_address_watch.id); 3096 break; 3097 case KFD_IOC_DBG_TRAP_SET_FLAGS: 3098 r = kfd_dbg_trap_set_flags(target, &args->set_flags.flags); 3099 break; 3100 case KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT: 3101 r = kfd_dbg_ev_query_debug_event(target, 3102 &args->query_debug_event.queue_id, 3103 &args->query_debug_event.gpu_id, 3104 args->query_debug_event.exception_mask, 3105 &args->query_debug_event.exception_mask); 3106 break; 3107 case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: 3108 r = kfd_dbg_trap_query_exception_info(target, 3109 args->query_exception_info.source_id, 3110 args->query_exception_info.exception_code, 3111 args->query_exception_info.clear_exception, 3112 (void __user *)args->query_exception_info.info_ptr, 3113 &args->query_exception_info.info_size); 3114 break; 3115 case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: 3116 r = pqm_get_queue_snapshot(&target->pqm, 3117 args->queue_snapshot.exception_mask, 3118 (void __user *)args->queue_snapshot.snapshot_buf_ptr, 3119 &args->queue_snapshot.num_queues, 3120 &args->queue_snapshot.entry_size); 3121 break; 3122 case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: 3123 r = 
kfd_dbg_trap_device_snapshot(target, 3124 args->device_snapshot.exception_mask, 3125 (void __user *)args->device_snapshot.snapshot_buf_ptr, 3126 &args->device_snapshot.num_devices, 3127 &args->device_snapshot.entry_size); 3128 break; 3129 default: 3130 pr_err("Invalid option: %i\n", args->op); 3131 r = -EINVAL; 3132 } 3133 3134 unlock_out: 3135 mutex_unlock(&target->mutex); 3136 3137 out: 3138 if (thread) 3139 put_task_struct(thread); 3140 3141 if (mm) 3142 mmput(mm); 3143 3144 if (pid) 3145 put_pid(pid); 3146 3147 if (target) 3148 kfd_unref_process(target); 3149 3150 return r; 3151 } 3152 3153 /* Userspace programs need to invoke this ioctl explicitly on an FD to 3154 * create a secondary kfd_process that replaces the FD's primary kfd_process 3155 */ 3156 static int kfd_ioctl_create_process(struct file *filep, struct kfd_process *p, void *data) 3157 { 3158 struct kfd_process *process; 3159 int ret; 3160 3161 /* Each FD owns only one kfd_process */ 3162 if (p->context_id != KFD_CONTEXT_ID_PRIMARY) 3163 return -EINVAL; 3164 3165 if (!filep->private_data || !p) 3166 return -EINVAL; 3167 3168 mutex_lock(&kfd_processes_mutex); 3169 if (p != filep->private_data) { 3170 mutex_unlock(&kfd_processes_mutex); 3171 return -EINVAL; 3172 } 3173 3174 process = create_process(current, false); 3175 if (IS_ERR(process)) { 3176 mutex_unlock(&kfd_processes_mutex); 3177 return PTR_ERR(process); 3178 } 3179 3180 filep->private_data = process; 3181 mutex_unlock(&kfd_processes_mutex); 3182 3183 ret = kfd_create_process_sysfs(process); 3184 if (ret) 3185 pr_warn("Failed to create sysfs entry for the kfd_process\n"); 3186 3187 /* Each open() takes a kref on the primary kfd_process, 3188 * so drop that reference here now that a new secondary process replaces it 3189 */ 3190 kfd_unref_process(p); 3191 3192 return 0; 3193 } 3194 3195 #define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ 3196 [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \ 3197 .cmd_drv = 0, .name = #ioctl} 3198 3199 /** Ioctl table */ 3200 static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { 3201 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, 3202 kfd_ioctl_get_version, 0), 3203 3204 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, 3205 kfd_ioctl_create_queue, 0), 3206 3207 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, 3208 kfd_ioctl_destroy_queue, 0), 3209 3210 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, 3211 kfd_ioctl_set_memory_policy, 0), 3212 3213 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, 3214 kfd_ioctl_get_clock_counters, 0), 3215 3216 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, 3217 kfd_ioctl_get_process_apertures, 0), 3218 3219 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, 3220 kfd_ioctl_update_queue, 0), 3221 3222 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT, 3223 kfd_ioctl_create_event, 0), 3224 3225 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT, 3226 kfd_ioctl_destroy_event, 0), 3227 3228 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT, 3229 kfd_ioctl_set_event, 0), 3230 3231 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT, 3232 kfd_ioctl_reset_event, 0), 3233 3234 AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS, 3235 kfd_ioctl_wait_events, 0), 3236 3237 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER_DEPRECATED, 3238 kfd_ioctl_dbg_register, 0), 3239 3240 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED, 3241 kfd_ioctl_dbg_unregister, 0), 3242 3243 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED, 3244 kfd_ioctl_dbg_address_watch, 0), 3245 3246 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED, 3247 kfd_ioctl_dbg_wave_control, 0), 3248 3249
AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA, 3250 kfd_ioctl_set_scratch_backing_va, 0), 3251 3252 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG, 3253 kfd_ioctl_get_tile_config, 0), 3254 3255 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER, 3256 kfd_ioctl_set_trap_handler, 0), 3257 3258 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, 3259 kfd_ioctl_get_process_apertures_new, 0), 3260 3261 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM, 3262 kfd_ioctl_acquire_vm, 0), 3263 3264 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, 3265 kfd_ioctl_alloc_memory_of_gpu, 0), 3266 3267 AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU, 3268 kfd_ioctl_free_memory_of_gpu, 0), 3269 3270 AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU, 3271 kfd_ioctl_map_memory_to_gpu, 0), 3272 3273 AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU, 3274 kfd_ioctl_unmap_memory_from_gpu, 0), 3275 3276 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK, 3277 kfd_ioctl_set_cu_mask, 0), 3278 3279 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE, 3280 kfd_ioctl_get_queue_wave_state, 0), 3281 3282 AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO, 3283 kfd_ioctl_get_dmabuf_info, 0), 3284 3285 AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, 3286 kfd_ioctl_import_dmabuf, 0), 3287 3288 AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS, 3289 kfd_ioctl_alloc_queue_gws, 0), 3290 3291 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SMI_EVENTS, 3292 kfd_ioctl_smi_events, 0), 3293 3294 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SVM, kfd_ioctl_svm, 0), 3295 3296 AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_XNACK_MODE, 3297 kfd_ioctl_set_xnack_mode, 0), 3298 3299 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CRIU_OP, 3300 kfd_ioctl_criu, KFD_IOC_FLAG_CHECKPOINT_RESTORE), 3301 3302 AMDKFD_IOCTL_DEF(AMDKFD_IOC_AVAILABLE_MEMORY, 3303 kfd_ioctl_get_available_memory, 0), 3304 3305 AMDKFD_IOCTL_DEF(AMDKFD_IOC_EXPORT_DMABUF, 3306 kfd_ioctl_export_dmabuf, 0), 3307 3308 AMDKFD_IOCTL_DEF(AMDKFD_IOC_RUNTIME_ENABLE, 3309 kfd_ioctl_runtime_enable, 0), 3310 3311 AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_TRAP, 3312 kfd_ioctl_set_debug_trap, 0), 3313 3314 AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_PROCESS, 3315 kfd_ioctl_create_process, 0), 3316 }; 3317 3318 #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) 3319 3320 static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 3321 { 3322 struct kfd_process *process; 3323 amdkfd_ioctl_t *func; 3324 const struct amdkfd_ioctl_desc *ioctl = NULL; 3325 unsigned int nr = _IOC_NR(cmd); 3326 char stack_kdata[128]; 3327 char *kdata = NULL; 3328 unsigned int usize, asize; 3329 int retcode = -EINVAL; 3330 bool ptrace_attached = false; 3331 3332 if (nr >= AMDKFD_CORE_IOCTL_COUNT) { 3333 retcode = -ENOTTY; 3334 goto err_i1; 3335 } 3336 3337 if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { 3338 u32 amdkfd_size; 3339 3340 ioctl = &amdkfd_ioctls[nr]; 3341 3342 amdkfd_size = _IOC_SIZE(ioctl->cmd); 3343 usize = asize = _IOC_SIZE(cmd); 3344 if (amdkfd_size > asize) 3345 asize = amdkfd_size; 3346 3347 cmd = ioctl->cmd; 3348 } else { 3349 retcode = -ENOTTY; 3350 goto err_i1; 3351 } 3352 3353 dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); 3354 3355 /* Get the process struct from the filep. Only the process 3356 * that opened /dev/kfd can use the file descriptor. Child 3357 * processes need to create their own KFD device context. 
3358 */ 3359 process = filep->private_data; 3360 3361 rcu_read_lock(); 3362 if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) && 3363 ptrace_parent(process->lead_thread) == current) 3364 ptrace_attached = true; 3365 rcu_read_unlock(); 3366 3367 if (process->lead_thread != current->group_leader 3368 && !ptrace_attached) { 3369 dev_dbg(kfd_device, "Using KFD FD in wrong process\n"); 3370 retcode = -EBADF; 3371 goto err_i1; 3372 } 3373 3374 /* Do not trust userspace, use our own definition */ 3375 func = ioctl->func; 3376 3377 if (unlikely(!func)) { 3378 dev_dbg(kfd_device, "no function\n"); 3379 retcode = -EINVAL; 3380 goto err_i1; 3381 } 3382 3383 /* 3384 * Versions of docker shipped in Ubuntu 18.xx and 20.xx do not support 3385 * CAP_CHECKPOINT_RESTORE, so we also allow access if CAP_SYS_ADMIN as CAP_SYS_ADMIN is a 3386 * more priviledged access. 3387 */ 3388 if (unlikely(ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE)) { 3389 if (!capable(CAP_CHECKPOINT_RESTORE) && 3390 !capable(CAP_SYS_ADMIN)) { 3391 retcode = -EACCES; 3392 goto err_i1; 3393 } 3394 } 3395 3396 if (cmd & (IOC_IN | IOC_OUT)) { 3397 if (asize <= sizeof(stack_kdata)) { 3398 kdata = stack_kdata; 3399 } else { 3400 kdata = kmalloc(asize, GFP_KERNEL); 3401 if (!kdata) { 3402 retcode = -ENOMEM; 3403 goto err_i1; 3404 } 3405 } 3406 if (asize > usize) 3407 memset(kdata + usize, 0, asize - usize); 3408 } 3409 3410 if (cmd & IOC_IN) { 3411 if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { 3412 retcode = -EFAULT; 3413 goto err_i1; 3414 } 3415 } else if (cmd & IOC_OUT) { 3416 memset(kdata, 0, usize); 3417 } 3418 3419 retcode = func(filep, process, kdata); 3420 3421 if (cmd & IOC_OUT) 3422 if (copy_to_user((void __user *)arg, kdata, usize) != 0) 3423 retcode = -EFAULT; 3424 3425 err_i1: 3426 if (!ioctl) 3427 dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 3428 task_pid_nr(current), cmd, nr); 3429 3430 if (kdata != stack_kdata) 3431 kfree(kdata); 3432 3433 if (retcode) 3434 dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n", 3435 nr, arg, retcode); 3436 3437 return retcode; 3438 } 3439 3440 static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, 3441 struct vm_area_struct *vma) 3442 { 3443 phys_addr_t address; 3444 3445 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 3446 return -EINVAL; 3447 3448 if (PAGE_SIZE > 4096) 3449 return -EINVAL; 3450 3451 address = dev->adev->rmmio_remap.bus_addr; 3452 3453 vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | 3454 VM_DONTDUMP | VM_PFNMAP); 3455 3456 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 3457 3458 pr_debug("process pid %d mapping mmio page\n" 3459 " target user address == 0x%08llX\n" 3460 " physical address == 0x%08llX\n" 3461 " vm_flags == 0x%04lX\n" 3462 " size == 0x%04lX\n", 3463 process->lead_thread->pid, (unsigned long long) vma->vm_start, 3464 address, vma->vm_flags, PAGE_SIZE); 3465 3466 return io_remap_pfn_range(vma, 3467 vma->vm_start, 3468 address >> PAGE_SHIFT, 3469 PAGE_SIZE, 3470 vma->vm_page_prot); 3471 } 3472 3473 3474 static int kfd_mmap(struct file *filep, struct vm_area_struct *vma) 3475 { 3476 struct kfd_process *process; 3477 struct kfd_node *dev = NULL; 3478 unsigned long mmap_offset; 3479 unsigned int gpu_id; 3480 3481 process = filep->private_data; 3482 if (!process) 3483 return -ESRCH; 3484 3485 if (process->lead_thread != current->group_leader) 3486 return -EBADF; 3487 3488 mmap_offset = vma->vm_pgoff << PAGE_SHIFT; 3489 gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset); 3490 
if (gpu_id) 3491 dev = kfd_device_by_id(gpu_id); 3492 3493 switch (mmap_offset & KFD_MMAP_TYPE_MASK) { 3494 case KFD_MMAP_TYPE_DOORBELL: 3495 if (!dev) 3496 return -ENODEV; 3497 return kfd_doorbell_mmap(dev, process, vma); 3498 3499 case KFD_MMAP_TYPE_EVENTS: 3500 return kfd_event_mmap(process, vma); 3501 3502 case KFD_MMAP_TYPE_RESERVED_MEM: 3503 if (!dev) 3504 return -ENODEV; 3505 return kfd_reserved_mem_mmap(dev, process, vma); 3506 case KFD_MMAP_TYPE_MMIO: 3507 if (!dev) 3508 return -ENODEV; 3509 return kfd_mmio_mmap(dev, process, vma); 3510 } 3511 3512 return -EFAULT; 3513 } 3514