/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id; identifies the GPU to which the offset belongs
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT 62
#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
                                << KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
                                & KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
                                >> KFD_MMAP_GPU_ID_SHIFT)
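/*
 * Illustrative sketch (not part of the driver): composing a doorbell mmap
 * offset for a hypothetical gpu_id and recovering the gpu_id from it. The
 * result is a byte offset; vm_pgoff stores it right-shifted by PAGE_SHIFT.
 * The helper name is hypothetical.
 */
static inline bool kfd_example_mmap_offset_roundtrip(void)
{
        uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(0x7703);

        return KFD_MMAP_GET_GPU_ID(offset) == 0x7703;
}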
/*
 * When working with the cp scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed hqd slot; here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling; with that in mind, we set the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct) \
        ((typeof(ptr_to_struct)) kzalloc_obj(*ptr_to_struct))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first chunk is the TBA used for the CWSR ISA code. The second
 * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048)

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE        \
        (KFD_MAX_NUM_OF_PROCESSES *             \
                KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

/* KFD_UNMAP_LATENCY_MS is the timeout the CP waits for SDMA preemption. One
 * XCC can be associated with 2 SDMA engines. queue_preemption_timeout_ms is
 * the time the driver waits for the CP to return the UNMAP_QUEUE fence. Thus
 * the math is queue_preemption_timeout_ms = sdma_preemption_time * 2 + cp
 * workload. The formula here makes the CP workload 10% of the total timeout.
 */
#define KFD_UNMAP_LATENCY_MS    \
        ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
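/*
 * Worked example (illustrative, assuming queue_preemption_timeout_ms keeps
 * its default of 9000 ms):
 *      cp workload          = 9000 / 10         =  900 ms
 *      KFD_UNMAP_LATENCY_MS = (9000 - 900) >> 1 = 4050 ms
 *      total                = 2 * 4050 + 900    = 9000 ms
 */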
#define KFD_MAX_SDMA_QUEUES     128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
        /*
         * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
         * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
         * perform privileged operations and load arbitrary data into MQDs and
         * eventually HQD registers when the queue is mapped by HWS. In order
         * to prevent this we should perform additional security checks.
         *
         * This flag restricts such ioctls to callers with the
         * CHECKPOINT_RESTORE capability.
         *
         * Note: Since earlier versions of Docker do not support
         * CHECKPOINT_RESTORE, we also allow ioctls with the SYS_ADMIN
         * capability.
         */
        KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};
/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;


/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to an HSA
 * process on an unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * without large BAR.
 */
extern int debug_largebar;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/*
 * Don't evict process queues on vm fault
 */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
        cache_policy_coherent,
        cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) (amdgpu_ip_version((dev)->adev, GC_HWIP, 0))
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
        ((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) || \
         (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) || \
         (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 4)) || \
         (KFD_GC_VERSION(dev) == IP_VERSION(9, 5, 0)) || \
         (KFD_GC_VERSION(dev) == IP_VERSION(12, 1, 0)))
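/*
 * Example (illustrative): KFD_GC_VERSION() lets callers gate code paths on
 * the GC IP version, e.g.
 *
 *      if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
 *              ... handling specific to that IP version ...
 */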
struct kfd_node;

struct kfd_event_interrupt_class {
        bool (*interrupt_isr)(struct kfd_node *dev,
                        const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
                        bool *patched_flag);
        void (*interrupt_wq)(struct kfd_node *dev,
                        const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
        uint32_t gfx_target_version;
        const struct kfd_event_interrupt_class *event_interrupt_class;
        unsigned int max_pasid_bits;
        unsigned int max_no_of_hqd;
        unsigned int doorbell_size;
        size_t ih_ring_entry_size;
        uint8_t num_of_watch_points;
        uint16_t mqd_size_aligned;
        bool supports_cwsr;
        bool needs_pci_atomics;
        uint32_t no_atomic_fw_version;
        unsigned int num_sdma_queues_per_engine;
        unsigned int num_reserved_sdma_queues_per_engine;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
        uint32_t range_start;
        uint32_t range_end;
        uint64_t gpu_addr;
        uint32_t *cpu_ptr;
        void *mem;
};

struct kfd_vmid_info {
        uint32_t first_vmid_kfd;
        uint32_t last_vmid_kfd;
        uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES   8

struct kfd_dev;

struct kfd_node {
        unsigned int node_id;
        struct amdgpu_device *adev;     /* Duplicated here along with keeping
                                         * a copy in kfd_dev to save a hop
                                         */
        const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
                                              * keeping a copy in kfd_dev to
                                              * save a hop
                                              */
        struct kfd_vmid_info vm_info;
        unsigned int id;                /* topology stub index */
        uint32_t xcc_mask;              /* Instance mask of XCCs present */
        struct amdgpu_xcp *xcp;

        /* Interrupts */
        struct kfifo ih_fifo;
        struct work_struct interrupt_work;
        spinlock_t interrupt_lock;

        /*
         * Interrupts of interest to KFD are copied
         * from the HW ring into a SW ring.
         */
        bool interrupts_active;
        uint32_t interrupt_bitmap;      /* Only used for GFX 9.4.3 */

        /* QCM Device instance */
        struct device_queue_manager *dqm;

        /* Global GWS resource shared between processes */
        void *gws;

        /* Clients watching SMI events */
        struct list_head smi_clients;
        spinlock_t smi_lock;
        uint32_t reset_seq_num;

        /* SRAM ECC flag */
        atomic_t sram_ecc_flag;

        /* SPM process ID */
        unsigned int spm_pasid;

        /* Maximum process number mapped to HW scheduler */
        unsigned int max_proc_per_quantum;

        unsigned int compute_vmid_bitmap;

        struct kfd_local_mem_info local_mem_info;

        struct kfd_dev *kfd;

        /* Track per device allocated watch points */
        uint32_t alloc_watch_ids;
        spinlock_t watch_points_lock;
};

struct kfd_dev {
        struct amdgpu_device *adev;

        struct kfd_device_info device_info;

        u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
                                           * used by the kernel queue
                                           */

        struct kgd2kfd_shared_resources shared_resources;

        const struct kfd2kgd_calls *kfd2kgd;
        struct mutex doorbell_mutex;

        void *gtt_mem;
        uint64_t gtt_start_gpu_addr;
        void *gtt_start_cpu_ptr;
        void *gtt_sa_bitmap;
        struct mutex gtt_sa_lock;
        unsigned int gtt_sa_chunk_size;
        unsigned int gtt_sa_num_of_chunks;

        bool init_complete;

        /* Firmware versions */
        uint16_t mec_fw_version;
        uint16_t mec2_fw_version;
        uint16_t sdma_fw_version;

        /* CWSR */
        bool cwsr_enabled;
        const void *cwsr_isa;
        unsigned int cwsr_isa_size;

        /* xGMI */
        uint64_t hive_id;

        bool pci_atomic_requested;

        /* Compute Profile ref. count */
        atomic_t compute_profile;

        struct ida doorbell_ida;
        unsigned int max_doorbell_slices;

        int noretry;

        struct kfd_node *nodes[MAX_KFD_NODES];
        unsigned int num_nodes;

        struct workqueue_struct *ih_wq;

        /* Kernel doorbells for KFD device */
        struct amdgpu_bo *doorbells;

        /* bitmap for dynamic doorbell allocation from doorbell object */
        unsigned long *doorbell_bitmap;

        /* for dynamic partitioning */
        int kfd_dev_lock;

        atomic_t kfd_processes_count;
};

enum kfd_mempool {
        KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
        KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
        KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
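/*
 * Illustrative sketch (not part of the driver): iterating the KFD nodes of
 * a device, here to find the first node with interrupts enabled. The helper
 * name is hypothetical.
 */
static inline struct kfd_node *kfd_example_first_active_node(struct kfd_dev *kfd)
{
        unsigned int i;

        for (i = 0; i < kfd->num_nodes; i++)
                if (kfd->nodes[i]->interrupts_active)
                        return kfd->nodes[i];

        return NULL;
}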
/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_unmap_queues_filter {
        KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
        KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
        KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 *
 * @KFD_QUEUE_TYPE_SDMA_BY_ENG_ID: SDMA user mode queue with target SDMA engine ID.
 */
enum kfd_queue_type {
        KFD_QUEUE_TYPE_COMPUTE,
        KFD_QUEUE_TYPE_SDMA,
        KFD_QUEUE_TYPE_HIQ,
        KFD_QUEUE_TYPE_SDMA_XGMI,
        KFD_QUEUE_TYPE_SDMA_BY_ENG_ID
};

enum kfd_queue_format {
        KFD_QUEUE_FORMAT_PM4,
        KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
        KFD_QUEUE_PRIORITY_MINIMUM = 0,
        KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * CP has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or a kernel mode queue.
 *
 */
struct queue_properties {
        enum kfd_queue_type type;
        enum kfd_queue_format format;
        unsigned int queue_id;
        uint64_t queue_address;
        uint64_t queue_size;
        uint64_t metadata_queue_size;
        uint32_t priority;
        uint32_t queue_percent;
        void __user *read_ptr;
        void __user *write_ptr;
        void __iomem *doorbell_ptr;
        uint32_t doorbell_off;
        bool is_interop;
        bool is_evicted;
        bool is_suspended;
        bool is_being_destroyed;
        bool is_active;
        bool is_gws;
        uint32_t pm4_target_xcc;
        bool is_dbg_wa;
        bool is_user_cu_masked;
        /* Not relevant for user mode queues in cp scheduling */
        unsigned int vmid;
        /* Relevant only for sdma queues */
        uint32_t sdma_engine_id;
        uint32_t sdma_queue_id;
        uint32_t sdma_vm_addr;
        /* Relevant only for VI */
        uint64_t eop_ring_buffer_address;
        uint32_t eop_ring_buffer_size;
        uint64_t ctx_save_restore_area_address;
        uint32_t ctx_save_restore_area_size;
        uint32_t ctl_stack_size;
        uint64_t tba_addr;
        uint64_t tma_addr;
        uint64_t exception_status;

        struct amdgpu_bo *wptr_bo;
        struct amdgpu_bo *rptr_bo;
        struct amdgpu_bo *ring_bo;
        struct amdgpu_bo *eop_buf_bo;
        struct amdgpu_bo *cwsr_bo;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&       \
                            (q).queue_address != 0 &&   \
                            (q).queue_percent > 0 &&    \
                            !(q).is_evicted &&          \
                            !(q).is_suspended)
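/*
 * Illustrative sketch (not part of the driver): the minimum a queue must
 * satisfy for QUEUE_IS_ACTIVE() to report it active. The helper name and
 * the ring address/size values are hypothetical.
 */
static inline bool kfd_example_queue_is_active(void)
{
        struct queue_properties qp = {
                .queue_size = PAGE_SIZE,        /* non-zero ring size */
                .queue_address = 0x1000,        /* hypothetical ring address */
                .queue_percent = 100,           /* zero would mark it inactive */
                .is_evicted = false,
                .is_suspended = false,
        };

        return QUEUE_IS_ACTIVE(qp);             /* evaluates to true */
}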
enum mqd_update_flag {
        UPDATE_FLAG_DBG_WA_ENABLE = 1,
        UPDATE_FLAG_DBG_WA_DISABLE = 2,
        UPDATE_FLAG_IS_GWS = 4, /* quirk for gfx9 IP */
};

struct mqd_update_info {
        union {
                struct {
                        uint32_t count; /* Must be a multiple of 32 */
                        uint32_t *ptr;
                } cu_mask;
        };
        enum mqd_update_flag update_flag;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
        struct list_head list;
        void *mqd;
        struct kfd_mem_obj *mqd_mem_obj;
        uint64_t gart_mqd_addr;
        struct queue_properties properties;

        uint32_t mec;
        uint32_t pipe;
        uint32_t queue;

        unsigned int sdma_id;
        unsigned int doorbell_id;

        struct kfd_process *process;
        struct kfd_node *device;
        void *gws;

        /* procfs */
        struct kobject kobj;

        void *gang_ctx_bo;
        uint64_t gang_ctx_gpu_addr;
        void *gang_ctx_cpu_ptr;

        struct amdgpu_bo *wptr_bo_gart;
};

enum KFD_MQD_TYPE {
        KFD_MQD_TYPE_HIQ = 0,           /* for hiq */
        KFD_MQD_TYPE_CP,                /* for cp queues and diq */
        KFD_MQD_TYPE_SDMA,              /* for sdma queues */
        KFD_MQD_TYPE_DIQ,               /* for diq */
        KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
        KFD_PIPE_PRIORITY_CS_LOW = 0,
        KFD_PIPE_PRIORITY_CS_MEDIUM,
        KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
        unsigned int vmid_mask;
        enum kfd_queue_type type;
        uint64_t queue_mask;
        uint64_t gws_mask;
        uint32_t oac_mask;
        uint32_t gds_heap_base;
        uint32_t gds_heap_size;
};

struct process_queue_manager {
        /* data */
        struct kfd_process *process;
        struct list_head queues;
        unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
        /* The Device Queue Manager that owns this data */
        struct device_queue_manager *dqm;
        struct process_queue_manager *pqm;
        /* Queues list */
        struct list_head queues_list;
        struct list_head priv_queue_list;

        unsigned int queue_count;
        unsigned int vmid;
        bool is_debug;
        unsigned int evicted; /* eviction counter, 0=active */

        /* This flag tells if we should reset all wavefronts on
         * process termination
         */
        bool reset_wavefronts;

        /* This flag tells us if this process has a GWS-capable
         * queue that will be mapped into the runlist. It's
         * possible to request a GWS BO, but not have the queue
         * currently mapped, and this changes how the MAP_PROCESS
         * PM4 packet is configured.
         */
        bool mapped_gws_queue;

        /* All the memory management data should be here too */
        uint64_t gds_context_area;
        /* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
        uint64_t page_table_base;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
        uint32_t sh_mem_ape1_base;
        uint32_t sh_mem_ape1_limit;
        uint32_t gds_size;
        uint32_t num_gws;
        uint32_t num_oac;
        uint32_t sh_hidden_private_base;
        uint32_t vm_cntx_cntl;

        /* CWSR memory */
        struct kgd_mem *cwsr_mem;
        void *cwsr_kaddr;
        uint64_t cwsr_base;
        uint64_t tba_addr;
        uint64_t tma_addr;

        /* IB memory */
        struct kgd_mem *ib_mem;
        uint64_t ib_base;
        void *ib_kaddr;

        /* doorbells for kfd process */
        struct amdgpu_bo *proc_doorbells;

        /* bitmap for dynamic doorbell allocation from the bo */
        unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
        (((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
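/*
 * Illustrative sketch (not part of the driver): packing a hypothetical
 * gpu_id/idr_handle pair into a 64-bit handle and unpacking it again. The
 * helper name is hypothetical.
 */
static inline bool kfd_example_handle_roundtrip(void)
{
        uint64_t handle = MAKE_HANDLE(0x7703, 42);

        return GET_GPU_ID(handle) == 0x7703 && GET_IDR_HANDLE(handle) == 42;
}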
enum kfd_pdd_bound {
        PDD_UNBOUND = 0,
        PDD_BOUND,
        PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * The SDMA activity counter runs at a 100MHz frequency.
 * We display SDMA activity at microsecond granularity in sysfs.
 * As a result, the divisor is 100: e.g. a raw count of 100 ticks
 * corresponds to 1 microsecond of SDMA activity.
 */
#define SDMA_ACTIVITY_DIVISOR  100

/* Data that is per-process-per device. */
struct kfd_process_device {
        /* The device that owns this data. */
        struct kfd_node *dev;

        /* The process that owns this kfd_process_device. */
        struct kfd_process *process;

        /* per-process-per device QCM data structure */
        struct qcm_process_device qpd;

        /* Apertures */
        uint64_t lds_base;
        uint64_t lds_limit;
        uint64_t gpuvm_base;
        uint64_t gpuvm_limit;
        uint64_t scratch_base;
        uint64_t scratch_limit;

        /* VM context for GPUVM allocations */
        struct file *drm_file;
        void *drm_priv;

        /* GPUVM allocations storage */
        struct idr alloc_idr;

        /* Flag used to tell whether the pdd has been dequeued from the dqm.
         * This is used to prevent dev->dqm->ops.process_termination() from
         * being called twice when it is already called in the IOMMU callback
         * function.
         */
        bool already_dequeued;
        bool runtime_inuse;

        /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
        enum kfd_pdd_bound bound;

        /* VRAM usage */
        atomic64_t vram_usage;
        struct attribute attr_vram;
        char vram_filename[MAX_SYSFS_FILENAME_LEN];

        /* SDMA activity tracking */
        uint64_t sdma_past_activity_counter;
        struct attribute attr_sdma;
        char sdma_filename[MAX_SYSFS_FILENAME_LEN];

        /* Eviction activity tracking */
        uint64_t last_evict_timestamp;
        atomic64_t evict_duration_counter;
        struct attribute attr_evict;

        struct kobject *kobj_stats;

        /*
         * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
         * that is associated with the device encoded by "this" struct
         * instance. The value reflects CU usage by all of the waves launched
         * by this process on this device. A very important property of the
         * occupancy parameter is that its value is a snapshot of current use.
         *
         * The following is to be noted regarding how this parameter is
         * reported:
         *
         * The number of waves that a CU can launch is limited by a couple of
         * parameters. These are encoded by the struct amdgpu_cu_info instance
         * that is part of every device definition. For GFX9 devices this
         * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
         * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
         * when they do use scratch memory. This could change for future
         * devices and therefore this example should be considered as a guide.
         *
         * All CUs of a device are available for the process. This may not be
         * true under certain conditions - e.g. CU masking.
         *
         * Finally, the number of CUs occupied by a process is affected both
         * by the number of CUs the device has and by the number of other
         * competing processes.
         */
        struct attribute attr_cu_occupancy;

        /* sysfs counters for GPU retry fault and page migration tracking */
        struct kobject *kobj_counters;
        struct attribute attr_faults;
        struct attribute attr_page_in;
        struct attribute attr_page_out;
        uint64_t faults;
        uint64_t page_in;
        uint64_t page_out;

        /* Exception code status */
        uint64_t exception_status;
        void *vm_fault_exc_data;
        size_t vm_fault_exc_data_size;

        /* Tracks debug per-vmid request settings */
        uint32_t spi_dbg_override;
        uint32_t spi_dbg_launch_mode;
        uint32_t watch_points[4];
        uint32_t alloc_watch_ids;

        /*
         * If this process has been checkpointed before, then the user
         * application will use the original gpu_id on the
         * checkpointed node to refer to this device.
         */
        uint32_t user_gpu_id;

        void *proc_ctx_bo;
        uint64_t proc_ctx_gpu_addr;
        void *proc_ctx_cpu_ptr;

        /* Tracks queue reset status */
        bool has_reset_queue;

        u32 pasid;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
        struct mutex lock;
        struct rb_root_cached objects;
        struct list_head list;
        struct work_struct deferred_list_work;
        struct list_head deferred_range_list;
        struct list_head criu_svm_metadata_list;
        spinlock_t deferred_list_lock;
        atomic_t evicted_ranges;
        atomic_t drain_pagefaults;
        struct delayed_work restore_work;
        DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
        struct task_struct *faulting_task;
        /* Checkpoint timestamps decide whether a page fault recovery needs
         * to be dropped.
         */
        uint64_t checkpoint_ts[MAX_GPU_INSTANCE];

        /* Default granularity to use in buffer migration
         * and restoration of backing memory while handling
         * recoverable page faults
         */
        uint8_t default_granularity;
};

/* Process data */
struct kfd_process {
        /*
         * kfd_process are stored in an mm_struct*->kfd_process*
         * hash table (kfd_processes in kfd_process.c)
         */
        struct hlist_node kfd_processes;

        /*
         * Opaque pointer to mm_struct. We don't hold a reference to
         * it so it should never be dereferenced from here. This is
         * only used for looking up processes by their mm.
         */
        void *mm;

        struct kref ref;
        struct work_struct release_work;

        struct mutex mutex;

        /*
         * In any process, the thread that started main() is the lead
         * thread and outlives the rest.
         * It is here because amd_iommu_bind_pasid wants a task_struct.
         * It can also be used for safely getting a reference to the
         * mm_struct of the process.
         */
        struct task_struct *lead_thread;

        /* We want to receive a notification when the mm_struct is destroyed */
        struct mmu_notifier mmu_notifier;

        /*
         * Array of kfd_process_device pointers,
         * one for each device the process is using.
         */
        struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
        uint32_t n_pdds;

        struct process_queue_manager pqm;

        /* Is the user space process 32 bit? */
        bool is_32bit_user_mode;

        /* Event-related data */
        struct mutex event_mutex;
        /* Event ID allocator and lookup */
        struct idr event_idr;
        /* Event page */
        u64 signal_handle;
        struct kfd_signal_page *signal_page;
        size_t signal_mapped_size;
        size_t signal_event_count;
        bool signal_event_limit_reached;

        /* Information used for memory eviction */
        void *kgd_process_info;
        /* Eviction fence that is attached to all the BOs of this process. The
         * fence will be triggered during eviction and a new one will be
         * created during restore
         */
        struct dma_fence __rcu *ef;

        /* Work items for evicting and restoring BOs */
        struct delayed_work eviction_work;
        struct delayed_work restore_work;
        /* seqno of the last scheduled eviction */
        unsigned int last_eviction_seqno;
        /* Approx. the last timestamp (in jiffies) when the process was
         * restored after an eviction
         */
        unsigned long last_restore_timestamp;

        /* Indicates device process is debug attached with reserved vmid. */
        bool debug_trap_enabled;

        /* per-process-per device debug event fd file */
        struct file *dbg_ev_file;

        /* If the process is a kfd debugger, we need to know so we can clean
         * up at exit time. If a process enables debugging on itself, it does
         * its own clean-up, so we don't set the flag here. We track this by
         * counting the number of processes this process is debugging.
         */
        atomic_t debugged_process_count;

        /* If the process is being debugged, this is the debugger process */
        struct kfd_process *debugger_process;

        /* Kobj for our procfs */
        struct kobject *kobj;
        struct kobject *kobj_queues;
        struct attribute attr_pasid;

        /* Keep track of cwsr init */
        bool has_cwsr;

        /* Exception code enable mask and status */
        uint64_t exception_enable_mask;
        uint64_t exception_status;

        /* Used to drain stale interrupts */
        wait_queue_head_t wait_irq_drain;
        bool irq_drain_is_open;

        /* shared virtual memory registered by this process */
        struct svm_range_list svms;

        bool xnack_enabled;

        /* Work area for debugger event writer worker. */
        struct work_struct debug_event_workarea;

        /* Tracks debug per-vmid request for debug flags */
        u32 dbg_flags;

        atomic_t poison;
        /* Queues are in a paused state because we are in the process of
         * doing a CRIU checkpoint
         */
        bool queues_paused;

        /* Tracks runtime enable status */
        struct semaphore runtime_enable_sema;
        bool is_runtime_retry;
        struct kfd_runtime_info runtime_info;

        /* if gpu page fault sent to KFD */
        bool gpu_page_fault;

        /* kfd context id */
        u16 context_id;

        /* The primary kfd_process allocates IDs for its secondary
         * kfd_processes; 0 for the primary kfd_process
         */
        struct ida id_table;

};

#define KFD_PROCESS_TABLE_SIZE 8 /* bits: 256 entries */
#define KFD_CONTEXT_ID_PRIMARY 0xFFFF
#define KFD_CONTEXT_ID_MIN 0

extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
                        void *data);

struct amdkfd_ioctl_desc {
        unsigned int cmd;
        int flags;
        amdkfd_ioctl_t *func;
        unsigned int cmd_drv;
        const char *name;
};
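/*
 * Illustrative sketch (not part of the driver): a handler matching the
 * amdkfd_ioctl_t signature. Real handlers live in kfd_chardev.c and are
 * registered through an amdkfd_ioctl_desc table; this one is hypothetical.
 */
static inline int kfd_example_ioctl(struct file *filep, struct kfd_process *p,
                                    void *data)
{
        /* @data already points at a kernel copy of the user argument. */
        return 0;
}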
bool kfd_dev_is_large_bar(struct kfd_node *dev);

struct kfd_process *create_process(const struct task_struct *thread, bool primary);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
int kfd_create_process_sysfs(struct kfd_process *process);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid,
                                struct kfd_process_device **pdd);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
struct kfd_process *kfd_lookup_process_by_id(const struct mm_struct *mm, u16 id);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
                                uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
                                uint32_t gpuidx, uint32_t *gpuid) {
        return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
                                struct kfd_process *p, uint32_t gpuidx) {
        return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
                                uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
                                struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
                                struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
                                struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
                          struct vm_area_struct *vma);
void kfd_process_notifier_release_internal(struct kfd_process *p);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                         void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
                                          int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                          int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
                      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
                                      unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
                                        struct kfd_process_device *pdd,
                                        unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
                                struct kfd_process_device *pdd);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
                                struct kfd_process_device *pdd);
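/*
 * Illustrative sketch (not part of the driver): acquiring a kernel doorbell,
 * ringing it with a new write-pointer value and releasing it again. The
 * helper name is hypothetical.
 */
static inline void kfd_example_ring_kernel_doorbell(struct kfd_dev *kfd, u32 wptr)
{
        unsigned int doorbell_off;
        void __iomem *db = kfd_get_kernel_doorbell(kfd, &doorbell_off);

        if (db) {
                write_kernel_doorbell(db, wptr);
                kfd_release_kernel_doorbell(kfd, (u32 __iomem *)db);
        }
}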
/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
                        struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
                uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
                uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
                                        uint32_t vmid)
{
        return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
               (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}
static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
                                        uint32_t node_id, uint32_t vmid) {
        struct kfd_dev *dev = adev->kfd.dev;
        uint32_t i;

        /*
         * On multi-AID systems, attempt per-node matching. Otherwise,
         * fall back to the first node.
         */
        if (!amdgpu_is_multi_aid(adev))
                return dev->nodes[0];

        for (i = 0; i < dev->num_nodes; i++)
                if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
                        return dev->nodes[i];

        return NULL;
}
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
uint32_t kfd_gpu_node_num(void);

/* Interrupts */
#define KFD_IRQ_FENCE_CLIENTID  0xff
#define KFD_IRQ_FENCE_SOURCEID  0xff
#define KFD_IRQ_IS_FENCE(client, source)                \
                ((client) == KFD_IRQ_FENCE_CLIENTID &&  \
                (source) == KFD_IRQ_FENCE_SOURCEID)
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
                         const uint32_t *ih_ring_entry,
                         uint32_t *patched_ihre, bool *flag);
int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
void kfd_process_close_interrupt_drain(unsigned int pasid);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
                                  uint64_t tba_addr,
                                  uint64_t tma_addr);
void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
                                     bool enabled);

/* CWSR initialization */
int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any
 * of the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
        uint32_t version;
        uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
        /* For future use */
        uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
        uint64_t user_addr;
        uint32_t idr_handle;
        uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
 * kfd_criu_svm_range_priv_data is the object type
 */
enum kfd_criu_object_type {
        KFD_CRIU_OBJECT_TYPE_QUEUE,
        KFD_CRIU_OBJECT_TYPE_EVENT,
        KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
        uint32_t object_type;
        uint64_t start_addr;
        uint64_t size;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[];
};

struct kfd_criu_queue_priv_data {
        uint32_t object_type;
        uint64_t q_address;
        uint64_t q_size;
        uint64_t read_ptr_addr;
        uint64_t write_ptr_addr;
        uint64_t doorbell_off;
        uint64_t eop_ring_buffer_address;
        uint64_t ctx_save_restore_area_address;
        uint32_t gpu_id;
        uint32_t type;
        uint32_t format;
        uint32_t q_id;
        uint32_t priority;
        uint32_t q_percent;
        uint32_t doorbell_id;
        uint32_t gws;
        uint32_t sdma_id;
        uint32_t eop_ring_buffer_size;
        uint32_t ctx_save_restore_area_size;
        uint32_t ctl_stack_size;
        uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
        uint32_t object_type;
        uint64_t user_handle;
        uint32_t event_id;
        uint32_t auto_reset;
        uint32_t type;
        uint32_t signaled;

        union {
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
        };
};

int kfd_process_get_queue_info(struct kfd_process *p,
                               uint32_t *num_queues,
                               uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
                               uint8_t __user *user_priv_data,
                               uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
                           uint8_t __user *user_priv_data,
                           uint64_t *priv_data_offset,
                           uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
                               uint8_t __user *user_priv_data,
                               uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
                           struct kfd_process *p,
                           uint8_t __user *user_priv_data,
                           uint64_t *priv_data_offset,
                           uint64_t max_priv_data_size);
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
                         u64 expected_size);
void kfd_queue_buffer_put(struct amdgpu_bo **bo);
int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo);
int kfd_queue_unref_bo_vas(struct kfd_process_device *pdd,
                           struct queue_properties *properties);
void kfd_queue_ctx_save_restore_size(struct kfd_topology_device *dev);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v12_1(enum KFD_MQD_TYPE type,
                struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
                                       enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_evict_process_device(struct kfd_process_device *pdd);
int kfd_dqm_suspend_bad_queue_mes(struct kfd_node *knode, u32 pasid, u32 doorbell_id);

/* Process Queue Manager */
struct process_queue_node {
        struct queue *q;
        struct kernel_queue *kq;
        struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
                     struct kfd_node *dev,
                     struct queue_properties *properties,
                     unsigned int *qid,
                     const struct kfd_criu_queue_priv_data *q_data,
                     const void *restore_mqd,
                     const void *restore_ctl_stack,
                     uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
                                struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
                   struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
                void *gws);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
                                 unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
                       unsigned int qid,
                       void __user *ctl_stack,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
                           uint64_t exception_clear_mask,
                           void __user *buf,
                           int *num_qss_entries,
                           uint32_t *entry_size);

int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
                              uint64_t fence_value,
                              unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
                                  unsigned int qid,
                                  u32 *mqd_size,
                                  u32 *ctl_stack_size);
/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

/**
 * enum kfd_config_dequeue_wait_counts_cmd - Command for configuring
 * dequeue wait counts.
 *
 * @KFD_DEQUEUE_WAIT_INIT: Set optimized dequeue wait counts for certain
 * ASICs. For these ASICs, this is the default value used by RESET.
 * @KFD_DEQUEUE_WAIT_RESET: Reset dequeue wait counts to the optimized value
 * for certain ASICs; for other ASICs, set it to the default hardware reset
 * value.
 * @KFD_DEQUEUE_WAIT_SET_SCH_WAVE: Set context switch latency wait
 *
 */
enum kfd_config_dequeue_wait_counts_cmd {
        KFD_DEQUEUE_WAIT_INIT = 1,
        KFD_DEQUEUE_WAIT_RESET = 2,
        KFD_DEQUEUE_WAIT_SET_SCH_WAVE = 3
};


struct packet_manager {
        struct device_queue_manager *dqm;
        struct kernel_queue *priv_queue;
        struct mutex lock;
        bool allocated;
        struct kfd_mem_obj *ib_buffer_obj;
        unsigned int ib_size_bytes;
        bool is_over_subscription;

        const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
        /* Support ASIC-specific packet formats for PM4 packets */
        int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
                        struct qcm_process_device *qpd);
        int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain);
        int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
                        struct scheduling_resources *res);
        int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
                        struct queue *q, bool is_static);
        int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
                        enum kfd_unmap_queues_filter mode,
                        uint32_t filter_param, bool reset);
        int (*config_dequeue_wait_counts)(struct packet_manager *pm, uint32_t *buffer,
                        enum kfd_config_dequeue_wait_counts_cmd cmd, uint32_t value);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

        /* Packet sizes */
        int map_process_size;
        int runlist_size;
        int set_resources_size;
        int map_queues_size;
        int unmap_queues_size;
        int config_dequeue_wait_counts_size;
        int query_status_size;
        int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
                          struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
                         uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
                        enum kfd_unmap_queues_filter mode,
                        uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

int pm_config_dequeue_wait_counts(struct packet_manager *pm,
                                  enum kfd_config_dequeue_wait_counts_cmd cmd,
                                  uint32_t wait_counts_config);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
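/*
 * Illustrative sketch (not part of the driver): requesting a context-switch
 * latency wait via the packet manager. The helper name and the wait value
 * of 20 are hypothetical.
 */
static inline int kfd_example_set_sch_wave(struct packet_manager *pm)
{
        return pm_config_dequeue_wait_counts(pm, KFD_DEQUEUE_WAIT_SET_SCH_WAVE, 20);
}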
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;
extern const struct kfd_event_interrupt_class event_interrupt_class_v12_1;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
                       bool all, uint32_t *user_timeout_ms,
                       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
                                uint32_t valid_id_bits);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint32_t event_type, bool auto_reset, uint32_t node_id,
                     uint32_t *event_id, uint32_t *event_trigger_data,
                     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event_with_userptr(struct kfd_process *p, uint64_t gpu_va);

void kfd_signal_vm_fault_event(struct kfd_process_device *pdd,
                               struct kfd_vm_fault_info *info,
                               struct kfd_hsa_memory_exception_data *data);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
void kfd_signal_process_terminate_event(struct kfd_process *p);

static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
                                 enum TLB_FLUSH_TYPE type)
{
        struct amdgpu_device *adev = pdd->dev->adev;
        struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);

        amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
}

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
        return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
               (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
               KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

int kfd_send_exception_to_runtime(struct kfd_process *p,
                                  unsigned int queue_id,
                                  uint64_t error_reason);
bool kfd_is_locked(struct kfd_dev *kfd);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *node)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
        struct drm_device *ddev;

        if (node->xcp)
                ddev = node->xcp->ddev;
        else
                ddev = adev_to_drm(node->adev);

        return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
                                          ddev->render->index,
                                          DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
        return 0;
#endif
}

static inline bool kfd_is_first_node(struct kfd_node *node)
{
        return (node == node->kfd->nodes[0]);
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

void kfd_debugfs_add_process(struct kfd_process *p);
void kfd_debugfs_remove_process(struct kfd_process *p);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}
static inline void kfd_debugfs_add_process(struct kfd_process *p) {}
static inline void kfd_debugfs_remove_process(struct kfd_process *p) {}

#endif

#endif