/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
#include <drm/drm_client.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	uint32_t gem_handle;
	bool aql_queue;
	bool is_imported;
};
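
/*
 * Illustrative sketch (not part of this interface): a kgd_mem can be
 * attached to several GPUs through its attachments list of
 * kfd_mem_attachment entries. Assuming the caller is allowed to take
 * mem->lock, the currently mapped attachments could be counted as
 * below; kgd_mem_count_mapped() is a hypothetical helper name.
 */
static inline unsigned int kgd_mem_count_mapped(struct kgd_mem *mem)
{
	struct kfd_mem_attachment *attachment;
	unsigned int n = 0;

	mutex_lock(&mem->lock);
	list_for_each_entry(attachment, &mem->attachments, list) {
		if (attachment->is_mapped)
			n++;
	}
	mutex_unlock(&mem->lock);

	return n;
}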

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used[MAX_XCP];
	uint64_t vram_used_aligned[MAX_XCP];
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	/* Client for KFD BO GEM handle allocations */
	struct drm_client_dev client;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			enum kgd_engine_type engine,
			uint32_t vmid, uint64_t gpu_addr,
			uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}

static inline
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	return 0;
}
#endif
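
/*
 * Illustrative sketch (not part of this interface): the helpers above
 * let callers recognize KFD eviction fences among generic dma_fences.
 * A check like the following could tell whether a fence was created on
 * behalf of a given process address space; example_fence_is_from_mm()
 * is a hypothetical helper name.
 */
static inline bool example_fence_is_from_mm(struct dma_fence *f,
					    struct mm_struct *mm)
{
	struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);

	/* NULL means f is not a KFD eviction fence at all */
	if (!fence)
		return false;

	return amdkfd_fence_check_mm(f, mm);
}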

/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
				u32 inst);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
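
/*
 * Illustrative sketch (not part of this interface): a hqd_load-style
 * function would refresh the ring write pointer from user space right
 * before programming the hardware queue descriptor, falling back to a
 * default when the read fails. example_fetch_wptr() is a hypothetical
 * helper name.
 */
static inline uint32_t example_fetch_wptr(struct mm_struct *mm,
					  uint32_t __user *wptr)
{
	uint32_t wptr_val = 0;

	/* valid is false when neither branch of the macro could read wptr */
	if (!read_user_wptr(mm, wptr, wptr_val))
		wptr_val = 0;

	return wptr_val;
}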

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct amdgpu_vm *avm,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
				bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);

u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
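
/*
 * Illustrative flow (not part of this interface): allocate a buffer
 * object in a process VM, map it on one GPU, then wait for the page
 * table update to complete. Error unwinding and the choice of alloc
 * flags are simplified; example_alloc_and_map() and its parameters are
 * hypothetical.
 */
static inline int example_alloc_and_map(struct amdgpu_device *adev,
					uint64_t va, uint64_t size,
					void *drm_priv, uint32_t flags,
					struct kgd_mem **mem)
{
	uint64_t offset = 0;
	int r;

	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
						    mem, &offset, flags, false);
	if (r)
		return r;

	r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, *mem, drm_priv);
	if (r) {
		/* Unwind: release the allocation again, size output unused */
		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, *mem, drm_priv,
						       NULL);
		return r;
	}

	/* Interruptible wait until the mapping is visible to the GPU */
	return amdgpu_amdkfd_gpuvm_sync_memory(adev, *mem, true);
}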

#define KFD_XCP_MEM_ID(adev, xcp_id) \
	((adev)->xcp_mgr && (xcp_id) >= 0 ?\
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
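
/*
 * Illustrative sketch (not part of this interface): the two helpers
 * above resolve a partition's memory id (-1 when the device has no XCP
 * manager or xcp_id is invalid) and its memory size in bytes.
 * example_log_xcp_memory() is a hypothetical helper name.
 */
static inline void example_log_xcp_memory(struct amdgpu_device *adev,
					  int xcp_id)
{
	int mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
	u64 size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);

	pr_debug("xcp %d: mem_id %d, size %llu bytes\n", xcp_id, mem_id, size);
}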

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
#else
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
#endif

/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
				struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */