/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__

#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
#include "amdgpu_hmm.h"
#include "amdgpu_gmc.h"

#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT	(TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL	(TTM_PL_PRIV + 4)
#define AMDGPU_PL_MMIO_REMAP	(TTM_PL_PRIV + 5)
#define __AMDGPU_PL_NUM		(TTM_PL_PRIV + 6)

#define AMDGPU_GTT_MAX_TRANSFER_SIZE	1024

extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;

struct hmm_range;

struct amdgpu_gtt_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

struct amdgpu_ttm_buffer_entity {
	struct drm_sched_entity base;
	struct mutex lock;
	struct drm_mm_node gart_node;
	u64 gart_window_offs[2];
};

struct amdgpu_mman {
	struct ttm_device bdev;
	struct ttm_pool *ttm_pools;
	bool initialized;
	void __iomem *aper_base_kaddr;

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
	bool buffer_funcs_enabled;

	/* @default_entity: for workarounds, has no gart windows */
	struct amdgpu_ttm_buffer_entity default_entity;
	struct amdgpu_ttm_buffer_entity *clear_entities;
	atomic_t next_clear_entity;
	u32 num_clear_entities;
	struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES];
	atomic_t next_move_entity;
	u32 num_move_entities;

	struct amdgpu_vram_mgr vram_mgr;
	struct amdgpu_gtt_mgr gtt_mgr;
	struct ttm_resource_manager preempt_mgr;

	uint64_t		stolen_vga_size;
	struct amdgpu_bo	*stolen_vga_memory;
	uint64_t		stolen_extended_size;
	struct amdgpu_bo	*stolen_extended_memory;
	bool			keep_stolen_vga_memory;

	struct amdgpu_bo	*stolen_reserved_memory;
	uint64_t		stolen_reserved_offset;
	uint64_t		stolen_reserved_size;

	/* fw reserved memory */
	struct amdgpu_bo	*fw_reserved_memory;
	struct amdgpu_bo	*fw_reserved_memory_extend;

	/* firmware VRAM reservation */
	u64			fw_vram_usage_start_offset;
	u64			fw_vram_usage_size;
	struct amdgpu_bo	*fw_vram_usage_reserved_bo;
	void			*fw_vram_usage_va;

	/* driver VRAM reservation */
	u64			drv_vram_usage_start_offset;
	u64			drv_vram_usage_size;
	struct amdgpu_bo	*drv_vram_usage_reserved_bo;
	void			*drv_vram_usage_va;

	/* PAGE_SIZE'd BO for process memory r/w over SDMA. */
	struct amdgpu_bo	*sdma_access_bo;
	void			*sdma_access_ptr;
};

struct amdgpu_copy_mem {
	struct ttm_buffer_object	*bo;
	struct ttm_resource		*mem;
	unsigned long			offset;
};

#define AMDGPU_COPY_FLAGS_TMZ				(1 << 0)
#define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED		(1 << 1)
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED		(1 << 2)
#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_SHIFT		3
#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_MASK		0x03
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_SHIFT		5
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK		0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT		8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK		0x3f
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT	14
#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK	0x1
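
/*
 * Layout of the copy_flags word implied by the shift/mask definitions above:
 *   bit   0     TMZ
 *   bit   1     READ_DECOMPRESSED
 *   bit   2     WRITE_COMPRESSED
 *   bits  3-4   MAX_COMPRESSED
 *   bits  5-7   NUMBER_TYPE
 *   bits  8-13  DATA_FORMAT
 *   bit   14    WRITE_COMPRESS_DISABLE
 */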

#define AMDGPU_COPY_FLAGS_SET(field, value) \
	(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
#define AMDGPU_COPY_FLAGS_GET(value, field) \
	(((__u32)(value) >> AMDGPU_COPY_FLAGS_##field##_SHIFT) & AMDGPU_COPY_FLAGS_##field##_MASK)

int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);

bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);

int amdgpu_gtt_mgr_alloc_entries(struct amdgpu_gtt_mgr *mgr,
				 struct drm_mm_node *mm_node,
				 u64 num_pages,
				 enum drm_mm_insert_mode mode);
void amdgpu_gtt_mgr_free_entries(struct amdgpu_gtt_mgr *mgr,
				 struct drm_mm_node *mm_node);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);

u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
			      struct ttm_resource *mem,
			      u64 offset, u64 size,
			      struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table **sgt);
void amdgpu_vram_mgr_free_sgt(struct device *dev,
			      enum dma_data_direction dir,
			      struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
				  uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
				      uint64_t start);
void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);

bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
			    struct ttm_resource *res);

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);
int amdgpu_copy_buffer(struct amdgpu_device *adev,
		       struct amdgpu_ttm_buffer_entity *entity,
		       uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
		       struct dma_fence **fence,
		       bool vm_needs_flush, uint32_t copy_flags);
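
/*
 * Illustrative call sequence for amdgpu_copy_buffer() (a sketch only; the
 * entity choice, addresses and flag values below are hypothetical):
 *
 *	struct dma_fence *fence = NULL;
 *	uint32_t flags = AMDGPU_COPY_FLAGS_TMZ |
 *			 AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, 0x20);
 *	int r;
 *
 *	r = amdgpu_copy_buffer(adev, &adev->mman.move_entities[0],
 *			       src_addr, dst_addr, num_bytes,
 *			       resv, &fence, false, flags);
 */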
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
			    struct dma_resv *resv,
			    struct dma_fence **fence);
int amdgpu_fill_buffer(struct amdgpu_ttm_buffer_entity *entity,
		       struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct dma_resv *resv,
		       struct dma_fence **f,
		       u64 k_job_id);
struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev);

int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);

#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
				 struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
					       struct amdgpu_hmm_range *range)
{
	return -EPERM;
}
#endif

/**
 * amdgpu_compute_gart_address() - Returns GART address of an entity's window
 * @gmc: The &struct amdgpu_gmc instance to use
 * @entity: The &struct amdgpu_ttm_buffer_entity owning the GART window
 * @index: The window to use (must be 0 or 1)
 *
 * Return: GPU address of the selected GART window.
 */
static inline u64 amdgpu_compute_gart_address(struct amdgpu_gmc *gmc,
					      struct amdgpu_ttm_buffer_entity *entity,
					      int index)
{
	return gmc->gart_start + entity->gart_window_offs[index];
}

/**
 * amdgpu_gtt_node_to_byte_offset() - Returns the byte offset of a GTT node
 * @gtt_node: The &struct drm_mm_node allocated from the GTT manager
 *
 * Return: The node's start converted from pages to bytes.
 */
static inline u64 amdgpu_gtt_node_to_byte_offset(const struct drm_mm_node *gtt_node)
{
	return gtt_node->start * (u64)PAGE_SIZE;
}

void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
			      uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
			      uint64_t addr, uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end, unsigned long *userptr);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem);
int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);

void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);

int amdgpu_ttm_mmio_remap_alloc_sgt(struct amdgpu_device *adev,
				    struct ttm_resource *res,
				    struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table **sgt);
void amdgpu_ttm_mmio_remap_free_sgt(struct device *dev,
				    enum dma_data_direction dir,
				    struct sg_table *sgt);

#endif