// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"
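
/*
 * PM4 packet builders used by the packet manager on GFX v9 parts (with an
 * Aldebaran-specific MAP_PROCESS variant). These fill the runlist packets
 * consumed by the HWS firmware: MAP_PROCESS, RUN_LIST, SET_RESOURCES,
 * MAP_QUEUES, UNMAP_QUEUES, the grace-period WRITE_DATA and QUERY_STATUS.
 *
 * All builders rely on pm_build_pm4_header() (declared in
 * kfd_kernel_queue.h) to pack the common TYPE-3 PM4 header. A minimal
 * sketch of that helper, assuming the standard type-3 layout:
 *
 *	unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
 *	{
 *		union PM4_MES_TYPE_3_HEADER header;
 *
 *		header.u32All = 0;
 *		header.opcode = opcode;
 *		header.count = packet_size / 4 - 2;	// payload dwords after the header
 *		header.type = PM4_TYPE_3;
 *		return header.u32All;
 *	}
 */
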
static int pm_map_process_v9(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;
	struct kfd_node *kfd = pm->dqm->dev;
	struct kfd_process_device *pdd =
			container_of(qpd, struct kfd_process_device, qpd);

	packet = (struct pm4_mes_map_process *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
			pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
		packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
		packet->bitfields2.new_debug = 1;
	}

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		/* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
		 * not defined, so setting it won't do any harm.
		 */
		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
				| 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;

		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}
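
/*
 * Aldebaran's MAP_PROCESS variant additionally carries the per-VMID SPI
 * debug control and the TCP watch-point registers, so debugger state
 * follows the process into whatever VMID the HWS assigns it.
 */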
static int pm_map_process_aldebaran(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process_aldebaran *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;
	struct kfd_dev *kfd = pm->dqm->dev->kfd;
	struct kfd_process_device *pdd =
			container_of(qpd, struct kfd_process_device, qpd);
	int i;

	packet = (struct pm4_mes_map_process_aldebaran *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
			sizeof(struct pm4_mes_map_process_aldebaran));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
	packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
						pdd->spi_dbg_launch_mode;

	if (pdd->process->debug_trap_enabled) {
		for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
			packet->tcp_watch_cntl[i] = pdd->watch_points[i];

		packet->bitfields2.single_memops =
			!!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
	}

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}

static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	int concurrent_proc_cnt = 0;
	struct kfd_node *kfd = pm->dqm->dev;

	/* Determine the number of processes to map together to HW:
	 * it cannot exceed the number of VMIDs available to the
	 * scheduler, and it is determined by the smaller of the number
	 * of processes in the runlist and the kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->ib_base_hi = upper_32_bits(ib);

	return 0;
}
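
/*
 * SET_RESOURCES tells the HWS which global resources it owns: the VMIDs
 * it may assign to processes, the queue slots it may schedule into, and
 * the GDS, GWS and OAC apertures available to user queues.
 */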
static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}

static inline bool pm_use_ext_eng(struct kfd_dev *dev)
{
	return amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) >=
	       IP_VERSION(5, 2, 0);
}

static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
	packet->bitfields2.extended_engine_sel =
		extended_engine_sel__mes_map_queues__legacy_engine_sel;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (is_static)
			packet->bitfields2.queue_type =
				queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		if (q->properties.sdma_engine_id < 2 &&
		    !pm_use_ext_eng(q->device->kfd))
			packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		else {
			/*
			 * For GFX9.4.3, the SDMA engine id can be 8 or more.
			 * For such cases, set extended_engine_sel to 2 and
			 * ensure engine_sel lies between 0-7.
			 */
			if (q->properties.sdma_engine_id >= 8)
				packet->bitfields2.extended_engine_sel =
					extended_engine_sel__mes_map_queues__sdma8_to_15_sel;
			else
				packet->bitfields2.extended_engine_sel =
					extended_engine_sel__mes_map_queues__sdma0_to_7_sel;

			packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
		}
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
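
/*
 * The preemption grace period is programmed by writing an ASIC-specific
 * wait-times register through a WRITE_DATA packet;
 * build_grace_period_packet_info() supplies the register offset and the
 * encoded value for this ASIC.
 */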
static int pm_set_grace_period_v9(struct packet_manager *pm,
		uint32_t *buffer,
		uint32_t grace_period)
{
	struct pm4_mec_write_data_mmio *packet;
	uint32_t reg_offset = 0;
	uint32_t reg_data = 0;

	pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
			pm->dqm->dev->adev,
			pm->dqm->wait_times,
			grace_period,
			&reg_offset,
			&reg_data);

	if (grace_period == USE_DEFAULT_GRACE_PERIOD)
		reg_data = pm->dqm->wait_times;

	packet = (struct pm4_mec_write_data_mmio *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));

	packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA,
					sizeof(struct pm4_mec_write_data_mmio));

	packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register;
	packet->bitfields2.addr_incr =
			addr_incr___write_data__do_not_increment_address;

	packet->bitfields3.dst_mmreg_addr = reg_offset;

	packet->data = reg_data;

	return 0;
}

static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));

	packet->bitfields2.extended_engine_sel =
		pm_use_ext_eng(pm->dqm->dev->kfd) ?
			extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
			extended_engine_sel__mes_unmap_queues__legacy_engine_sel;

	packet->bitfields2.engine_sel =
		engine_sel__mes_unmap_queues__compute;

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}
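
/*
 * QUERY_STATUS is used as a fence: once the preceding runlist packets have
 * been processed, the CP writes fence_value to fence_address, which KFD
 * then polls to confirm (un)map completion.
 */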
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

const struct packet_manager_funcs kfd_v9_pm_funcs = {
	.map_process		= pm_map_process_v9,
	.runlist		= pm_runlist_v9,
	.set_resources		= pm_set_resources_v9,
	.map_queues		= pm_map_queues_v9,
	.unmap_queues		= pm_unmap_queues_v9,
	.set_grace_period	= pm_set_grace_period_v9,
	.query_status		= pm_query_status_v9,
	.release_mem		= NULL,
	.map_process_size	= sizeof(struct pm4_mes_map_process),
	.runlist_size		= sizeof(struct pm4_mes_runlist),
	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
	.set_grace_period_size	= sizeof(struct pm4_mec_write_data_mmio),
	.query_status_size	= sizeof(struct pm4_mes_query_status),
	.release_mem_size	= 0,
};

const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
	.map_process		= pm_map_process_aldebaran,
	.runlist		= pm_runlist_v9,
	.set_resources		= pm_set_resources_v9,
	.map_queues		= pm_map_queues_v9,
	.unmap_queues		= pm_unmap_queues_v9,
	.set_grace_period	= pm_set_grace_period_v9,
	.query_status		= pm_query_status_v9,
	.release_mem		= NULL,
	.map_process_size	= sizeof(struct pm4_mes_map_process_aldebaran),
	.runlist_size		= sizeof(struct pm4_mes_runlist),
	.set_resources_size	= sizeof(struct pm4_mes_set_resources),
	.map_queues_size	= sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size	= sizeof(struct pm4_mes_unmap_queues),
	.set_grace_period_size	= sizeof(struct pm4_mec_write_data_mmio),
	.query_status_size	= sizeof(struct pm4_mes_query_status),
	.release_mem_size	= 0,
};
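
/*
 * These tables are wired up in pm_init() (kfd_packet_manager.c), which
 * picks between them by GC IP version: Aldebaran-class parts (GFX 9.4.2,
 * and on recent kernels 9.4.3) presumably get the extended MAP_PROCESS
 * packet, while other GFX9-and-newer SOC15 parts use kfd_v9_pm_funcs.
 */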