// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

/* Build a PM4 type-3 packet header. Per the PM4 type-3 convention, the
 * count field holds the packet size in dwords minus the two header dwords.
 */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / 4 - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}

static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);
	struct pm4_mes_map_process *packet;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = pdd->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;
	int concurrent_proc_cnt = 0;
	struct kfd_node *kfd = pm->dqm->dev;

	if (WARN_ON(!ib))
		return -EFAULT;

	/* Determine the number of processes to map together to HW:
	 * it can not exceed the number of VMIDs available to the
	 * scheduler, and it is determined by the smaller of the number
	 * of processes in the runlist and kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
				struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}

static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
				queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));

	packet->bitfields2.engine_sel =
		engine_sel__mes_unmap_queues__compute;

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}

static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
{
	struct pm4_mec_release_mem *packet;

	packet = (struct pm4_mec_release_mem *)buffer;
	memset(buffer, 0, sizeof(*packet));

	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
						 sizeof(*packet));

	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
	packet->bitfields2.tcl1_action_ena = 1;
	packet->bitfields2.tc_action_ena = 1;
	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
	packet->bitfields2.atc = 0;

	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
	packet->bitfields3.int_sel =
		int_sel___release_mem__send_interrupt_after_write_confirm;

	/* address_lo_32b holds bits [31:2] of the dword-aligned address */
	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
	packet->address_hi = upper_32_bits(gpu_addr);

	packet->data_lo = 0;

	return 0;
}

const struct packet_manager_funcs kfd_vi_pm_funcs = {
	.map_process = pm_map_process_vi,
	.runlist = pm_runlist_vi,
	.set_resources = pm_set_resources_vi,
	.map_queues = pm_map_queues_vi,
	.unmap_queues = pm_unmap_queues_vi,
	.config_dequeue_wait_counts = NULL,
	.query_status = pm_query_status_vi,
	.release_mem = pm_release_mem_vi,
	.map_process_size = sizeof(struct pm4_mes_map_process),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.config_dequeue_wait_counts_size = 0,
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = sizeof(struct pm4_mec_release_mem)
};