// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include <soc/qcom/cmd-db.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect
	 * then we can also assume that if the header size is unexpectedly large
	 * that it is due to memory corruption and/or hardware failure. In this
	 * case the only reasonable course of action is to BUG() to help harden
	 * the failure.
	 */

	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Cookify any unused data at the end of the write buffer */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
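		/*
		 * Keep draining the response queue: skip over firmware error
		 * packets and stale responses whose sequence number doesn't
		 * match ours until we find the ack for this message (or the
		 * queue runs dry).
		 */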
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");

			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message id %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
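	/*
	 * Layout, as implied by the packing below: bits 0-7 carry the message
	 * id, bits 8-15 the message size in dwords, bits 16-19 the message
	 * type (HFI_MSG_CMD) and bits 20 and up the sequence number.
	 */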
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.10 */
	msg.supported_version = (1 << 28) | (1 << 19) | (1 << 17);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
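		/*
		 * Each level pairs the RPMh ARC vote for the GX rail with the
		 * frequency (in kHz, given the division by 1000 below). The
		 * 0xffffffff acd value presumably leaves ACD (adaptive clock
		 * distribution) disabled for the level - an assumption, not
		 * something this code confirms.
		 */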
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}
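
/*
 * The ddr/cnoc cmds_data words in these tables look like RPMh BCM TCS
 * commands; under that assumption bit 30 is the commit bit, bit 29 the
 * valid bit and the low bits the vote itself, so 0x40000000 is a committed
 * zero ("off") vote and 0x60000001 a committed, valid vote of 1.
 */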

static void a619_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 13;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x0;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x50080;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x6000030c;
	msg->ddr_cmds_data[1][1] = 0x600000db;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x60000618;
	msg->ddr_cmds_data[2][1] = 0x600001b6;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x60000925;
	msg->ddr_cmds_data[3][1] = 0x60000291;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x60000dc1;
	msg->ddr_cmds_data[4][1] = 0x600003dc;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x600010ad;
	msg->ddr_cmds_data[5][1] = 0x600004ae;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x600014c3;
	msg->ddr_cmds_data[6][1] = 0x600005d4;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x6000176a;
	msg->ddr_cmds_data[7][1] = 0x60000693;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001f01;
	msg->ddr_cmds_data[8][1] = 0x600008b5;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x60002940;
	msg->ddr_cmds_data[9][1] = 0x60000b95;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002f68;
	msg->ddr_cmds_data[10][1] = 0x60000d50;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x60003700;
	msg->ddr_cmds_data[11][1] = 0x60000f71;
	msg->ddr_cmds_data[11][2] = 0x60000008;
	msg->ddr_cmds_data[12][0] = 0x60003fce;
	msg->ddr_cmds_data[12][1] = 0x600011ea;
	msg->ddr_cmds_data[12][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x0;

	msg->cnoc_cmds_addrs[0] = 0x50054;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a690_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x500ac;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5003c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void adreno_7c3_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x50000;
	msg->ddr_cmds_addrs[2] = 0x50088;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5006c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}
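
/*
 * Newer targets resolve the TCS command addresses at runtime instead of
 * hardcoding them: cmd_db_read_addr() returns the address of the named
 * resource ("SH0", "MC0", "ACV", "CN0") from the RPMh command database.
 */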

static void a730_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 12;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;
	msg->ddr_cmds_data[1][0] = 0x600002e8;
	msg->ddr_cmds_data[1][1] = 0x600003d0;
	msg->ddr_cmds_data[1][2] = 0x60000008;
	msg->ddr_cmds_data[2][0] = 0x6000068d;
	msg->ddr_cmds_data[2][1] = 0x6000089a;
	msg->ddr_cmds_data[2][2] = 0x60000008;
	msg->ddr_cmds_data[3][0] = 0x600007f2;
	msg->ddr_cmds_data[3][1] = 0x60000a6e;
	msg->ddr_cmds_data[3][2] = 0x60000008;
	msg->ddr_cmds_data[4][0] = 0x600009e5;
	msg->ddr_cmds_data[4][1] = 0x60000cfd;
	msg->ddr_cmds_data[4][2] = 0x60000008;
	msg->ddr_cmds_data[5][0] = 0x60000b29;
	msg->ddr_cmds_data[5][1] = 0x60000ea6;
	msg->ddr_cmds_data[5][2] = 0x60000008;
	msg->ddr_cmds_data[6][0] = 0x60001698;
	msg->ddr_cmds_data[6][1] = 0x60001da8;
	msg->ddr_cmds_data[6][2] = 0x60000008;
	msg->ddr_cmds_data[7][0] = 0x600018d2;
	msg->ddr_cmds_data[7][1] = 0x60002093;
	msg->ddr_cmds_data[7][2] = 0x60000008;
	msg->ddr_cmds_data[8][0] = 0x60001e66;
	msg->ddr_cmds_data[8][1] = 0x600027e6;
	msg->ddr_cmds_data[8][2] = 0x60000008;
	msg->ddr_cmds_data[9][0] = 0x600027c2;
	msg->ddr_cmds_data[9][1] = 0x6000342f;
	msg->ddr_cmds_data[9][2] = 0x60000008;
	msg->ddr_cmds_data[10][0] = 0x60002e71;
	msg->ddr_cmds_data[10][1] = 0x60003cf5;
	msg->ddr_cmds_data[10][2] = 0x60000008;
	msg->ddr_cmds_data[11][0] = 0x600030ae;
	msg->ddr_cmds_data[11][1] = 0x60003fe5;
	msg->ddr_cmds_data[11][2] = 0x60000008;

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a740_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x7;

	msg->ddr_cmds_addrs[0] = cmd_db_read_addr("SH0");
	msg->ddr_cmds_addrs[1] = cmd_db_read_addr("MC0");
	msg->ddr_cmds_addrs[2] = cmd_db_read_addr("ACV");

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/* TODO: add a proper dvfs table */

	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x1;

	msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. They are used by the GMU, but the
	 * values for the sdm845 are known and fixed so we can hard code them.
	 */

	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a619(adreno_gpu))
		a619_build_bw_table(&msg);
	else if (adreno_is_a640_family(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else if (adreno_is_7c3(adreno_gpu))
		adreno_7c3_build_bw_table(&msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(&msg);
	else if (adreno_is_a690(adreno_gpu))
		a690_build_bw_table(&msg);
	else if (adreno_is_a730(adreno_gpu))
		a730_build_bw_table(&msg);
	else if (adreno_is_a740_family(adreno_gpu))
		a740_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}
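
/*
 * The freq field below carries an index into the GPU frequency table sent
 * in HFI_H2F_MSG_PERF_TABLE (hence the "index" parameter name), not a raw
 * frequency in Hz; the GMU maps the index onto the matching DCVS level.
 */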

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = index;
	msg.bw = 0; /* TODO: bus scaling */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should freq and bw fields be non-zero ? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the sequence, but at this
	 * point the kernel driver doesn't need to know the exact version of
	 * the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until next
	 * boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;

		memset(&queue->history, 0xff, sizeof(queue->history));
		queue->history_idx = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	memset(&queue->history, 0xff, sizeof(queue->history));
	queue->history_idx = 0;

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}
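
/*
 * HFI buffer layout, as set up below: the first 4K page holds the queue
 * table header followed by the per-queue headers, and each queue then gets
 * a 4K page of its own for the ring data (header->size is SZ_4K >> 2
 * dwords).
 */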

void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}